xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
32 #include "if_meta_hdr.h"
33 #endif
34 #include "enet.h"
35 #include "dp_internal.h"
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 
43 
44 /* TODO Add support in TSO */
45 #define DP_DESC_NUM_FRAG(x) 0
46 
47 /* disable TQM_BYPASS */
48 #define TQM_BYPASS_WAR 0
49 
50 /* invalid peer id for reinject */
51 #define DP_INVALID_PEER 0XFFFE
52 
53 /* mapping between hal encrypt type and cdp_sec_type */
54 #define MAX_CDP_SEC_TYPE 12
55 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
56 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
57 					HAL_TX_ENCRYPT_TYPE_WEP_128,
58 					HAL_TX_ENCRYPT_TYPE_WEP_104,
59 					HAL_TX_ENCRYPT_TYPE_WEP_40,
60 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
61 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
62 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
63 					HAL_TX_ENCRYPT_TYPE_WAPI,
64 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
65 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
66 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
67 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
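
/*
 * Illustrative sketch (not compiled): sec_type_map[] is indexed by a
 * cdp_sec_type value to pick the HAL encrypt type programmed into the
 * TCL descriptor, mirroring the call in dp_tx_hw_enqueue() further
 * below. The enum member name used here is an assumption based on the
 * cdp headers; the surrounding variables are taken from that caller.
 */
#if 0
	enum cdp_sec_type sec_type = cdp_sec_type_aes_ccmp;	/* assumed */

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
#endif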
68 
69 #ifdef QCA_TX_LIMIT_CHECK
70 /**
71  * dp_tx_limit_check - Check if allocated tx descriptors reached
72  * soc max limit and pdev max limit
73  * @vdev: DP vdev handle
74  *
75  * Return: true if allocated tx descriptors reached max configured value, else
76  * false
77  */
78 static inline bool
79 dp_tx_limit_check(struct dp_vdev *vdev)
80 {
81 	struct dp_pdev *pdev = vdev->pdev;
82 	struct dp_soc *soc = pdev->soc;
83 
84 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
85 			soc->num_tx_allowed) {
86 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
87 			  "%s: queued packets are more than max tx, drop the frame",
88 			  __func__);
89 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
90 		return true;
91 	}
92 
93 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
94 			pdev->num_tx_allowed) {
95 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
96 			  "%s: queued packets are more than max tx, drop the frame",
97 			  __func__);
98 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
99 		return true;
100 	}
101 	return false;
102 }
103 
104 /**
105  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
106  * @pdev: DP pdev handle
107  *
108  * Return: void
109  */
110 static inline void
111 dp_tx_outstanding_inc(struct dp_pdev *pdev)
112 {
113 	struct dp_soc *soc = pdev->soc;
114 
115 	qdf_atomic_inc(&pdev->num_tx_outstanding);
116 	qdf_atomic_inc(&soc->num_tx_outstanding);
117 }
118 
119 /**
120  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
121  * @pdev: DP pdev handle
122  *
123  * Return: void
124  */
125 static inline void
126 dp_tx_outstanding_dec(struct dp_pdev *pdev)
127 {
128 	struct dp_soc *soc = pdev->soc;
129 
130 	qdf_atomic_dec(&pdev->num_tx_outstanding);
131 	qdf_atomic_dec(&soc->num_tx_outstanding);
132 }
133 
134 #else //QCA_TX_LIMIT_CHECK
135 static inline bool
136 dp_tx_limit_check(struct dp_vdev *vdev)
137 {
138 	return false;
139 }
140 
141 static inline void
142 dp_tx_outstanding_inc(struct dp_pdev *pdev)
143 {
144 }
145 
146 static inline void
147 dp_tx_outstanding_dec(struct dp_pdev *pdev)
148 {
149 }
150 #endif //QCA_TX_LIMIT_CHECK
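
/*
 * Illustrative sketch (not compiled): how the helpers above pair up in
 * the Tx path, mirroring dp_tx_prepare_desc_single() and
 * dp_tx_desc_release() later in this file. The limit check runs before
 * a software descriptor is allocated, the outstanding count is bumped
 * once allocation succeeds and dropped again when the descriptor is
 * released; variables are assumed to be in scope as in those callers.
 */
#if 0
	if (dp_tx_limit_check(vdev))
		return NULL;

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);
	/* ... later, on release in dp_tx_desc_release() ... */
	dp_tx_outstanding_dec(pdev);
#endif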
151 
152 #if defined(FEATURE_TSO)
153 /**
154  * dp_tx_tso_unmap_segment() - Unmap TSO segment
155  *
156  * @soc - core txrx main context
157  * @seg_desc - tso segment descriptor
158  * @num_seg_desc - tso number segment descriptor
159  */
160 static void dp_tx_tso_unmap_segment(
161 		struct dp_soc *soc,
162 		struct qdf_tso_seg_elem_t *seg_desc,
163 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
164 {
165 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
166 	if (qdf_unlikely(!seg_desc)) {
167 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
168 			 __func__, __LINE__);
169 		qdf_assert(0);
170 	} else if (qdf_unlikely(!num_seg_desc)) {
171 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
172 			 __func__, __LINE__);
173 		qdf_assert(0);
174 	} else {
175 		bool is_last_seg;
176 		/* no tso segment left to do dma unmap */
177 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
178 			return;
179 
180 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
181 					true : false;
182 		qdf_nbuf_unmap_tso_segment(soc->osdev,
183 					   seg_desc, is_last_seg);
184 		num_seg_desc->num_seg.tso_cmn_num_seg--;
185 	}
186 }
187 
188 /**
189  * dp_tx_tso_desc_release() - Release the tso segment and tso num seg
190  *                            descriptors back to the freelist
191  *
192  * @soc - soc device handle
193  * @tx_desc - Tx software descriptor
194  */
195 static void dp_tx_tso_desc_release(struct dp_soc *soc,
196 				   struct dp_tx_desc_s *tx_desc)
197 {
198 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
199 	if (qdf_unlikely(!tx_desc->tso_desc)) {
200 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
201 			  "%s %d TSO desc is NULL!",
202 			  __func__, __LINE__);
203 		qdf_assert(0);
204 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
205 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
206 			  "%s %d TSO num desc is NULL!",
207 			  __func__, __LINE__);
208 		qdf_assert(0);
209 	} else {
210 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
211 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
212 
213 		/* Add the tso num segment into the free list */
214 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
215 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
216 					    tx_desc->tso_num_desc);
217 			tx_desc->tso_num_desc = NULL;
218 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
219 		}
220 
221 		/* Add the tso segment into the free list*/
222 		dp_tx_tso_desc_free(soc,
223 				    tx_desc->pool_id, tx_desc->tso_desc);
224 		tx_desc->tso_desc = NULL;
225 	}
226 }
227 #else
228 static void dp_tx_tso_unmap_segment(
229 		struct dp_soc *soc,
230 		struct qdf_tso_seg_elem_t *seg_desc,
231 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
232 
233 {
234 }
235 
236 static void dp_tx_tso_desc_release(struct dp_soc *soc,
237 				   struct dp_tx_desc_s *tx_desc)
238 {
239 }
240 #endif
241 /**
242  * dp_tx_desc_release() - Release Tx Descriptor
243  * @tx_desc: Tx Descriptor
244  * @desc_pool_id: Descriptor Pool ID
245  *
246  * Deallocate all resources attached to Tx descriptor and free the Tx
247  * descriptor.
248  *
249  * Return:
250  */
251 static void
252 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
253 {
254 	struct dp_pdev *pdev = tx_desc->pdev;
255 	struct dp_soc *soc;
256 	uint8_t comp_status = 0;
257 
258 	qdf_assert(pdev);
259 
260 	soc = pdev->soc;
261 
262 	if (tx_desc->frm_type == dp_tx_frm_tso)
263 		dp_tx_tso_desc_release(soc, tx_desc);
264 
265 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
266 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
267 
268 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
269 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
270 
271 	dp_tx_outstanding_dec(pdev);
272 
273 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
274 		qdf_atomic_dec(&pdev->num_tx_exception);
275 
276 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
277 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
278 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
279 							     soc->hal_soc);
280 	else
281 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
282 
283 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
284 		"Tx Completion Release desc %d status %d outstanding %d",
285 		tx_desc->id, comp_status,
286 		qdf_atomic_read(&pdev->num_tx_outstanding));
287 
288 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
289 	return;
290 }
291 
292 /**
293  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
294  * @vdev: DP vdev Handle
295  * @nbuf: skb
296  * @msdu_info: msdu_info required to create HTT metadata
297  *
298  * Prepares and fills HTT metadata in the frame pre-header for special frames
299  * that should be transmitted using varying transmit parameters.
300  * There are 2 VDEV modes that currently need this special metadata -
301  *  1) Mesh Mode
302  *  2) DSRC Mode
303  *
304  * Return: HTT metadata size
305  *
306  */
307 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
308 					  struct dp_tx_msdu_info_s *msdu_info)
309 {
310 	uint32_t *meta_data = msdu_info->meta_data;
311 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
312 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
313 
314 	uint8_t htt_desc_size;
315 
316 	/* Size rounded off to a multiple of 8 bytes */
317 	uint8_t htt_desc_size_aligned;
318 
319 	uint8_t *hdr = NULL;
320 
321 	/*
322 	 * Metadata - HTT MSDU Extension header
323 	 */
324 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
325 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
326 
327 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
328 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
329 							   meta_data[0])) {
330 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
331 				 htt_desc_size_aligned)) {
332 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
333 							 htt_desc_size_aligned);
334 			if (!nbuf) {
335 				/*
336 				 * qdf_nbuf_realloc_headroom won't do skb_clone
337 				 * as skb_realloc_headroom does. so, no free is
338 				 * needed here.
339 				 */
340 				DP_STATS_INC(vdev,
341 					     tx_i.dropped.headroom_insufficient,
342 					     1);
343 				qdf_print(" %s[%d] skb_realloc_headroom failed",
344 					  __func__, __LINE__);
345 				return 0;
346 			}
347 		}
348 		/* Fill and add HTT metaheader */
349 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
350 		if (!hdr) {
351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
352 					"Error in filling HTT metadata");
353 
354 			return 0;
355 		}
356 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
357 
358 	} else if (vdev->opmode == wlan_op_mode_ocb) {
359 		/* Todo - Add support for DSRC */
360 	}
361 
362 	return htt_desc_size_aligned;
363 }
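
/*
 * Illustrative sketch (not compiled): the aligned HTT metadata size
 * returned by dp_tx_prepare_htt_metadata() is combined with the
 * alignment pad to form the packet offset programmed into the Tx
 * descriptor, as done by dp_tx_prepare_desc_single() further below;
 * variables are assumed to be in scope as in that caller.
 */
#if 0
	align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;
	htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, msdu_info);
	if (htt_hdr_size == 0)
		goto failure;
	tx_desc->pkt_offset = align_pad + htt_hdr_size;
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
#endif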
364 
365 /**
366  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
367  * @tso_seg: TSO segment to process
368  * @ext_desc: Pointer to MSDU extension descriptor
369  *
370  * Return: void
371  */
372 #if defined(FEATURE_TSO)
373 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
374 		void *ext_desc)
375 {
376 	uint8_t num_frag;
377 	uint32_t tso_flags;
378 
379 	/*
380 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
381 	 * tcp_flag_mask
382 	 *
383 	 * Checksum enable flags are set in TCL descriptor and not in Extension
384 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
385 	 */
386 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
387 
388 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
389 
390 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
391 		tso_seg->tso_flags.ip_len);
392 
393 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
394 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
395 
396 
397 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
398 		uint32_t lo = 0;
399 		uint32_t hi = 0;
400 
401 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
402 				  (tso_seg->tso_frags[num_frag].length));
403 
404 		qdf_dmaaddr_to_32s(
405 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
406 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
407 			tso_seg->tso_frags[num_frag].length);
408 	}
409 
410 	return;
411 }
412 #else
413 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
414 		void *ext_desc)
415 {
416 	return;
417 }
418 #endif
419 
420 #if defined(FEATURE_TSO)
421 /**
422  * dp_tx_free_tso_seg_list() - Loop through the tso segments
423  *                             allocated and free them
424  *
425  * @soc: soc handle
426  * @free_seg: list of tso segments
427  * @msdu_info: msdu descriptor
428  *
429  * Return - void
430  */
431 static void dp_tx_free_tso_seg_list(
432 		struct dp_soc *soc,
433 		struct qdf_tso_seg_elem_t *free_seg,
434 		struct dp_tx_msdu_info_s *msdu_info)
435 {
436 	struct qdf_tso_seg_elem_t *next_seg;
437 
438 	while (free_seg) {
439 		next_seg = free_seg->next;
440 		dp_tx_tso_desc_free(soc,
441 				    msdu_info->tx_queue.desc_pool_id,
442 				    free_seg);
443 		free_seg = next_seg;
444 	}
445 }
446 
447 /**
448  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
449  *                                 allocated and free them
450  *
451  * @soc:  soc handle
452  * @free_num_seg: list of tso number segments
453  * @msdu_info: msdu descriptor
454  * Return - void
455  */
456 static void dp_tx_free_tso_num_seg_list(
457 		struct dp_soc *soc,
458 		struct qdf_tso_num_seg_elem_t *free_num_seg,
459 		struct dp_tx_msdu_info_s *msdu_info)
460 {
461 	struct qdf_tso_num_seg_elem_t *next_num_seg;
462 
463 	while (free_num_seg) {
464 		next_num_seg = free_num_seg->next;
465 		dp_tso_num_seg_free(soc,
466 				    msdu_info->tx_queue.desc_pool_id,
467 				    free_num_seg);
468 		free_num_seg = next_num_seg;
469 	}
470 }
471 
472 /**
473  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
474  *                              and do dma unmap for each segment
475  *
476  * @soc: soc handle
477  * @free_seg: list of tso segments
478  * @num_seg_desc: tso number segment descriptor
479  *
480  * Return - void
481  */
482 static void dp_tx_unmap_tso_seg_list(
483 		struct dp_soc *soc,
484 		struct qdf_tso_seg_elem_t *free_seg,
485 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
486 {
487 	struct qdf_tso_seg_elem_t *next_seg;
488 
489 	if (qdf_unlikely(!num_seg_desc)) {
490 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
491 		return;
492 	}
493 
494 	while (free_seg) {
495 		next_seg = free_seg->next;
496 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
497 		free_seg = next_seg;
498 	}
499 }
500 
501 #ifdef FEATURE_TSO_STATS
502 /**
503  * dp_tso_get_stats_idx() - Retrieve the tso packet id
504  * @pdev - pdev handle
505  *
506  * Return: id
507  */
508 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
509 {
510 	uint32_t stats_idx;
511 
512 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
513 						% CDP_MAX_TSO_PACKETS);
514 	return stats_idx;
515 }
516 #else
517 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
518 {
519 	return 0;
520 }
521 #endif /* FEATURE_TSO_STATS */
522 
523 /**
524  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
525  *				     free the tso segment descriptors and
526  *				     the tso num seg descriptor
527  *
528  * @soc:  soc handle
529  * @msdu_info: msdu descriptor
530  * @tso_seg_unmap: flag to show if dma unmap is necessary
531  *
532  * Return - void
533  */
534 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
535 					  struct dp_tx_msdu_info_s *msdu_info,
536 					  bool tso_seg_unmap)
537 {
538 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
539 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
540 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
541 					tso_info->tso_num_seg_list;
542 
543 	/* do dma unmap for each segment */
544 	if (tso_seg_unmap)
545 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
546 
547 	/* free all tso num seg descriptors, though there should only be one */
548 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
549 
550 	/* free all tso segment descriptor */
551 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
552 }
553 
554 /**
555  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
556  * @vdev: virtual device handle
557  * @msdu: network buffer
558  * @msdu_info: meta data associated with the msdu
559  *
560  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
561  */
562 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
563 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
564 {
565 	struct qdf_tso_seg_elem_t *tso_seg;
566 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
567 	struct dp_soc *soc = vdev->pdev->soc;
568 	struct dp_pdev *pdev = vdev->pdev;
569 	struct qdf_tso_info_t *tso_info;
570 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
571 	tso_info = &msdu_info->u.tso_info;
572 	tso_info->curr_seg = NULL;
573 	tso_info->tso_seg_list = NULL;
574 	tso_info->num_segs = num_seg;
575 	msdu_info->frm_type = dp_tx_frm_tso;
576 	tso_info->tso_num_seg_list = NULL;
577 
578 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
579 
580 	while (num_seg) {
581 		tso_seg = dp_tx_tso_desc_alloc(
582 				soc, msdu_info->tx_queue.desc_pool_id);
583 		if (tso_seg) {
584 			tso_seg->next = tso_info->tso_seg_list;
585 			tso_info->tso_seg_list = tso_seg;
586 			num_seg--;
587 		} else {
588 			DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
589 				 __func__);
590 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
591 
592 			return QDF_STATUS_E_NOMEM;
593 		}
594 	}
595 
596 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
597 
598 	tso_num_seg = dp_tso_num_seg_alloc(soc,
599 			msdu_info->tx_queue.desc_pool_id);
600 
601 	if (tso_num_seg) {
602 		tso_num_seg->next = tso_info->tso_num_seg_list;
603 		tso_info->tso_num_seg_list = tso_num_seg;
604 	} else {
605 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
606 			 __func__);
607 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
608 
609 		return QDF_STATUS_E_NOMEM;
610 	}
611 
612 	msdu_info->num_seg =
613 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
614 
615 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
616 			msdu_info->num_seg);
617 
618 	if (!(msdu_info->num_seg)) {
619 		/*
620 		 * Free allocated TSO seg desc and number seg desc,
621 		 * and unmap the segments if dma mapping has been done.
622 		 */
623 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
624 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
625 
626 		return QDF_STATUS_E_INVAL;
627 	}
628 
629 	tso_info->curr_seg = tso_info->tso_seg_list;
630 
631 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
632 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
633 			     msdu, msdu_info->num_seg);
634 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
635 				    tso_info->msdu_stats_idx);
636 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
637 	return QDF_STATUS_SUCCESS;
638 }
639 #else
640 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
641 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
642 {
643 	return QDF_STATUS_E_NOMEM;
644 }
645 #endif
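
/*
 * Illustrative sketch (not compiled): typical TSO flow. A caller such
 * as dp_tx_send() (outside this listing, shown here as an assumption)
 * fills msdu_info via dp_tx_prepare_tso() and then hands the jumbo
 * nbuf to dp_tx_send_msdu_multiple(), which walks tso_info.curr_seg
 * and enqueues one MSDU per segment.
 */
#if 0
	if (qdf_nbuf_is_tso(nbuf)) {
		msdu_info.frm_type = dp_tx_frm_tso;
		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info) !=
		    QDF_STATUS_SUCCESS)
			return nbuf;	/* caller drops the frame */

		nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
	}
#endif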
646 
647 /**
648  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
649  * @vdev: DP Vdev handle
650  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
651  * @desc_pool_id: Descriptor Pool ID
652  *
653  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
654  */
655 static
656 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
657 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
658 {
659 	uint8_t i;
660 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
661 	struct dp_tx_seg_info_s *seg_info;
662 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
663 	struct dp_soc *soc = vdev->pdev->soc;
664 
665 	/* Allocate an extension descriptor */
666 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
667 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
668 
669 	if (!msdu_ext_desc) {
670 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
671 		return NULL;
672 	}
673 
674 	if (msdu_info->exception_fw &&
675 			qdf_unlikely(vdev->mesh_vdev)) {
676 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
677 				&msdu_info->meta_data[0],
678 				sizeof(struct htt_tx_msdu_desc_ext2_t));
679 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
680 	}
681 
682 	switch (msdu_info->frm_type) {
683 	case dp_tx_frm_sg:
684 	case dp_tx_frm_me:
685 	case dp_tx_frm_raw:
686 		seg_info = msdu_info->u.sg_info.curr_seg;
687 		/* Update the buffer pointers in MSDU Extension Descriptor */
688 		for (i = 0; i < seg_info->frag_cnt; i++) {
689 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
690 				seg_info->frags[i].paddr_lo,
691 				seg_info->frags[i].paddr_hi,
692 				seg_info->frags[i].len);
693 		}
694 
695 		break;
696 
697 	case dp_tx_frm_tso:
698 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
699 				&cached_ext_desc[0]);
700 		break;
701 
702 
703 	default:
704 		break;
705 	}
706 
707 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
708 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
709 
710 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
711 			msdu_ext_desc->vaddr);
712 
713 	return msdu_ext_desc;
714 }
715 
716 /**
717  * dp_tx_trace_pkt() - Trace TX packet at DP layer
718  *
719  * @skb: skb to be traced
720  * @msdu_id: msdu_id of the packet
721  * @vdev_id: vdev_id of the packet
722  *
723  * Return: None
724  */
725 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
726 			    uint8_t vdev_id)
727 {
728 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
729 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
730 	DPTRACE(qdf_dp_trace_ptr(skb,
731 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
732 				 QDF_TRACE_DEFAULT_PDEV_ID,
733 				 qdf_nbuf_data_addr(skb),
734 				 sizeof(qdf_nbuf_data(skb)),
735 				 msdu_id, vdev_id));
736 
737 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
738 
739 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
740 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
741 				      msdu_id, QDF_TX));
742 }
743 
744 /**
745  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
746  * @vdev: DP vdev handle
747  * @nbuf: skb
748  * @desc_pool_id: Descriptor pool ID
749  * @msdu_info: MSDU info with metadata to the fw
750  * @tx_exc_metadata: Handle that holds exception path metadata
751  * Allocate and prepare Tx descriptor with msdu information.
752  *
753  * Return: Pointer to Tx Descriptor on success,
754  *         NULL on failure
755  */
756 static
757 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
758 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
759 		struct dp_tx_msdu_info_s *msdu_info,
760 		struct cdp_tx_exception_metadata *tx_exc_metadata)
761 {
762 	uint8_t align_pad;
763 	uint8_t is_exception = 0;
764 	uint8_t htt_hdr_size;
765 	qdf_ether_header_t *eh;
766 	struct dp_tx_desc_s *tx_desc;
767 	struct dp_pdev *pdev = vdev->pdev;
768 	struct dp_soc *soc = pdev->soc;
769 
770 	if (dp_tx_limit_check(vdev))
771 		return NULL;
772 
773 	/* Allocate software Tx descriptor */
774 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
775 	if (qdf_unlikely(!tx_desc)) {
776 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
777 		return NULL;
778 	}
779 
780 	dp_tx_outstanding_inc(pdev);
781 
782 	/* Initialize the SW tx descriptor */
783 	tx_desc->nbuf = nbuf;
784 	tx_desc->frm_type = dp_tx_frm_std;
785 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
786 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
787 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
788 	tx_desc->vdev = vdev;
789 	tx_desc->pdev = pdev;
790 	tx_desc->msdu_ext_desc = NULL;
791 	tx_desc->pkt_offset = 0;
792 
793 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
794 
795 	if (qdf_unlikely(vdev->multipass_en)) {
796 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
797 			goto failure;
798 	}
799 
800 	/*
801 	 * For special modes (vdev_type == ocb or mesh), data frames should be
802 	 * transmitted using varying transmit parameters (tx spec) which include
803 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
804 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
805 	 * These frames are sent as exception packets to firmware.
806 	 *
807 	 * HW requirement is that metadata should always point to a
808 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
809 	 *  HTT Metadata length is kept a multiple of 8 bytes so that,
810 	 *  together with align_pad, the buffer start address is 8-byte aligned
811 	 *
812 	 *  |-----------------------------|
813 	 *  |                             |
814 	 *  |-----------------------------| <-----Buffer Pointer Address given
815 	 *  |                             |  ^    in HW descriptor (aligned)
816 	 *  |       HTT Metadata          |  |
817 	 *  |                             |  |
818 	 *  |                             |  | Packet Offset given in descriptor
819 	 *  |                             |  |
820 	 *  |-----------------------------|  |
821 	 *  |       Alignment Pad         |  v
822 	 *  |-----------------------------| <----- Actual buffer start address
823 	 *  |        SKB Data             |           (Unaligned)
824 	 *  |                             |
825 	 *  |                             |
826 	 *  |                             |
827 	 *  |                             |
828 	 *  |                             |
829 	 *  |-----------------------------|
830 	 */
831 	if (qdf_unlikely((msdu_info->exception_fw)) ||
832 				(vdev->opmode == wlan_op_mode_ocb) ||
833 				(tx_exc_metadata &&
834 				tx_exc_metadata->is_tx_sniffer)) {
835 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
836 
837 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
838 			DP_STATS_INC(vdev,
839 				     tx_i.dropped.headroom_insufficient, 1);
840 			goto failure;
841 		}
842 
843 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
844 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
845 					"qdf_nbuf_push_head failed");
846 			goto failure;
847 		}
848 
849 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
850 				msdu_info);
851 		if (htt_hdr_size == 0)
852 			goto failure;
853 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
854 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
855 		is_exception = 1;
856 	}
857 
858 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
859 				qdf_nbuf_map(soc->osdev, nbuf,
860 					QDF_DMA_TO_DEVICE))) {
861 		/* Handle failure */
862 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
863 				"qdf_nbuf_map failed");
864 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
865 		goto failure;
866 	}
867 
868 	if (qdf_unlikely(vdev->nawds_enabled)) {
869 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
870 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
871 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
872 			is_exception = 1;
873 		}
874 	}
875 
876 #if !TQM_BYPASS_WAR
877 	if (is_exception || tx_exc_metadata)
878 #endif
879 	{
880 		/* Temporary WAR due to TQM VP issues */
881 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
882 		qdf_atomic_inc(&pdev->num_tx_exception);
883 	}
884 
885 	return tx_desc;
886 
887 failure:
888 	dp_tx_desc_release(tx_desc, desc_pool_id);
889 	return NULL;
890 }
891 
892 /**
893  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
894  * @vdev: DP vdev handle
895  * @nbuf: skb
896  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
897  * @desc_pool_id : Descriptor Pool ID
898  *
899  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
900  * information. For frames with fragments, allocate and prepare
901  * an MSDU extension descriptor
902  *
903  * Return: Pointer to Tx Descriptor on success,
904  *         NULL on failure
905  */
906 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
907 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
908 		uint8_t desc_pool_id)
909 {
910 	struct dp_tx_desc_s *tx_desc;
911 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
912 	struct dp_pdev *pdev = vdev->pdev;
913 	struct dp_soc *soc = pdev->soc;
914 
915 	if (dp_tx_limit_check(vdev))
916 		return NULL;
917 
918 	/* Allocate software Tx descriptor */
919 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
920 	if (!tx_desc) {
921 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
922 		return NULL;
923 	}
924 
925 	dp_tx_outstanding_inc(pdev);
926 
927 	/* Initialize the SW tx descriptor */
928 	tx_desc->nbuf = nbuf;
929 	tx_desc->frm_type = msdu_info->frm_type;
930 	tx_desc->tx_encap_type = vdev->tx_encap_type;
931 	tx_desc->vdev = vdev;
932 	tx_desc->pdev = pdev;
933 	tx_desc->pkt_offset = 0;
934 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
935 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
936 
937 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
938 
939 	/* Handle scattered frames - TSO/SG/ME */
940 	/* Allocate and prepare an extension descriptor for scattered frames */
941 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
942 	if (!msdu_ext_desc) {
943 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
944 				"%s Tx Extension Descriptor Alloc Fail",
945 				__func__);
946 		goto failure;
947 	}
948 
949 #if TQM_BYPASS_WAR
950 	/* Temporary WAR due to TQM VP issues */
951 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
952 	qdf_atomic_inc(&pdev->num_tx_exception);
953 #endif
954 	if (qdf_unlikely(msdu_info->exception_fw))
955 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
956 
957 	tx_desc->msdu_ext_desc = msdu_ext_desc;
958 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
959 
960 	return tx_desc;
961 failure:
962 	dp_tx_desc_release(tx_desc, desc_pool_id);
963 	return NULL;
964 }
965 
966 /**
967  * dp_tx_prepare_raw() - Prepare RAW packet TX
968  * @vdev: DP vdev handle
969  * @nbuf: buffer pointer
970  * @seg_info: Pointer to Segment info Descriptor to be prepared
971  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
972  *     descriptor
973  *
974  * Return: nbuf on success, NULL on failure
975  */
976 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
977 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
978 {
979 	qdf_nbuf_t curr_nbuf = NULL;
980 	uint16_t total_len = 0;
981 	qdf_dma_addr_t paddr;
982 	int32_t i;
983 	int32_t mapped_buf_num = 0;
984 
985 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
986 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
987 
988 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
989 
990 	/* Continue only if frames are of DATA type */
991 	if (!DP_FRAME_IS_DATA(qos_wh)) {
992 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
993 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
994 			  "Pkt. recd is not of data type");
995 		goto error;
996 	}
997 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
998 	if (vdev->raw_mode_war &&
999 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1000 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1001 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1002 
1003 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1004 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1005 
1006 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
1007 					QDF_DMA_TO_DEVICE)) {
1008 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1009 				"%s dma map error ", __func__);
1010 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1011 			mapped_buf_num = i;
1012 			goto error;
1013 		}
1014 
1015 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1016 		seg_info->frags[i].paddr_lo = paddr;
1017 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1018 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1019 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1020 		total_len += qdf_nbuf_len(curr_nbuf);
1021 	}
1022 
1023 	seg_info->frag_cnt = i;
1024 	seg_info->total_len = total_len;
1025 	seg_info->next = NULL;
1026 
1027 	sg_info->curr_seg = seg_info;
1028 
1029 	msdu_info->frm_type = dp_tx_frm_raw;
1030 	msdu_info->num_seg = 1;
1031 
1032 	return nbuf;
1033 
1034 error:
1035 	i = 0;
1036 	while (nbuf) {
1037 		curr_nbuf = nbuf;
1038 		if (i < mapped_buf_num) {
1039 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
1040 			i++;
1041 		}
1042 		nbuf = qdf_nbuf_next(nbuf);
1043 		qdf_nbuf_free(curr_nbuf);
1044 	}
1045 	return NULL;
1046 
1047 }
1048 
1049 /**
1050  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1051  * @soc: DP soc handle
1052  * @nbuf: Buffer pointer
1053  *
1054  * unmap the chain of nbufs that belong to this RAW frame.
1055  *
1056  * Return: None
1057  */
1058 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1059 				    qdf_nbuf_t nbuf)
1060 {
1061 	qdf_nbuf_t cur_nbuf = nbuf;
1062 
1063 	do {
1064 		qdf_nbuf_unmap(soc->osdev, cur_nbuf, QDF_DMA_TO_DEVICE);
1065 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1066 	} while (cur_nbuf);
1067 }
1068 
1069 /**
1070  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1071  * @soc: DP Soc Handle
1072  * @vdev: DP vdev handle
1073  * @tx_desc: Tx Descriptor Handle
1074  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1075  * @fw_metadata: Metadata to send to Target Firmware along with frame
1076  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1077  * @tx_exc_metadata: Handle that holds exception path meta data
1078  *
1079  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1080  *  from software Tx descriptor
1081  *
1082  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
1083  */
1084 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1085 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
1086 				   uint16_t fw_metadata, uint8_t ring_id,
1087 				   struct cdp_tx_exception_metadata
1088 					*tx_exc_metadata)
1089 {
1090 	uint8_t type;
1091 	uint16_t length;
1092 	void *hal_tx_desc, *hal_tx_desc_cached;
1093 	qdf_dma_addr_t dma_addr;
1094 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
1095 
1096 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1097 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1098 			tx_exc_metadata->sec_type : vdev->sec_type);
1099 
1100 	/* Return Buffer Manager ID */
1101 	uint8_t bm_id = ring_id;
1102 	hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng;
1103 
1104 	hal_tx_desc_cached = (void *) cached_desc;
1105 	qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
1106 
1107 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1108 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
1109 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1110 		dma_addr = tx_desc->msdu_ext_desc->paddr;
1111 	} else {
1112 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
1113 		type = HAL_TX_BUF_TYPE_BUFFER;
1114 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1115 	}
1116 
1117 	qdf_assert_always(dma_addr);
1118 
1119 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1120 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
1121 					dma_addr, bm_id, tx_desc->id,
1122 					type, soc->hal_soc);
1123 
1124 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
1125 		return QDF_STATUS_E_RESOURCES;
1126 
1127 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
1128 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1129 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1130 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1131 				vdev->pdev->lmac_id);
1132 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1133 				    vdev->search_type);
1134 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1135 				     vdev->bss_ast_idx);
1136 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1137 					  vdev->dscp_tid_map_id);
1138 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1139 			sec_type_map[sec_type]);
1140 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1141 				      (vdev->bss_ast_hash & 0xF));
1142 
1143 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1144 			 length, type, (uint64_t)dma_addr,
1145 			 tx_desc->pkt_offset, tx_desc->id);
1146 
1147 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1148 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1149 
1150 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1151 			vdev->hal_desc_addr_search_flags);
1152 
1153 	/* verify checksum offload configuration */
1154 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1155 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1156 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1157 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1158 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1159 	}
1160 
1161 	if (tid != HTT_TX_EXT_TID_INVALID)
1162 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1163 
1164 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1165 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1166 
1167 
1168 	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
1169 	/* Sync cached descriptor with HW */
1170 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1171 
1172 	if (!hal_tx_desc) {
1173 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1174 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1175 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1176 		return QDF_STATUS_E_RESOURCES;
1177 	}
1178 
1179 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1180 
1181 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1182 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1183 
1184 	return QDF_STATUS_SUCCESS;
1185 }
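
/*
 * Illustrative sketch (not compiled): dp_tx_hw_enqueue() is called with
 * the TCL data ring already locked for access, and the ring access is
 * ended (or reaped for runtime PM) afterwards, as done in
 * dp_tx_send_msdu_single() and dp_tx_send_msdu_multiple() below;
 * variables are assumed to be in scope as in those callers.
 */
#if 0
	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl)))
		return nbuf;	/* ring access failed, frame is dropped */

	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id, NULL);

	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
#endif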
1186 
1187 
1188 /**
1189  * dp_cce_classify() - Classify the frame based on CCE rules
1190  * @vdev: DP vdev handle
1191  * @nbuf: skb
1192  *
1193  * Classify frames based on CCE rules
1194  * Return: true if the frame is classified by CCE,
1195  *         false otherwise
1196  */
1197 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1198 {
1199 	qdf_ether_header_t *eh = NULL;
1200 	uint16_t   ether_type;
1201 	qdf_llc_t *llcHdr;
1202 	qdf_nbuf_t nbuf_clone = NULL;
1203 	qdf_dot3_qosframe_t *qos_wh = NULL;
1204 
1205 	/* for mesh packets don't do any classification */
1206 	if (qdf_unlikely(vdev->mesh_vdev))
1207 		return false;
1208 
1209 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1210 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1211 		ether_type = eh->ether_type;
1212 		llcHdr = (qdf_llc_t *)(nbuf->data +
1213 					sizeof(qdf_ether_header_t));
1214 	} else {
1215 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1216 		/* For encrypted packets don't do any classification */
1217 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1218 			return false;
1219 
1220 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1221 			if (qdf_unlikely(
1222 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1223 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1224 
1225 				ether_type = *(uint16_t *)(nbuf->data
1226 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1227 						+ sizeof(qdf_llc_t)
1228 						- sizeof(ether_type));
1229 				llcHdr = (qdf_llc_t *)(nbuf->data +
1230 						QDF_IEEE80211_4ADDR_HDR_LEN);
1231 			} else {
1232 				ether_type = *(uint16_t *)(nbuf->data
1233 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1234 						+ sizeof(qdf_llc_t)
1235 						- sizeof(ether_type));
1236 				llcHdr = (qdf_llc_t *)(nbuf->data +
1237 					QDF_IEEE80211_3ADDR_HDR_LEN);
1238 			}
1239 
1240 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1241 				&& (ether_type ==
1242 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1243 
1244 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1245 				return true;
1246 			}
1247 		}
1248 
1249 		return false;
1250 	}
1251 
1252 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1253 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1254 				sizeof(*llcHdr));
1255 		nbuf_clone = qdf_nbuf_clone(nbuf);
1256 		if (qdf_unlikely(nbuf_clone)) {
1257 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1258 
1259 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1260 				qdf_nbuf_pull_head(nbuf_clone,
1261 						sizeof(qdf_net_vlanhdr_t));
1262 			}
1263 		}
1264 	} else {
1265 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1266 			nbuf_clone = qdf_nbuf_clone(nbuf);
1267 			if (qdf_unlikely(nbuf_clone)) {
1268 				qdf_nbuf_pull_head(nbuf_clone,
1269 					sizeof(qdf_net_vlanhdr_t));
1270 			}
1271 		}
1272 	}
1273 
1274 	if (qdf_unlikely(nbuf_clone))
1275 		nbuf = nbuf_clone;
1276 
1277 
1278 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1279 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1280 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1281 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1282 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1283 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1284 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1285 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1286 		if (qdf_unlikely(nbuf_clone))
1287 			qdf_nbuf_free(nbuf_clone);
1288 		return true;
1289 	}
1290 
1291 	if (qdf_unlikely(nbuf_clone))
1292 		qdf_nbuf_free(nbuf_clone);
1293 
1294 	return false;
1295 }
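
/*
 * Illustrative sketch (not compiled): when the hardware CCE block is
 * disabled, the classification result is used to steer the frame to
 * the VO TID and mark it as a to-FW exception, exactly as done in
 * dp_tx_send_msdu_single() and dp_tx_send_msdu_multiple() below.
 */
#if 0
	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf)) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}
#endif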
1296 
1297 /**
1298  * dp_tx_get_tid() - Obtain TID to be used for this frame
1299  * @vdev: DP vdev handle
1300  * @nbuf: skb
1301  *
1302  * Extract the DSCP or PCP information from frame and map into TID value.
1303  *
1304  * Return: void
1305  */
1306 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1307 			  struct dp_tx_msdu_info_s *msdu_info)
1308 {
1309 	uint8_t tos = 0, dscp_tid_override = 0;
1310 	uint8_t *hdr_ptr, *L3datap;
1311 	uint8_t is_mcast = 0;
1312 	qdf_ether_header_t *eh = NULL;
1313 	qdf_ethervlan_header_t *evh = NULL;
1314 	uint16_t   ether_type;
1315 	qdf_llc_t *llcHdr;
1316 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1317 
1318 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1319 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1320 		eh = (qdf_ether_header_t *)nbuf->data;
1321 		hdr_ptr = eh->ether_dhost;
1322 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1323 	} else {
1324 		qdf_dot3_qosframe_t *qos_wh =
1325 			(qdf_dot3_qosframe_t *) nbuf->data;
1326 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1327 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1328 		return;
1329 	}
1330 
1331 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1332 	ether_type = eh->ether_type;
1333 
1334 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1335 	/*
1336 	 * Check if packet is dot3 or eth2 type.
1337 	 */
1338 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1339 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1340 				sizeof(*llcHdr));
1341 
1342 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1343 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1344 				sizeof(*llcHdr);
1345 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1346 					+ sizeof(*llcHdr) +
1347 					sizeof(qdf_net_vlanhdr_t));
1348 		} else {
1349 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1350 				sizeof(*llcHdr);
1351 		}
1352 	} else {
1353 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1354 			evh = (qdf_ethervlan_header_t *) eh;
1355 			ether_type = evh->ether_type;
1356 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1357 		}
1358 	}
1359 
1360 	/*
1361 	 * Find priority from IP TOS DSCP field
1362 	 */
1363 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1364 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1365 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1366 			/* Only for unicast frames */
1367 			if (!is_mcast) {
1368 				/* send it on VO queue */
1369 				msdu_info->tid = DP_VO_TID;
1370 			}
1371 		} else {
1372 			/*
1373 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1374 			 * from TOS byte.
1375 			 */
1376 			tos = ip->ip_tos;
1377 			dscp_tid_override = 1;
1378 
1379 		}
1380 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1381 		/* TODO
1382 		 * use flowlabel
1383 		 * igmpmld cases to be handled in phase 2
1384 		 */
1385 		unsigned long ver_pri_flowlabel;
1386 		unsigned long pri;
1387 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1388 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1389 			DP_IPV6_PRIORITY_SHIFT;
1390 		tos = pri;
1391 		dscp_tid_override = 1;
1392 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1393 		msdu_info->tid = DP_VO_TID;
1394 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1395 		/* Only for unicast frames */
1396 		if (!is_mcast) {
1397 			/* send ucast arp on VO queue */
1398 			msdu_info->tid = DP_VO_TID;
1399 		}
1400 	}
1401 
1402 	/*
1403 	 * Assign all MCAST packets to BE
1404 	 */
1405 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1406 		if (is_mcast) {
1407 			tos = 0;
1408 			dscp_tid_override = 1;
1409 		}
1410 	}
1411 
1412 	if (dscp_tid_override == 1) {
1413 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1414 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1415 	}
1416 
1417 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1418 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1419 
1420 	return;
1421 }
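
/*
 * Worked example (values assumed for illustration): for an IPv4 TOS
 * byte of 0xB8 (DSCP EF), tos >> DP_IP_DSCP_SHIFT yields 46 (0x2E)
 * assuming a shift of 2; after masking with DP_IP_DSCP_MASK, DSCP 46
 * indexes pdev->dscp_tid_map[vdev->dscp_tid_map_id][46] to pick the
 * TID, which a default map would typically place in the voice TID.
 */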
1422 
1423 /**
1424  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1425  * @vdev: DP vdev handle
1426  * @nbuf: skb
1427  *
1428  * Software based TID classification is required when more than 2 DSCP-TID
1429  * mapping tables are needed.
1430  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1431  *
1432  * Return: void
1433  */
1434 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1435 			       struct dp_tx_msdu_info_s *msdu_info)
1436 {
1437 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1438 
1439 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1440 
1441 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1442 		return;
1443 
1444 	/* for mesh packets don't do any classification */
1445 	if (qdf_unlikely(vdev->mesh_vdev))
1446 		return;
1447 
1448 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1449 }
1450 
1451 #ifdef FEATURE_WLAN_TDLS
1452 /**
1453  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1454  * @tx_desc: TX descriptor
1455  *
1456  * Return: None
1457  */
1458 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1459 {
1460 	if (tx_desc->vdev) {
1461 		if (tx_desc->vdev->is_tdls_frame) {
1462 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1463 			tx_desc->vdev->is_tdls_frame = false;
1464 		}
1465 	}
1466 }
1467 
1468 /**
1469  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1470  * @tx_desc: TX descriptor
1471  * @vdev: datapath vdev handle
1472  *
1473  * Return: None
1474  */
1475 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1476 					 struct dp_vdev *vdev)
1477 {
1478 	struct hal_tx_completion_status ts = {0};
1479 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1480 
1481 	if (qdf_unlikely(!vdev)) {
1482 		dp_err("vdev is null!");
1483 		return;
1484 	}
1485 
1486 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1487 	if (vdev->tx_non_std_data_callback.func) {
1488 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1489 		vdev->tx_non_std_data_callback.func(
1490 				vdev->tx_non_std_data_callback.ctxt,
1491 				nbuf, ts.status);
1492 		return;
1493 	}
1494 }
1495 #else
1496 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1497 {
1498 }
1499 
1500 static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1501 						struct dp_vdev *vdev)
1502 {
1503 }
1504 #endif
1505 
1506 /**
1507  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1508  * @vdev: DP vdev handle
1509  * @nbuf: skb
1510  * @msdu_info: MSDU info carrying the TID (from HLOS for overriding the
1511  *             default DSCP-TID mapping), metadata to the fw and the
1512  *             Tx queue to be used for this Tx frame
1513  * @peer_id: peer_id of the peer in case of NAWDS frames
1514  * @tx_exc_metadata: Handle that holds exception path metadata
1515  *
1516  * Return: NULL on success,
1517  *         nbuf when it fails to send
1518  */
1519 qdf_nbuf_t
1520 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1521 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1522 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1523 {
1524 	struct dp_pdev *pdev = vdev->pdev;
1525 	struct dp_soc *soc = pdev->soc;
1526 	struct dp_tx_desc_s *tx_desc;
1527 	QDF_STATUS status;
1528 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1529 	hal_ring_handle_t hal_ring_hdl =
1530 				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1531 	uint16_t htt_tcl_metadata = 0;
1532 	uint8_t tid = msdu_info->tid;
1533 	struct cdp_tid_tx_stats *tid_stats = NULL;
1534 
1535 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1536 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1537 			msdu_info, tx_exc_metadata);
1538 	if (!tx_desc) {
1539 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1540 			  vdev, tx_q->desc_pool_id);
1541 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1542 		tid_stats = &pdev->stats.tid_stats.
1543 			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1544 		tid_stats->swdrop_cnt[TX_DESC_ERR]++;
1545 		return nbuf;
1546 	}
1547 
1548 	if (qdf_unlikely(soc->cce_disable)) {
1549 		if (dp_cce_classify(vdev, nbuf) == true) {
1550 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1551 			tid = DP_VO_TID;
1552 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1553 		}
1554 	}
1555 
1556 	dp_tx_update_tdls_flags(tx_desc);
1557 
1558 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
1559 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1560 				"%s %d : HAL RING Access Failed -- %pK",
1561 				__func__, __LINE__, hal_ring_hdl);
1562 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1563 		tid_stats = &pdev->stats.tid_stats.
1564 			    tid_tx_stats[tx_q->ring_id][tid];
1565 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1566 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1567 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1568 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1569 		goto fail_return;
1570 	}
1571 
1572 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1573 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1574 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1575 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1576 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1577 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1578 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1579 				peer_id);
1580 	} else
1581 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1582 
1583 
1584 	if (msdu_info->exception_fw) {
1585 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1586 	}
1587 
1588 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1589 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1590 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1591 
1592 	if (status != QDF_STATUS_SUCCESS) {
1593 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1594 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1595 			  __func__, tx_desc, tx_q->ring_id);
1596 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1597 		tid_stats = &pdev->stats.tid_stats.
1598 			    tid_tx_stats[tx_q->ring_id][tid];
1599 		tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1600 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1601 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1602 		goto fail_return;
1603 	}
1604 
1605 	nbuf = NULL;
1606 
1607 fail_return:
1608 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1609 		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1610 		hif_pm_runtime_put(soc->hif_handle);
1611 	} else {
1612 		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1613 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1614 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1615 	}
1616 
1617 	return nbuf;
1618 }
1619 
1620 /**
1621  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1622  * @vdev: DP vdev handle
1623  * @nbuf: skb
1624  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1625  *
1626  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1627  *
1628  * Return: NULL on success,
1629  *         nbuf when it fails to send
1630  */
1631 #if QDF_LOCK_STATS
1632 noinline
1633 #else
1634 #endif
1635 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1636 				    struct dp_tx_msdu_info_s *msdu_info)
1637 {
1638 	uint8_t i;
1639 	struct dp_pdev *pdev = vdev->pdev;
1640 	struct dp_soc *soc = pdev->soc;
1641 	struct dp_tx_desc_s *tx_desc;
1642 	bool is_cce_classified = false;
1643 	QDF_STATUS status;
1644 	uint16_t htt_tcl_metadata = 0;
1645 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1646 	hal_ring_handle_t hal_ring_hdl =
1647 				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1648 	struct cdp_tid_tx_stats *tid_stats = NULL;
1649 
1650 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
1651 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1652 				"%s %d : HAL RING Access Failed -- %pK",
1653 				__func__, __LINE__, hal_ring_hdl);
1654 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1655 		tid_stats = &pdev->stats.tid_stats.
1656 			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1657 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1658 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1659 		return nbuf;
1660 	}
1661 
1662 	if (qdf_unlikely(soc->cce_disable)) {
1663 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1664 		if (is_cce_classified) {
1665 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1666 			msdu_info->tid = DP_VO_TID;
1667 		}
1668 	}
1669 
1670 	if (msdu_info->frm_type == dp_tx_frm_me)
1671 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1672 
1673 	i = 0;
1674 	/* Print statement to track i and num_seg */
1675 	/*
1676 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1677 	 * descriptors using information in msdu_info
1678 	 */
1679 	while (i < msdu_info->num_seg) {
1680 		/*
1681 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1682 		 * descriptor
1683 		 */
1684 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1685 				tx_q->desc_pool_id);
1686 
1687 		if (!tx_desc) {
1688 			if (msdu_info->frm_type == dp_tx_frm_me) {
1689 				dp_tx_me_free_buf(pdev,
1690 					(void *)(msdu_info->u.sg_info
1691 						.curr_seg->frags[0].vaddr));
1692 				i++;
1693 				continue;
1694 			}
1695 			goto done;
1696 		}
1697 
1698 		if (msdu_info->frm_type == dp_tx_frm_me) {
1699 			tx_desc->me_buffer =
1700 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1701 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1702 		}
1703 
1704 		if (is_cce_classified)
1705 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1706 
1707 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1708 		if (msdu_info->exception_fw) {
1709 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1710 		}
1711 
1712 		/*
1713 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1714 		 */
1715 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1716 			htt_tcl_metadata, tx_q->ring_id, NULL);
1717 
1718 		if (status != QDF_STATUS_SUCCESS) {
1719 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1720 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1721 					__func__, tx_desc, tx_q->ring_id);
1722 
1723 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1724 			tid_stats = &pdev->stats.tid_stats.
1725 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1726 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1727 
1728 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1729 			if (msdu_info->frm_type == dp_tx_frm_me) {
1730 				i++;
1731 				continue;
1732 			}
1733 			goto done;
1734 		}
1735 
1736 		/*
1737 		 * TODO
1738 		 * if tso_info structure can be modified to have curr_seg
1739 		 * as first element, following 2 blocks of code (for TSO and SG)
1740 		 * can be combined into 1
1741 		 */
1742 
1743 		/*
1744 		 * For frames with multiple segments (TSO, ME), jump to next
1745 		 * segment.
1746 		 */
1747 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1748 			if (msdu_info->u.tso_info.curr_seg->next) {
1749 				msdu_info->u.tso_info.curr_seg =
1750 					msdu_info->u.tso_info.curr_seg->next;
1751 
1752 				/*
1753 				 * If this is a jumbo nbuf, then increment the number of
1754 				 * nbuf users for each additional segment of the msdu.
1755 				 * This will ensure that the skb is freed only after
1756 				 * receiving tx completion for all segments of an nbuf
1757 				 */
1758 				qdf_nbuf_inc_users(nbuf);
1759 
1760 				/* Check with MCL if this is needed */
1761 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1762 			}
1763 		}
1764 
1765 		/*
1766 		 * For Multicast-Unicast converted packets,
1767 		 * each converted frame (for a client) is represented as
1768 		 * 1 segment
1769 		 */
1770 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1771 				(msdu_info->frm_type == dp_tx_frm_me)) {
1772 			if (msdu_info->u.sg_info.curr_seg->next) {
1773 				msdu_info->u.sg_info.curr_seg =
1774 					msdu_info->u.sg_info.curr_seg->next;
1775 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1776 			}
1777 		}
1778 		i++;
1779 	}
1780 
1781 	nbuf = NULL;
1782 
1783 done:
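	/*
	 * If a runtime PM reference can be taken here, end SRNG access
	 * normally so the queued descriptors are flushed to HW; otherwise
	 * only reap the ring and mark it for a deferred flush.
	 */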
1784 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1785 		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1786 		hif_pm_runtime_put(soc->hif_handle);
1787 	} else {
1788 		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1789 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1790 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1791 	}
1792 
1793 	return nbuf;
1794 }
1795 
1796 /**
1797  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1798  *                     for SG frames
1799  * @vdev: DP vdev handle
1800  * @nbuf: skb
1801  * @seg_info: Pointer to Segment info Descriptor to be prepared
1802  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1803  *
 * Return: NULL on failure,
 *         nbuf when the SG info is successfully extracted
1806  */
1807 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1808 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1809 {
1810 	uint32_t cur_frag, nr_frags;
1811 	qdf_dma_addr_t paddr;
1812 	struct dp_tx_sg_info_s *sg_info;
1813 
1814 	sg_info = &msdu_info->u.sg_info;
1815 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1816 
1817 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1818 				QDF_DMA_TO_DEVICE)) {
1819 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1820 				"dma map error");
1821 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1822 
1823 		qdf_nbuf_free(nbuf);
1824 		return NULL;
1825 	}
1826 
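	/*
	 * frags[0] describes the linear (head) portion of the skb; the page
	 * fragments are filled into frags[1..nr_frags] in the loop below.
	 */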
1827 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1828 	seg_info->frags[0].paddr_lo = paddr;
1829 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1830 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1831 	seg_info->frags[0].vaddr = (void *) nbuf;
1832 
1833 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1834 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1835 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1836 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1837 					"frag dma map error");
1838 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1839 			qdf_nbuf_free(nbuf);
1840 			return NULL;
1841 		}
1842 
1843 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1844 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1845 		seg_info->frags[cur_frag + 1].paddr_hi =
1846 			((uint64_t) paddr) >> 32;
1847 		seg_info->frags[cur_frag + 1].len =
1848 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1849 	}
1850 
1851 	seg_info->frag_cnt = (cur_frag + 1);
1852 	seg_info->total_len = qdf_nbuf_len(nbuf);
1853 	seg_info->next = NULL;
1854 
1855 	sg_info->curr_seg = seg_info;
1856 
1857 	msdu_info->frm_type = dp_tx_frm_sg;
1858 	msdu_info->num_seg = 1;
1859 
1860 	return nbuf;
1861 }
1862 
1863 /**
1864  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
1865  * @vdev: DP vdev handle
1866  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1867  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
1868  *
 * Return: None
1871  */
1872 static
1873 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
1874 				    struct dp_tx_msdu_info_s *msdu_info,
1875 				    uint16_t ppdu_cookie)
1876 {
1877 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1878 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1879 
1880 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1881 
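	/*
	 * Mark the frame as standalone and attach the host opaque cookie so
	 * that the ppdu_cookie can be matched against the PPDU completion.
	 */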
1882 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
1883 				(msdu_info->meta_data[5], 1);
1884 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
1885 				(msdu_info->meta_data[5], 1);
1886 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
1887 				(msdu_info->meta_data[6], ppdu_cookie);
1888 
1889 	msdu_info->exception_fw = 1;
1890 	msdu_info->is_tx_sniffer = 1;
1891 }
1892 
1893 #ifdef MESH_MODE_SUPPORT
1894 
1895 /**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *				    and prepare msdu_info for mesh frames.
1898  * @vdev: DP vdev handle
1899  * @nbuf: skb
1900  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1901  *
1902  * Return: NULL on failure,
1903  *         nbuf when extracted successfully
1904  */
1905 static
1906 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1907 				struct dp_tx_msdu_info_s *msdu_info)
1908 {
1909 	struct meta_hdr_s *mhdr;
1910 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1911 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1912 
1913 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1914 
1915 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1916 		msdu_info->exception_fw = 0;
1917 		goto remove_meta_hdr;
1918 	}
1919 
1920 	msdu_info->exception_fw = 1;
1921 
1922 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1923 
1924 	meta_data->host_tx_desc_pool = 1;
1925 	meta_data->update_peer_cache = 1;
1926 	meta_data->learning_frame = 1;
1927 
1928 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1929 		meta_data->power = mhdr->power;
1930 
1931 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1932 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1933 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1934 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1935 
1936 		meta_data->dyn_bw = 1;
1937 
1938 		meta_data->valid_pwr = 1;
1939 		meta_data->valid_mcs_mask = 1;
1940 		meta_data->valid_nss_mask = 1;
1941 		meta_data->valid_preamble_type  = 1;
1942 		meta_data->valid_retries = 1;
1943 		meta_data->valid_bw_info = 1;
1944 	}
1945 
1946 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1947 		meta_data->encrypt_type = 0;
1948 		meta_data->valid_encrypt_type = 1;
1949 		meta_data->learning_frame = 0;
1950 	}
1951 
1952 	meta_data->valid_key_flags = 1;
1953 	meta_data->key_flags = (mhdr->keyix & 0x3);
1954 
1955 remove_meta_hdr:
1956 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1957 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1958 				"qdf_nbuf_pull_head failed");
1959 		qdf_nbuf_free(nbuf);
1960 		return NULL;
1961 	}
1962 
1963 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1964 
1965 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1966 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1967 			" tid %d to_fw %d",
1968 			__func__, msdu_info->meta_data[0],
1969 			msdu_info->meta_data[1],
1970 			msdu_info->meta_data[2],
1971 			msdu_info->meta_data[3],
1972 			msdu_info->meta_data[4],
1973 			msdu_info->meta_data[5],
1974 			msdu_info->tid, msdu_info->exception_fw);
1975 
1976 	return nbuf;
1977 }
1978 #else
1979 static
1980 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1981 				struct dp_tx_msdu_info_s *msdu_info)
1982 {
1983 	return nbuf;
1984 }
1985 
1986 #endif
1987 
1988 /**
1989  * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid else false
1993  *
1994  */
1995 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1996 {
1997 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
1998 			    HTT_INVALID_TID);
1999 	bool invalid_encap_type =
2000 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2001 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2002 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2003 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2004 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2005 			       tx_exc->ppdu_cookie == 0);
2006 
2007 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2008 	    invalid_cookie) {
2009 		return false;
2010 	}
2011 
2012 	return true;
2013 }
2014 
2015 /**
2016  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2017  * @vap_dev: DP vdev handle
2018  * @nbuf: skb
2019  * @tx_exc_metadata: Handle that holds exception path meta data
2020  *
2021  * Entry point for Core Tx layer (DP_TX) invoked from
2022  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2023  *
2024  * Return: NULL on success,
2025  *         nbuf when it fails to send
2026  */
2027 qdf_nbuf_t
2028 dp_tx_send_exception(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf,
2029 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2030 {
2031 	qdf_ether_header_t *eh = NULL;
2032 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2033 	struct dp_tx_msdu_info_s msdu_info;
2034 
2035 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2036 
2037 	if (!tx_exc_metadata)
2038 		goto fail;
2039 
2040 	msdu_info.tid = tx_exc_metadata->tid;
2041 
2042 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2043 	dp_verbose_debug("skb %pM", nbuf->data);
2044 
2045 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2046 
2047 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2048 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2049 			"Invalid parameters in exception path");
2050 		goto fail;
2051 	}
2052 
2053 	/* Basic sanity checks for unsupported packets */
2054 
2055 	/* MESH mode */
2056 	if (qdf_unlikely(vdev->mesh_vdev)) {
2057 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2058 			"Mesh mode is not supported in exception path");
2059 		goto fail;
2060 	}
2061 
2062 	/* TSO or SG */
2063 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2064 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2065 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2066 			  "TSO and SG are not supported in exception path");
2067 
2068 		goto fail;
2069 	}
2070 
2071 	/* RAW */
2072 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2074 			  "Raw frame is not supported in exception path");
2075 		goto fail;
2076 	}
2077 
2078 
2079 	/* Mcast enhancement*/
2080 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2081 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2082 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2083 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2084 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2085 		}
2086 	}
2087 
2088 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2089 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2090 				 qdf_nbuf_len(nbuf));
2091 
2092 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2093 					       tx_exc_metadata->ppdu_cookie);
2094 	}
2095 
2096 	/*
2097 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2099 	 * dedicated for data and 1 for command.
2100 	 * "queue_id" maps to one hardware ring.
2101 	 *  With each ring, we also associate a unique Tx descriptor pool
2102 	 *  to minimize lock contention for these resources.
2103 	 */
2104 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2105 
2106 	/*  Single linear frame */
2107 	/*
2108 	 * If nbuf is a simple linear frame, use send_single function to
2109 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2110 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2111 	 */
2112 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2113 			tx_exc_metadata->peer_id, tx_exc_metadata);
2114 
2115 	return nbuf;
2116 
2117 fail:
2118 	dp_verbose_debug("pkt send failed");
2119 	return nbuf;
2120 }
2121 
2122 /**
2123  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2124  * @vap_dev: DP vdev handle
2125  * @nbuf: skb
2126  *
2127  * Entry point for Core Tx layer (DP_TX) invoked from
2128  * hard_start_xmit in OSIF/HDD
2129  *
2130  * Return: NULL on success,
2131  *         nbuf when it fails to send
2132  */
2133 #ifdef MESH_MODE_SUPPORT
2134 qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
2135 {
2136 	struct meta_hdr_s *mhdr;
2137 	qdf_nbuf_t nbuf_mesh = NULL;
2138 	qdf_nbuf_t nbuf_clone = NULL;
2139 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2140 	uint8_t no_enc_frame = 0;
2141 
2142 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2143 	if (!nbuf_mesh) {
2144 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2145 				"qdf_nbuf_unshare failed");
2146 		return nbuf;
2147 	}
2148 	nbuf = nbuf_mesh;
2149 
2150 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2151 
2152 	if ((vdev->sec_type != cdp_sec_type_none) &&
2153 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2154 		no_enc_frame = 1;
2155 
2156 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2157 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2158 
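	/*
	 * When the meta header carries updated per-packet tx info and the
	 * frame will be encrypted, send a clone tagged with
	 * CB_FTYPE_MESH_TX_INFO (FW exception path) so the updated info
	 * reaches FW, while the original frame is sent on the regular path.
	 */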
2159 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2160 		       !no_enc_frame) {
2161 		nbuf_clone = qdf_nbuf_clone(nbuf);
2162 		if (!nbuf_clone) {
2163 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2164 				"qdf_nbuf_clone failed");
2165 			return nbuf;
2166 		}
2167 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2168 	}
2169 
2170 	if (nbuf_clone) {
2171 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
2172 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2173 		} else {
2174 			qdf_nbuf_free(nbuf_clone);
2175 		}
2176 	}
2177 
2178 	if (no_enc_frame)
2179 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2180 	else
2181 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2182 
2183 	nbuf = dp_tx_send(vap_dev, nbuf);
2184 	if ((!nbuf) && no_enc_frame) {
2185 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2186 	}
2187 
2188 	return nbuf;
2189 }
2190 
2191 #else
2192 
2193 qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
2194 {
2195 	return dp_tx_send(vap_dev, nbuf);
2196 }
2197 
2198 #endif
2199 
2200 /**
2201  * dp_tx_send() - Transmit a frame on a given VAP
2202  * @vap_dev: DP vdev handle
2203  * @nbuf: skb
2204  *
2205  * Entry point for Core Tx layer (DP_TX) invoked from
2206  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2207  * cases
2208  *
2209  * Return: NULL on success,
2210  *         nbuf when it fails to send
2211  */
2212 qdf_nbuf_t dp_tx_send(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
2213 {
2214 	qdf_ether_header_t *eh = NULL;
2215 	struct dp_tx_msdu_info_s msdu_info;
2216 	struct dp_tx_seg_info_s seg_info;
2217 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2218 	uint16_t peer_id = HTT_INVALID_PEER;
2219 	qdf_nbuf_t nbuf_mesh = NULL;
2220 
2221 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2222 	qdf_mem_zero(&seg_info, sizeof(seg_info));
2223 
2224 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2225 
2226 	dp_verbose_debug("skb %pM", nbuf->data);
2227 
2228 	/*
2229 	 * Set Default Host TID value to invalid TID
2230 	 * (TID override disabled)
2231 	 */
2232 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2233 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2234 
2235 	if (qdf_unlikely(vdev->mesh_vdev)) {
2236 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2237 								&msdu_info);
2238 		if (!nbuf_mesh) {
2239 			dp_verbose_debug("Extracting mesh metadata failed");
2240 			return nbuf;
2241 		}
2242 		nbuf = nbuf_mesh;
2243 	}
2244 
2245 	/*
2246 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2248 	 * dedicated for data and 1 for command.
2249 	 * "queue_id" maps to one hardware ring.
2250 	 *  With each ring, we also associate a unique Tx descriptor pool
2251 	 *  to minimize lock contention for these resources.
2252 	 */
2253 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2254 
2255 	/*
2256 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2257 	 *  Table 1 - Default DSCP-TID mapping table
2258 	 *  Table 2 - 1 DSCP-TID override table
2259 	 *
2260 	 * If we need a different DSCP-TID mapping for this vap,
2261 	 * call tid_classify to extract DSCP/ToS from frame and
2262 	 * map to a TID and store in msdu_info. This is later used
2263 	 * to fill in TCL Input descriptor (per-packet TID override).
2264 	 */
2265 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2266 
2267 	/*
2268 	 * Classify the frame and call corresponding
2269 	 * "prepare" function which extracts the segment (TSO)
2270 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2271 	 * into MSDU_INFO structure which is later used to fill
2272 	 * SW and HW descriptors.
2273 	 */
2274 	if (qdf_nbuf_is_tso(nbuf)) {
2275 		dp_verbose_debug("TSO frame %pK", vdev);
2276 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2277 				 qdf_nbuf_len(nbuf));
2278 
2279 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2280 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2281 					 qdf_nbuf_len(nbuf));
2282 			return nbuf;
2283 		}
2284 
2285 		goto send_multiple;
2286 	}
2287 
2288 	/* SG */
2289 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2290 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2291 
2292 		if (!nbuf)
2293 			return NULL;
2294 
2295 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2296 
2297 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2298 				qdf_nbuf_len(nbuf));
2299 
2300 		goto send_multiple;
2301 	}
2302 
2303 #ifdef ATH_SUPPORT_IQUE
2304 	/* Mcast to Ucast Conversion*/
2305 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2306 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2307 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2308 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2309 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2310 
2311 			DP_STATS_INC_PKT(vdev,
2312 					tx_i.mcast_en.mcast_pkt, 1,
2313 					qdf_nbuf_len(nbuf));
2314 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2315 					QDF_STATUS_SUCCESS) {
2316 				return NULL;
2317 			}
2318 		}
2319 	}
2320 #endif
2321 
2322 	/* RAW */
2323 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2324 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2325 		if (!nbuf)
2326 			return NULL;
2327 
2328 		dp_verbose_debug("Raw frame %pK", vdev);
2329 
2330 		goto send_multiple;
2331 
2332 	}
2333 
2334 	/*  Single linear frame */
2335 	/*
2336 	 * If nbuf is a simple linear frame, use send_single function to
2337 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2338 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2339 	 */
2340 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2341 
2342 	return nbuf;
2343 
2344 send_multiple:
2345 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2346 
2347 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2348 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2349 
2350 	return nbuf;
2351 }
2352 
2353 /**
2354  * dp_tx_reinject_handler() - Tx Reinject Handler
2355  * @tx_desc: software descriptor head pointer
2356  * @status : Tx completion status from HTT descriptor
2357  *
2358  * This function reinjects frames back to Target.
2359  * Todo - Host queue needs to be added
2360  *
2361  * Return: none
2362  */
2363 static
2364 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2365 {
2366 	struct dp_vdev *vdev;
2367 	struct dp_peer *peer = NULL;
2368 	uint32_t peer_id = HTT_INVALID_PEER;
2369 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2370 	qdf_nbuf_t nbuf_copy = NULL;
2371 	struct dp_tx_msdu_info_s msdu_info;
2372 	struct dp_peer *sa_peer = NULL;
2373 	struct dp_ast_entry *ast_entry = NULL;
2374 	struct dp_soc *soc = NULL;
2375 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2376 #ifdef WDS_VENDOR_EXTENSION
2377 	int is_mcast = 0, is_ucast = 0;
2378 	int num_peers_3addr = 0;
2379 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2380 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2381 #endif
2382 
2383 	vdev = tx_desc->vdev;
2384 	soc = vdev->pdev->soc;
2385 
2386 	qdf_assert(vdev);
2387 
2388 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2389 
2390 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2391 
2392 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2393 			"%s Tx reinject path", __func__);
2394 
2395 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2396 			qdf_nbuf_len(tx_desc->nbuf));
2397 
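	/*
	 * Look up the peer that originally sent this frame (by source MAC)
	 * so that a NAWDS multicast is not reinjected back to its sender.
	 */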
2398 	qdf_spin_lock_bh(&(soc->ast_lock));
2399 
2400 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2401 				(soc,
2402 				 (uint8_t *)(eh->ether_shost),
2403 				 vdev->pdev->pdev_id);
2404 
2405 	if (ast_entry)
2406 		sa_peer = ast_entry->peer;
2407 
2408 	qdf_spin_unlock_bh(&(soc->ast_lock));
2409 
2410 #ifdef WDS_VENDOR_EXTENSION
2411 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2412 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2413 	} else {
2414 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2415 	}
2416 	is_ucast = !is_mcast;
2417 
2418 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2419 		if (peer->bss_peer)
2420 			continue;
2421 
2422 		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
2425 		 * peers that use 4-addr framing for mcast frames will
2426 		 * be duplicated and sent as 4-addr frames below.
2427 		 */
2428 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2429 			num_peers_3addr = 1;
2430 			break;
2431 		}
2432 	}
2433 #endif
2434 
2435 	if (qdf_unlikely(vdev->mesh_vdev)) {
2436 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2437 	} else {
2438 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2439 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2440 #ifdef WDS_VENDOR_EXTENSION
2441 			/*
2442 			 * . if 3-addr STA, then send on BSS Peer
2443 			 * . if Peer WDS enabled and accept 4-addr mcast,
2444 			 * send mcast on that peer only
2445 			 * . if Peer WDS enabled and accept 4-addr ucast,
2446 			 * send ucast on that peer only
2447 			 */
2448 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2449 			 (peer->wds_enabled &&
2450 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2451 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2452 #else
2453 			((peer->bss_peer &&
2454 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2455 				 peer->nawds_enabled)) {
2456 #endif
2457 				peer_id = DP_INVALID_PEER;
2458 
2459 				if (peer->nawds_enabled) {
2460 					peer_id = peer->peer_ids[0];
2461 					if (sa_peer == peer) {
2462 						QDF_TRACE(
2463 							QDF_MODULE_ID_DP,
2464 							QDF_TRACE_LEVEL_DEBUG,
2465 							" %s: multicast packet",
2466 							__func__);
2467 						DP_STATS_INC(peer,
2468 							tx.nawds_mcast_drop, 1);
2469 						continue;
2470 					}
2471 				}
2472 
2473 				nbuf_copy = qdf_nbuf_copy(nbuf);
2474 
2475 				if (!nbuf_copy) {
2476 					QDF_TRACE(QDF_MODULE_ID_DP,
2477 						QDF_TRACE_LEVEL_DEBUG,
2478 						FL("nbuf copy failed"));
2479 					break;
2480 				}
2481 
2482 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2483 						nbuf_copy,
2484 						&msdu_info,
2485 						peer_id,
2486 						NULL);
2487 
2488 				if (nbuf_copy) {
2489 					QDF_TRACE(QDF_MODULE_ID_DP,
2490 						QDF_TRACE_LEVEL_DEBUG,
2491 						FL("pkt send failed"));
2492 					qdf_nbuf_free(nbuf_copy);
2493 				} else {
2494 					if (peer_id != DP_INVALID_PEER)
2495 						DP_STATS_INC_PKT(peer,
2496 							tx.nawds_mcast,
2497 							1, qdf_nbuf_len(nbuf));
2498 				}
2499 			}
2500 		}
2501 	}
2502 
2503 	if (vdev->nawds_enabled) {
2504 		peer_id = DP_INVALID_PEER;
2505 
2506 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2507 					1, qdf_nbuf_len(nbuf));
2508 
2509 		nbuf = dp_tx_send_msdu_single(vdev,
2510 				nbuf,
2511 				&msdu_info,
2512 				peer_id, NULL);
2513 
2514 		if (nbuf) {
2515 			QDF_TRACE(QDF_MODULE_ID_DP,
2516 				QDF_TRACE_LEVEL_DEBUG,
2517 				FL("pkt send failed"));
2518 			qdf_nbuf_free(nbuf);
2519 		}
2520 	} else
2521 		qdf_nbuf_free(nbuf);
2522 
2523 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2524 }
2525 
2526 /**
2527  * dp_tx_inspect_handler() - Tx Inspect Handler
2528  * @tx_desc: software descriptor head pointer
2529  * @status : Tx completion status from HTT descriptor
2530  *
2531  * Handles Tx frames sent back to Host for inspection
2532  * (ProxyARP)
2533  *
2534  * Return: none
2535  */
2536 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2537 {
2538 
2539 	struct dp_soc *soc;
2540 	struct dp_pdev *pdev = tx_desc->pdev;
2541 
2542 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2543 			"%s Tx inspect path",
2544 			__func__);
2545 
2546 	qdf_assert(pdev);
2547 
2548 	soc = pdev->soc;
2549 
2550 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2551 			qdf_nbuf_len(tx_desc->nbuf));
2552 
2553 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2554 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2555 }
2556 
2557 #ifdef FEATURE_PERPKT_INFO
2558 /**
2559  * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be reported in the tx capture header
 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing: a tx_capture_hdr is prepended to the buffer when
 * tx sniffer, m_copy or latency capture is enabled.
 *
 * Return: QDF_STATUS_SUCCESS if the buffer should be handed to the stack,
 *         error status otherwise
 */
2569 QDF_STATUS
2570 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2571 				       struct dp_pdev *pdev,
2572 				       struct dp_peer *peer,
2573 				       struct hal_tx_completion_status *ts,
2574 				       qdf_nbuf_t netbuf,
2575 				       uint64_t time_latency)
2576 {
2577 	struct tx_capture_hdr *ppdu_hdr;
2578 	uint16_t peer_id = ts->peer_id;
2579 	uint32_t ppdu_id = ts->ppdu_id;
2580 	uint8_t first_msdu = ts->first_msdu;
2581 	uint8_t last_msdu = ts->last_msdu;
2582 
2583 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2584 			 !pdev->latency_capture_enable))
2585 		return QDF_STATUS_E_NOSUPPORT;
2586 
2587 	if (!peer) {
2588 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2589 				FL("Peer Invalid"));
2590 		return QDF_STATUS_E_INVAL;
2591 	}
2592 
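	/*
	 * In M_COPY mode indicate only one completion per (ppdu_id, peer_id)
	 * pair; further MSDUs of the same PPDU and peer are skipped.
	 */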
2593 	if (pdev->mcopy_mode) {
2594 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2595 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2596 			return QDF_STATUS_E_INVAL;
2597 		}
2598 
2599 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2600 		pdev->m_copy_id.tx_peer_id = peer_id;
2601 	}
2602 
2603 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2604 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2605 				FL("No headroom"));
2606 		return QDF_STATUS_E_NOMEM;
2607 	}
2608 
2609 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2610 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2611 		     QDF_MAC_ADDR_SIZE);
2612 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2613 		     QDF_MAC_ADDR_SIZE);
2614 	ppdu_hdr->ppdu_id = ppdu_id;
2615 	ppdu_hdr->peer_id = peer_id;
2616 	ppdu_hdr->first_msdu = first_msdu;
2617 	ppdu_hdr->last_msdu = last_msdu;
2618 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2619 		ppdu_hdr->tsf = ts->tsf;
2620 		ppdu_hdr->time_latency = time_latency;
2621 	}
2622 
2623 	return QDF_STATUS_SUCCESS;
2624 }
2625 
2626 
2627 /**
2628  * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function sends the completion to the stack so that the buffer
 * can be freed there.
 *
 * Return: none
 */
2638 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2639 					uint16_t peer_id, uint32_t ppdu_id,
2640 					qdf_nbuf_t netbuf)
2641 {
2642 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2643 				netbuf, peer_id,
2644 				WDI_NO_VAL, pdev->pdev_id);
2645 }
2646 #else
2647 static QDF_STATUS
2648 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2649 				       struct dp_pdev *pdev,
2650 				       struct dp_peer *peer,
2651 				       struct hal_tx_completion_status *ts,
2652 				       qdf_nbuf_t netbuf,
2653 				       uint64_t time_latency)
2654 {
2655 	return QDF_STATUS_E_NOSUPPORT;
2656 }
2657 
2658 static void
2659 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2660 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2661 {
2662 }
2663 #endif
2664 
2665 /**
2666  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2667  * @soc: Soc handle
2668  * @desc: software Tx descriptor to be processed
2669  *
2670  * Return: none
2671  */
2672 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2673 				       struct dp_tx_desc_s *desc)
2674 {
2675 	struct dp_vdev *vdev = desc->vdev;
2676 	qdf_nbuf_t nbuf = desc->nbuf;
2677 
2678 	/* nbuf already freed in vdev detach path */
2679 	if (!nbuf)
2680 		return;
2681 
2682 	/* If it is TDLS mgmt, don't unmap or free the frame */
2683 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2684 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2685 
2686 	/* 0 : MSDU buffer, 1 : MLE */
2687 	if (desc->msdu_ext_desc) {
2688 		/* TSO free */
2689 		if (hal_tx_ext_desc_get_tso_enable(
2690 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
2692 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2693 						desc->tso_num_desc);
2694 			qdf_nbuf_free(nbuf);
2695 			return;
2696 		}
2697 	}
2698 
2699 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2700 
2701 	if (qdf_unlikely(!vdev)) {
2702 		qdf_nbuf_free(nbuf);
2703 		return;
2704 	}
2705 
2706 	if (qdf_likely(!vdev->mesh_vdev))
2707 		qdf_nbuf_free(nbuf);
2708 	else {
2709 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2710 			qdf_nbuf_free(nbuf);
2711 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2712 		} else
2713 			vdev->osif_tx_free_ext((nbuf));
2714 	}
2715 }
2716 
2717 #ifdef MESH_MODE_SUPPORT
2718 /**
2719  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2720  *                                         in mesh meta header
2721  * @tx_desc: software descriptor head pointer
2722  * @ts: pointer to tx completion stats
2723  * Return: none
2724  */
2725 static
2726 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2727 		struct hal_tx_completion_status *ts)
2728 {
2729 	struct meta_hdr_s *mhdr;
2730 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2731 
2732 	if (!tx_desc->msdu_ext_desc) {
2733 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2734 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2735 				"netbuf %pK offset %d",
2736 				netbuf, tx_desc->pkt_offset);
2737 			return;
2738 		}
2739 	}
2740 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2741 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2742 			"netbuf %pK offset %lu", netbuf,
2743 			sizeof(struct meta_hdr_s));
2744 		return;
2745 	}
2746 
2747 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2748 	mhdr->rssi = ts->ack_frame_rssi;
2749 	mhdr->channel = tx_desc->pdev->operating_channel;
2750 }
2751 
2752 #else
2753 static
2754 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2755 		struct hal_tx_completion_status *ts)
2756 {
2757 }
2758 
2759 #endif
2760 
2761 /**
 * dp_tx_compute_delay() - Compute SW enqueue, HW transmit and interframe
 *			   delays and update the per-TID delay stats
 *
 * @vdev: vdev handle
2766  * @tx_desc: tx descriptor
2767  * @tid: tid value
2768  * @ring_id: TCL or WBM ring number for transmit path
2769  * Return: none
2770  */
2771 static void dp_tx_compute_delay(struct dp_vdev *vdev,
2772 				struct dp_tx_desc_s *tx_desc,
2773 				uint8_t tid, uint8_t ring_id)
2774 {
2775 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
2776 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
2777 
2778 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
2779 		return;
2780 
2781 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
2782 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
2783 	timestamp_hw_enqueue = tx_desc->timestamp;
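
	/*
	 * Delay components computed below:
	 *   sw_enqueue_delay    = HW enqueue time - host ingress time
	 *   fwhw_transmit_delay = completion time - HW enqueue time
	 *   interframe_delay    = ingress time - previous frame ingress time
	 */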
2784 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
2785 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
2786 					 timestamp_hw_enqueue);
2787 	interframe_delay = (uint32_t)(timestamp_ingress -
2788 				      vdev->prev_tx_enq_tstamp);
2789 
2790 	/*
2791 	 * Delay in software enqueue
2792 	 */
2793 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
2794 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
2795 	/*
2796 	 * Delay between packet enqueued to HW and Tx completion
2797 	 */
2798 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
2799 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
2800 
2801 	/*
2802 	 * Update interframe delay stats calculated at hardstart receive point.
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
	 * the interframe delay will not be calculated correctly for the 1st
	 * frame. On the other hand, this avoids an extra per-packet check of
	 * !vdev->prev_tx_enq_tstamp.
2807 	 */
2808 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
2809 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
2810 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
2811 }
2812 
2813 /**
2814  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2815  *				per wbm ring
2816  *
2817  * @tx_desc: software descriptor head pointer
2818  * @ts: Tx completion status
2819  * @peer: peer handle
2820  * @ring_id: ring number
2821  *
2822  * Return: None
2823  */
2824 static inline void
2825 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
2826 			struct hal_tx_completion_status *ts,
2827 			struct dp_peer *peer, uint8_t ring_id)
2828 {
2829 	struct dp_pdev *pdev = peer->vdev->pdev;
2830 	struct dp_soc *soc = NULL;
2831 	uint8_t mcs, pkt_type;
2832 	uint8_t tid = ts->tid;
2833 	uint32_t length;
2834 	struct cdp_tid_tx_stats *tid_stats;
2835 
2836 	if (!pdev)
2837 		return;
2838 
2839 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
2840 		tid = CDP_MAX_DATA_TIDS - 1;
2841 
2842 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
2843 	soc = pdev->soc;
2844 
2845 	mcs = ts->mcs;
2846 	pkt_type = ts->pkt_type;
2847 
2848 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2849 		dp_err("Release source is not from TQM");
2850 		return;
2851 	}
2852 
2853 	length = qdf_nbuf_len(tx_desc->nbuf);
2854 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2855 
2856 	if (qdf_unlikely(pdev->delay_stats_flag))
2857 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
2858 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2859 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2860 
2861 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2862 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2863 
2864 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2865 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2866 
2867 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2868 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2869 
2870 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2871 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2872 
2873 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2874 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2875 
2876 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2877 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2878 
2879 	/*
2880 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
2881 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
2882 	 * are no completions for failed cases. Hence updating tx_failed from
2883 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
2884 	 * then this has to be removed
2885 	 */
2886 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
2887 				peer->stats.tx.dropped.fw_rem_notx +
2888 				peer->stats.tx.dropped.fw_rem_tx +
2889 				peer->stats.tx.dropped.age_out +
2890 				peer->stats.tx.dropped.fw_reason1 +
2891 				peer->stats.tx.dropped.fw_reason2 +
2892 				peer->stats.tx.dropped.fw_reason3;
2893 
2894 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
2895 		tid_stats->tqm_status_cnt[ts->status]++;
2896 	}
2897 
2898 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2899 		return;
2900 	}
2901 
2902 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2903 
2904 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2905 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2906 
2907 	/*
2908 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2909 	 * Return from here if HTT PPDU events are enabled.
2910 	 */
2911 	if (!(soc->process_tx_status))
2912 		return;
2913 
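	/*
	 * Per-preamble MCS histogram: an MCS beyond the maximum valid for the
	 * preamble type is accounted in the last bucket (MAX_MCS - 1).
	 */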
2914 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2915 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2916 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2917 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2918 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2919 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2920 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2921 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2922 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2923 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2924 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2925 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2926 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2927 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2928 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2929 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2930 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2931 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2932 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2933 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2934 
2935 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2936 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2937 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2938 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2939 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2940 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2941 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2942 
2943 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
2944 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2945 			     &peer->stats, ts->peer_id,
2946 			     UPDATE_PEER_STATS, pdev->pdev_id);
2947 #endif
2948 }
2949 
2950 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2951 /**
2952  * dp_tx_flow_pool_lock() - take flow pool lock
2953  * @soc: core txrx main context
2954  * @tx_desc: tx desc
2955  *
2956  * Return: None
2957  */
2958 static inline
2959 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2960 			  struct dp_tx_desc_s *tx_desc)
2961 {
2962 	struct dp_tx_desc_pool_s *pool;
2963 	uint8_t desc_pool_id;
2964 
2965 	desc_pool_id = tx_desc->pool_id;
2966 	pool = &soc->tx_desc[desc_pool_id];
2967 
2968 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2969 }
2970 
2971 /**
2972  * dp_tx_flow_pool_unlock() - release flow pool lock
2973  * @soc: core txrx main context
2974  * @tx_desc: tx desc
2975  *
2976  * Return: None
2977  */
2978 static inline
2979 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2980 			    struct dp_tx_desc_s *tx_desc)
2981 {
2982 	struct dp_tx_desc_pool_s *pool;
2983 	uint8_t desc_pool_id;
2984 
2985 	desc_pool_id = tx_desc->pool_id;
2986 	pool = &soc->tx_desc[desc_pool_id];
2987 
2988 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2989 }
2990 #else
2991 static inline
2992 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2993 {
2994 }
2995 
2996 static inline
2997 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2998 {
2999 }
3000 #endif
3001 
3002 /**
3003  * dp_tx_notify_completion() - Notify tx completion for this desc
3004  * @soc: core txrx main context
3005  * @tx_desc: tx desc
3006  * @netbuf:  buffer
3007  *
3008  * Return: none
3009  */
3010 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3011 					   struct dp_tx_desc_s *tx_desc,
3012 					   qdf_nbuf_t netbuf)
3013 {
3014 	void *osif_dev;
3015 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3016 
3017 	qdf_assert(tx_desc);
3018 
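	/*
	 * Read the OS shim device and completion callback under the flow
	 * pool lock so they are consistent with the descriptor's vdev; the
	 * callback itself is invoked after the lock is dropped.
	 */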
3019 	dp_tx_flow_pool_lock(soc, tx_desc);
3020 
3021 	if (!tx_desc->vdev ||
3022 	    !tx_desc->vdev->osif_vdev) {
3023 		dp_tx_flow_pool_unlock(soc, tx_desc);
3024 		return;
3025 	}
3026 
3027 	osif_dev = tx_desc->vdev->osif_vdev;
3028 	tx_compl_cbk = tx_desc->vdev->tx_comp;
3029 	dp_tx_flow_pool_unlock(soc, tx_desc);
3030 
3031 	if (tx_compl_cbk)
3032 		tx_compl_cbk(netbuf, osif_dev);
3033 }
3034 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
3037  * @tid: tid value
3038  * @txdesc_ts: timestamp from txdesc
3039  * @ppdu_id: ppdu id
3040  *
3041  * Return: none
3042  */
3043 #ifdef FEATURE_PERPKT_INFO
3044 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3045 					       struct dp_peer *peer,
3046 					       uint8_t tid,
3047 					       uint64_t txdesc_ts,
3048 					       uint32_t ppdu_id)
3049 {
3050 	uint64_t delta_ms;
3051 	struct cdp_tx_sojourn_stats *sojourn_stats;
3052 
3053 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3054 		return;
3055 
3056 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3057 			 tid >= CDP_DATA_TID_MAX))
3058 		return;
3059 
3060 	if (qdf_unlikely(!pdev->sojourn_buf))
3061 		return;
3062 
3063 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3064 		qdf_nbuf_data(pdev->sojourn_buf);
3065 
3066 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
3067 
3068 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3069 				txdesc_ts;
3070 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3071 			    delta_ms);
3072 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3073 	sojourn_stats->num_msdus[tid] = 1;
3074 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3075 		peer->avg_sojourn_msdu[tid].internal;
3076 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3077 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3078 			     WDI_NO_VAL, pdev->pdev_id);
3079 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3080 	sojourn_stats->num_msdus[tid] = 0;
3081 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3082 }
3083 #else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
3088 {
3089 }
3090 #endif
3091 
3092 /**
3093  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3094  * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
3097  *
3098  * Return: none
3099  */
3100 static inline void
3101 dp_tx_comp_process_desc(struct dp_soc *soc,
3102 			struct dp_tx_desc_s *desc,
3103 			struct hal_tx_completion_status *ts,
3104 			struct dp_peer *peer)
3105 {
3106 	uint64_t time_latency = 0;

	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
				desc->timestamp);
	}

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (!(desc->msdu_ext_desc)) {
3116 		if (QDF_STATUS_SUCCESS ==
3117 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3118 			return;
3119 		}
3120 
3121 		if (QDF_STATUS_SUCCESS ==
3122 		    dp_get_completion_indication_for_stack(soc,
3123 							   desc->pdev,
3124 							   peer, ts,
3125 							   desc->nbuf,
3126 							   time_latency)) {
3127 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
3128 				       QDF_DMA_TO_DEVICE);
3129 			dp_send_completion_to_stack(soc,
3130 						    desc->pdev,
3131 						    ts->peer_id,
3132 						    ts->ppdu_id,
3133 						    desc->nbuf);
3134 			return;
3135 		}
3136 	}
3137 
3138 	dp_tx_comp_free_buf(soc, desc);
3139 }
3140 
3141 /**
3142  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3143  * @tx_desc: software descriptor head pointer
3144  * @ts: Tx completion status
3145  * @peer: peer handle
3146  * @ring_id: ring number
3147  *
3148  * Return: none
3149  */
3150 static inline
3151 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
3152 				  struct hal_tx_completion_status *ts,
3153 				  struct dp_peer *peer, uint8_t ring_id)
3154 {
3155 	uint32_t length;
3156 	qdf_ether_header_t *eh;
3157 	struct dp_soc *soc = NULL;
3158 	struct dp_vdev *vdev = tx_desc->vdev;
3159 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3160 
3161 	if (!vdev || !nbuf) {
3162 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3163 				"invalid tx descriptor. vdev or nbuf NULL");
3164 		goto out;
3165 	}
3166 
3167 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3168 
3169 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3170 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3171 				 QDF_TRACE_DEFAULT_PDEV_ID,
3172 				 qdf_nbuf_data_addr(nbuf),
3173 				 sizeof(qdf_nbuf_data(nbuf)),
3174 				 tx_desc->id,
3175 				 ts->status));
3176 
3177 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3178 				"-------------------- \n"
3179 				"Tx Completion Stats: \n"
3180 				"-------------------- \n"
3181 				"ack_frame_rssi = %d \n"
3182 				"first_msdu = %d \n"
3183 				"last_msdu = %d \n"
3184 				"msdu_part_of_amsdu = %d \n"
3185 				"rate_stats valid = %d \n"
3186 				"bw = %d \n"
3187 				"pkt_type = %d \n"
3188 				"stbc = %d \n"
3189 				"ldpc = %d \n"
3190 				"sgi = %d \n"
3191 				"mcs = %d \n"
3192 				"ofdma = %d \n"
3193 				"tones_in_ru = %d \n"
3194 				"tsf = %d \n"
3195 				"ppdu_id = %d \n"
3196 				"transmit_cnt = %d \n"
3197 				"tid = %d \n"
3198 				"peer_id = %d\n",
3199 				ts->ack_frame_rssi, ts->first_msdu,
3200 				ts->last_msdu, ts->msdu_part_of_amsdu,
3201 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3202 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3203 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3204 				ts->transmit_cnt, ts->tid, ts->peer_id);
3205 
3206 	soc = vdev->pdev->soc;
3207 
3208 	/* Update SoC level stats */
3209 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3210 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3211 
3212 	/* Update per-packet stats for mesh mode */
3213 	if (qdf_unlikely(vdev->mesh_vdev) &&
3214 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3215 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3216 
3217 	length = qdf_nbuf_len(nbuf);
3218 	/* Update peer level stats */
3219 	if (!peer) {
3220 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
3221 				   "peer is null or deletion in progress");
3222 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3223 		goto out;
3224 	}
3225 
3226 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
3227 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3228 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3229 
3230 			if ((peer->vdev->tx_encap_type ==
3231 				htt_cmn_pkt_type_ethernet) &&
3232 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3233 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3234 			}
3235 		}
3236 	} else {
3237 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3238 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3239 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3240 	}
3241 
3242 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
3243 
3244 #ifdef QCA_SUPPORT_RDK_STATS
3245 	if (soc->wlanstats_enabled)
3246 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3247 					    tx_desc->timestamp,
3248 					    ts->ppdu_id);
3249 #endif
3250 
3251 out:
3252 	return;
3253 }
3254 /**
3255  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3256  * @soc: core txrx main context
3257  * @comp_head: software descriptor head pointer
3258  * @ring_id: ring number
3259  *
3260  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3261  * and release the software descriptors after processing is complete
3262  *
3263  * Return: none
3264  */
3265 static void
3266 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3267 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
3268 {
3269 	struct dp_tx_desc_s *desc;
3270 	struct dp_tx_desc_s *next;
3271 	struct hal_tx_completion_status ts = {0};
3272 	struct dp_peer *peer;
3273 	qdf_nbuf_t netbuf;
3274 
3275 	desc = comp_head;
3276 
3277 	while (desc) {
3278 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3279 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3280 		dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);
3281 
3282 		netbuf = desc->nbuf;
3283 		/* check tx complete notification */
3284 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3285 			dp_tx_notify_completion(soc, desc, netbuf);
3286 
3287 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3288 
3289 		if (peer)
3290 			dp_peer_unref_del_find_by_id(peer);
3291 
3292 		next = desc->next;
3293 
3294 		dp_tx_desc_release(desc, desc->pool_id);
3295 		desc = next;
3296 	}
3297 
3298 }
3299 
3300 /**
3301  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3302  * @tx_desc: software descriptor head pointer
3303  * @status : Tx completion status from HTT descriptor
3304  * @ring_id: ring number
3305  *
3306  * This function will process HTT Tx indication messages from Target
3307  *
3308  * Return: none
3309  */
3310 static
3311 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
3312 				  uint8_t ring_id)
3313 {
3314 	uint8_t tx_status;
3315 	struct dp_pdev *pdev;
3316 	struct dp_vdev *vdev;
3317 	struct dp_soc *soc;
3318 	struct hal_tx_completion_status ts = {0};
3319 	uint32_t *htt_desc = (uint32_t *)status;
3320 	struct dp_peer *peer;
3321 	struct cdp_tid_tx_stats *tid_stats = NULL;
3322 	struct htt_soc *htt_handle;
3323 
3324 	qdf_assert(tx_desc->pdev);
3325 
3326 	pdev = tx_desc->pdev;
3327 	vdev = tx_desc->vdev;
3328 	soc = pdev->soc;
3329 
3330 	if (!vdev)
3331 		return;
3332 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3333 	htt_handle = (struct htt_soc *)soc->htt_handle;
3334 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
3335 
3336 	switch (tx_status) {
3337 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3338 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3339 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3340 	{
3341 		uint8_t tid;
3342 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3343 			ts.peer_id =
3344 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3345 						htt_desc[2]);
3346 			ts.tid =
3347 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3348 						htt_desc[2]);
3349 		} else {
3350 			ts.peer_id = HTT_INVALID_PEER;
3351 			ts.tid = HTT_INVALID_TID;
3352 		}
3353 		ts.ppdu_id =
3354 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3355 					htt_desc[1]);
3356 		ts.ack_frame_rssi =
3357 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3358 					htt_desc[1]);
3359 
3360 		ts.first_msdu = 1;
3361 		ts.last_msdu = 1;
3362 		tid = ts.tid;
3363 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3364 			tid = CDP_MAX_DATA_TIDS - 1;
3365 
3366 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3367 
3368 		if (qdf_unlikely(pdev->delay_stats_flag))
3369 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
3370 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
3371 			tid_stats->htt_status_cnt[tx_status]++;
3372 		}
3373 
		peer = dp_peer_find_by_id(soc, ts.peer_id);

		dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		/* Drop the peer reference only after it has been used above */
		if (qdf_likely(peer))
			dp_peer_unref_del_find_by_id(peer);
3382 
3383 		break;
3384 	}
3385 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3386 	{
3387 		dp_tx_reinject_handler(tx_desc, status);
3388 		break;
3389 	}
3390 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3391 	{
3392 		dp_tx_inspect_handler(tx_desc, status);
3393 		break;
3394 	}
3395 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3396 	{
3397 		dp_tx_mec_handler(vdev, status);
3398 		break;
3399 	}
3400 	default:
3401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3402 			  "%s Invalid HTT tx_status %d\n",
3403 			  __func__, tx_status);
3404 		break;
3405 	}
3406 }
3407 
3408 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3409 static inline
3410 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3411 {
3412 	bool limit_hit = false;
3413 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3414 
	limit_hit = num_reaped >= cfg->tx_comp_loop_pkt_limit;
3417 
3418 	if (limit_hit)
3419 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3420 
3421 	return limit_hit;
3422 }
3423 
3424 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3425 {
3426 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3427 }
3428 #else
3429 static inline
3430 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3431 {
3432 	return false;
3433 }
3434 
3435 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3436 {
3437 	return false;
3438 }
3439 #endif
3440 
3441 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3442 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
3443 			    uint32_t quota)
3444 {
3445 	void *tx_comp_hal_desc;
3446 	uint8_t buffer_src;
3447 	uint8_t pool_id;
3448 	uint32_t tx_desc_id;
3449 	struct dp_tx_desc_s *tx_desc = NULL;
3450 	struct dp_tx_desc_s *head_desc = NULL;
3451 	struct dp_tx_desc_s *tail_desc = NULL;
3452 	uint32_t num_processed = 0;
3453 	uint32_t count = 0;
3454 	bool force_break = false;
3455 
3456 	DP_HIST_INIT();
3457 
3458 more_data:
3459 	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
3462 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
3463 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3464 				"%s %d : HAL RING Access Failed -- %pK",
3465 				__func__, __LINE__, hal_ring_hdl);
3466 		return 0;
3467 	}
3468 
3469 	/* Find head descriptor from completion ring */
3470 	while (qdf_likely(tx_comp_hal_desc =
3471 			hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) {
3472 
3473 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3474 
		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert.
		 */
3477 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3478 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3479 			uint8_t wbm_internal_error;
3480 
3481 			QDF_TRACE(QDF_MODULE_ID_DP,
3482 				  QDF_TRACE_LEVEL_FATAL,
3483 				  "Tx comp release_src != TQM | FW but from %d",
3484 				  buffer_src);
3485 			hal_dump_comp_desc(tx_comp_hal_desc);
3486 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3487 
3488 			/* When WBM sees NULL buffer_addr_info in any of
3489 			 * ingress rings it sends an error indication,
3490 			 * with wbm_internal_error=1, to a specific ring.
3491 			 * The WBM2SW ring used to indicate these errors is
3492 			 * fixed in HW, and that ring is being used as Tx
3493 			 * completion ring. These errors are not related to
3494 			 * Tx completions, and should just be ignored
3495 			 */
3496 
3497 			wbm_internal_error =
3498 			hal_get_wbm_internal_error(tx_comp_hal_desc);
3499 
3500 			if (wbm_internal_error) {
3501 				QDF_TRACE(QDF_MODULE_ID_DP,
3502 					  QDF_TRACE_LEVEL_ERROR,
3503 					  "Tx comp wbm_internal_error!!!\n");
3504 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
3505 
3506 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
3507 								buffer_src)
3508 					dp_handle_wbm_internal_error(
3509 						soc,
3510 						tx_comp_hal_desc,
3511 						hal_tx_comp_get_buffer_type(
3512 							tx_comp_hal_desc));
3513 
3514 				continue;
3515 			} else {
3516 				qdf_assert_always(0);
3517 			}
3518 		}
3519 
3520 		/* Get descriptor id */
3521 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3522 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3523 			DP_TX_DESC_ID_POOL_OS;
3524 
3525 		/* Find Tx descriptor */
3526 		tx_desc = dp_tx_desc_find(soc, pool_id,
3527 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3528 				DP_TX_DESC_ID_PAGE_OS,
3529 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3530 				DP_TX_DESC_ID_OFFSET_OS);
3531 
3532 		/*
3533 		 * If the descriptor is already freed in vdev_detach,
3534 		 * continue to next descriptor
3535 		 */
3536 		if (!tx_desc->vdev && !tx_desc->flags) {
3537 			QDF_TRACE(QDF_MODULE_ID_DP,
3538 				  QDF_TRACE_LEVEL_INFO,
3539 				  "Descriptor freed in vdev_detach %d",
3540 				  tx_desc_id);
3541 
3542 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3543 			count++;
3544 			continue;
3545 		}
3546 
3547 		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3548 			QDF_TRACE(QDF_MODULE_ID_DP,
3549 				  QDF_TRACE_LEVEL_INFO,
3550 				  "pdev in down state %d",
3551 				  tx_desc_id);
3552 
3553 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3554 			count++;
3555 
3556 			dp_tx_comp_free_buf(soc, tx_desc);
3557 			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3558 			continue;
3559 		}
3560 
3561 		/*
3562 		 * If the release source is FW, process the HTT status
3563 		 */
3564 		if (qdf_unlikely(buffer_src ==
3565 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3566 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3567 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3568 					htt_tx_status);
3569 			dp_tx_process_htt_completion(tx_desc,
3570 					htt_tx_status, ring_id);
3571 		} else {
3572 			/* A pool id mismatch is a fatal error */
3573 			if (tx_desc->pool_id != pool_id) {
3574 				QDF_TRACE(QDF_MODULE_ID_DP,
3575 					QDF_TRACE_LEVEL_FATAL,
3576 					"Tx Comp pool id %d not matched %d",
3577 					pool_id, tx_desc->pool_id);
3578 
3579 				qdf_assert_always(0);
3580 			}
3581 
3582 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3583 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3584 				QDF_TRACE(QDF_MODULE_ID_DP,
3585 					QDF_TRACE_LEVEL_FATAL,
3586 					"Tx desc invalid, flags = %x, id = %d",
3587 					tx_desc->flags, tx_desc_id);
3588 				qdf_assert_always(0);
3589 			}
3590 
3591 			/* First descriptor reaped in this cycle */
3592 			if (!head_desc) {
3593 				head_desc = tx_desc;
3594 				tail_desc = tx_desc;
3595 			}
3596 
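			/*
			 * Append this descriptor to the per-cycle list. For
			 * the first descriptor, the transient self-link made
			 * by the next assignment is immediately cleared by
			 * tx_desc->next = NULL.
			 */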
3597 			tail_desc->next = tx_desc;
3598 			tx_desc->next = NULL;
3599 			tail_desc = tx_desc;
3600 
3601 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
3602 
3603 			/* Collect hw completion contents */
3604 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3605 					&tx_desc->comp, 1);
3606 
3607 		}
3608 
3609 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3610 
3611 		/*
3612 		 * Stop processing once the processed packet count
3613 		 * reaches the given quota.
3614 		 */
3615 		if (num_processed >= quota) {
3616 			force_break = true;
3617 			break;
3618 		}
3619 
3620 		count++;
3621 
3622 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
3623 			break;
3624 	}
3625 
3626 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3627 
3628 	/* Process the reaped descriptors */
3629 	if (head_desc)
3630 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
3631 
3632 	if (dp_tx_comp_enable_eol_data_check(soc)) {
3633 		if (!force_break &&
3634 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
3635 						  hal_ring_hdl)) {
3636 			DP_STATS_INC(soc, tx.hp_oos2, 1);
3637 			if (!hif_exec_should_yield(soc->hif_handle,
3638 						   int_ctx->dp_intr_id))
3639 				goto more_data;
3640 		}
3641 	}
3642 	DP_TX_HIST_STATS_PER_PDEV();
3643 
3644 	return num_processed;
3645 }
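
/*
 * Illustrative usage only (the caller lives outside this file, so the names
 * below are assumptions): the per-ring interrupt/NAPI service routine is
 * expected to drive this handler with its remaining budget, e.g.
 *
 *	work_done = dp_tx_comp_handler(int_ctx, soc,
 *				       soc->tx_comp_ring[ring].hal_srng,
 *				       ring, remaining_quota);
 */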
3646 
3647 #ifdef FEATURE_WLAN_TDLS
3648 /**
3649  * dp_tx_non_std() - Allow the control-path SW to send data frames
3650  *
3651  * @vdev_handle: which vdev should transmit the tx data frames
3652  * @tx_spec: what non-standard handling to apply to the tx data frames
3653  * @msdu_list: NULL-terminated list of tx MSDUs
3654  *
3655  * Return: NULL on success,
3656  *         nbuf when it fails to send
3657  */
3658 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3659 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3660 {
3661 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3662 
3663 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3664 		vdev->is_tdls_frame = true;
3665 	return dp_tx_send(vdev_handle, msdu_list);
3666 }
3667 #endif
3668 
3669 /**
3670  * dp_tx_vdev_attach() - attach vdev to dp tx
3671  * @vdev: virtual device instance
3672  *
3673  * Return: QDF_STATUS_SUCCESS: success
3674  *         QDF_STATUS_E_RESOURCES: Error return
3675  */
3676 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3677 {
3678 	/*
3679 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3680 	 */
3681 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3682 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3683 
3684 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3685 			vdev->vdev_id);
3686 
3687 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3688 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3689 
3690 	/*
3691 	 * Set HTT Extension Valid bit to 0 by default
3692 	 */
3693 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3694 
3695 	dp_tx_vdev_update_search_flags(vdev);
3696 
3697 	return QDF_STATUS_SUCCESS;
3698 }
3699 
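/*
 * Without FEATURE_WDS there is no WDS-specific DA search override for STA
 * vdevs, so the stub below always reports no override.
 */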
3700 #ifndef FEATURE_WDS
3701 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3702 {
3703 	return false;
3704 }
3705 #endif
3706 
3707 /**
3708  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3709  * @vdev: virtual device instance
3710  *
3711  * Return: void
3712  *
3713  */
3714 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3715 {
3716 	struct dp_soc *soc = vdev->pdev->soc;
3717 
3718 	/*
3719 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3720 	 * for TDLS link
3721 	 *
3722 	 * Enable AddrY (SA based search) only for non-WDS STA and
3723 	 * ProxySTA VAP (in HKv1) modes.
3724 	 *
3725 	 * In all other VAP modes, only DA based search should be
3726 	 * enabled
3727 	 */
3728 	if (vdev->opmode == wlan_op_mode_sta &&
3729 	    vdev->tdls_link_connected)
3730 		vdev->hal_desc_addr_search_flags =
3731 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3732 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3733 		 !dp_tx_da_search_override(vdev))
3734 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3735 	else
3736 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3737 
3738 	/* Set search type only when peer map v2 messaging is enabled
3739 	 * as we will have the search index (AST hash) only when v2 is
3740 	 * enabled
3741 	 */
3742 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3743 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3744 	else
3745 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3746 }
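
/*
 * Summary of the selections made in dp_tx_vdev_update_search_flags():
 *	TDLS-connected STA       -> ADDRX | ADDRY search
 *	STA without DA override  -> ADDRY (SA based) search
 *	all other vdev modes     -> ADDRX (DA based) search
 * search_type is HAL_TX_ADDR_INDEX_SEARCH only for STA vdevs with peer
 * map/unmap v2 enabled; otherwise it is HAL_TX_ADDR_SEARCH_DEFAULT.
 */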
3747 
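/**
 * dp_is_tx_desc_flush_match() - check whether a TX desc should be flushed
 * @pdev: DP pdev handle
 * @vdev: DP vdev handle, or NULL to match on pdev only
 * @tx_desc: TX desc to examine
 *
 * Return: true if @tx_desc is allocated and belongs to @vdev (or to @pdev
 * when @vdev is NULL), false otherwise
 */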
3748 static inline bool
3749 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
3750 			  struct dp_vdev *vdev,
3751 			  struct dp_tx_desc_s *tx_desc)
3752 {
3753 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
3754 		return false;
3755 
3756 	/*
3757 	 * if vdev is given, then only check whether desc
3758 	 * vdev match. if vdev is NULL, then check whether
3759 	 * desc pdev match.
3760 	 */
3761 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
3762 }
3763 
3764 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3765 /**
3766  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
3767  *
3768  * @soc: Handle to DP SoC structure
3769  * @tx_desc: pointer of one TX desc
3770  * @desc_pool_id: TX Desc pool id
3771  */
3772 static inline void
3773 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
3774 		      uint8_t desc_pool_id)
3775 {
3776 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
3777 
3778 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3779 
3780 	tx_desc->vdev = NULL;
3781 
3782 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3783 }
3784 
3785 /**
3786  * dp_tx_desc_flush() - release resources associated
3787  *                      to TX Desc
3788  *
3789  * @pdev: Handle to DP pdev structure
3790  * @vdev: virtual device instance
3791  *        NULL: no specific vdev is required; check all allocated TX
3792  *        descs on this pdev.
3793  *        Non-NULL: only check the allocated TX descs associated with
3794  *        this vdev.
3795  * @force_free:
3796  *        true: flush (free) the TX descs.
3797  *        false: only reset the vdev pointer in each allocated TX desc
3798  *        associated with the given vdev.
3799  *
3800  * This function walks the TX desc pools to either free the
3801  * outstanding TX data or reset the vdev pointer in the matching
3802  * TX descs.
3803  */
3804 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3805 			     struct dp_vdev *vdev,
3806 			     bool force_free)
3807 {
3808 	uint8_t i;
3809 	uint32_t j;
3810 	uint32_t num_desc, page_id, offset;
3811 	uint16_t num_desc_per_page;
3812 	struct dp_soc *soc = pdev->soc;
3813 	struct dp_tx_desc_s *tx_desc = NULL;
3814 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3815 
3816 	if (!vdev && !force_free) {
3817 		dp_err("Reset TX desc vdev, Vdev param is required!");
3818 		return;
3819 	}
3820 
3821 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3822 		tx_desc_pool = &soc->tx_desc[i];
3823 		if (!(tx_desc_pool->pool_size) ||
3824 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3825 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3826 			continue;
3827 
3828 		num_desc = tx_desc_pool->pool_size;
3829 		num_desc_per_page =
3830 			tx_desc_pool->desc_pages.num_element_per_page;
3831 		for (j = 0; j < num_desc; j++) {
3832 			page_id = j / num_desc_per_page;
3833 			offset = j % num_desc_per_page;
3834 
3835 			if (qdf_unlikely(!(tx_desc_pool->
3836 					 desc_pages.cacheable_pages)))
3837 				break;
3838 
3839 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3840 
3841 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3842 				/*
3843 				 * Free TX desc if force free is
3844 				 * required, otherwise only reset vdev
3845 				 * in this TX desc.
3846 				 */
3847 				if (force_free) {
3848 					dp_tx_comp_free_buf(soc, tx_desc);
3849 					dp_tx_desc_release(tx_desc, i);
3850 				} else {
3851 					dp_tx_desc_reset_vdev(soc, tx_desc,
3852 							      i);
3853 				}
3854 			}
3855 		}
3856 	}
3857 }
3858 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3859 
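/*
 * Non flow-control variant: the vdev pointer reset is protected by the
 * per-pool TX_DESC_LOCK instead of the flow_pool_lock.
 */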
3860 static inline void
3861 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
3862 		      uint8_t desc_pool_id)
3863 {
3864 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
3865 
3866 	tx_desc->vdev = NULL;
3867 
3868 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
3869 }
3870 
3871 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3872 			     struct dp_vdev *vdev,
3873 			     bool force_free)
3874 {
3875 	uint8_t i, num_pool;
3876 	uint32_t j;
3877 	uint32_t num_desc, page_id, offset;
3878 	uint16_t num_desc_per_page;
3879 	struct dp_soc *soc = pdev->soc;
3880 	struct dp_tx_desc_s *tx_desc = NULL;
3881 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3882 
3883 	if (!vdev && !force_free) {
3884 		dp_err("Reset TX desc vdev, Vdev param is required!");
3885 		return;
3886 	}
3887 
3888 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3889 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3890 
3891 	for (i = 0; i < num_pool; i++) {
3892 		tx_desc_pool = &soc->tx_desc[i];
3893 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3894 			continue;
3895 
3896 		num_desc_per_page =
3897 			tx_desc_pool->desc_pages.num_element_per_page;
3898 		for (j = 0; j < num_desc; j++) {
3899 			page_id = j / num_desc_per_page;
3900 			offset = j % num_desc_per_page;
3901 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3902 
3903 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3904 				if (force_free) {
3905 					dp_tx_comp_free_buf(soc, tx_desc);
3906 					dp_tx_desc_release(tx_desc, i);
3907 				} else {
3908 					dp_tx_desc_reset_vdev(soc, tx_desc,
3909 							      i);
3910 				}
3911 			}
3912 		}
3913 	}
3914 }
3915 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3916 
3917 /**
3918  * dp_tx_vdev_detach() - detach vdev from dp tx
3919  * @vdev: virtual device instance
3920  *
3921  * Return: QDF_STATUS_SUCCESS: success
3922  *         QDF_STATUS_E_RESOURCES: Error return
3923  */
3924 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3925 {
3926 	struct dp_pdev *pdev = vdev->pdev;
3927 
3928 	/* Reset TX desc associated to this Vdev as NULL */
3929 	/* Reset the vdev pointer in TX descs associated with this vdev */
3930 	dp_tx_vdev_multipass_deinit(vdev);
3931 
3932 	return QDF_STATUS_SUCCESS;
3933 }
3934 
3935 /**
3936  * dp_tx_pdev_attach() - attach pdev to dp tx
3937  * @pdev: physical device instance
3938  *
3939  * Return: QDF_STATUS_SUCCESS: success
3940  *         QDF_STATUS_E_RESOURCES: Error return
3941  */
3942 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3943 {
3944 	struct dp_soc *soc = pdev->soc;
3945 
3946 	/* Initialize Flow control counters */
3947 	qdf_atomic_init(&pdev->num_tx_exception);
3948 	qdf_atomic_init(&pdev->num_tx_outstanding);
3949 
3950 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3951 		/* Initialize descriptors in TCL Ring */
3952 		hal_tx_init_data_ring(soc->hal_soc,
3953 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3954 	}
3955 
3956 	return QDF_STATUS_SUCCESS;
3957 }
3958 
3959 /**
3960  * dp_tx_pdev_detach() - detach pdev from dp tx
3961  * @pdev: physical device instance
3962  *
3963  * Return: QDF_STATUS_SUCCESS: success
3964  *         QDF_STATUS_E_RESOURCES: Error return
3965  */
3966 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3967 {
3968 	/* flush TX outstanding data per pdev */
3969 	dp_tx_desc_flush(pdev, NULL, true);
3970 	dp_tx_me_exit(pdev);
3971 	return QDF_STATUS_SUCCESS;
3972 }
3973 
3974 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3975 /* Pools will be allocated dynamically */
3976 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3977 					int num_desc)
3978 {
3979 	uint8_t i;
3980 
3981 	for (i = 0; i < num_pool; i++) {
3982 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3983 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3984 	}
3985 
3986 	return 0;
3987 }
3988 
3989 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3990 {
3991 	uint8_t i;
3992 
3993 	for (i = 0; i < num_pool; i++)
3994 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3995 }
3996 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
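/*
 * Without flow control v2, software Tx descriptor pools are allocated
 * statically up front.
 */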
3997 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3998 					int num_desc)
3999 {
4000 	uint8_t i;
4001 
4002 	/* Allocate software Tx descriptor pools */
4003 	for (i = 0; i < num_pool; i++) {
4004 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
4005 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4006 					"%s Tx Desc Pool alloc %d failed %pK",
4007 					__func__, i, soc);
4008 			return ENOMEM;
4009 		}
4010 	}
4011 	return 0;
4012 }
4013 
4014 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4015 {
4016 	uint8_t i;
4017 
4018 	for (i = 0; i < num_pool; i++) {
4019 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
4020 		if (dp_tx_desc_pool_free(soc, i)) {
4021 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4022 				"%s Tx Desc Pool Free failed", __func__);
4023 		}
4024 	}
4025 }
4026 
4027 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4028 
4029 #ifndef QCA_MEM_ATTACH_ON_WIFI3
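/*
 * With QCA_MEM_ATTACH_ON_WIFI3 the TSO attach/detach wrappers below are
 * no-ops; the TSO descriptor pools are presumably reserved along with the
 * rest of the attach-time memory (assumption, not verified in this file).
 */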
4030 /**
4031  * dp_tso_attach_wifi3() - TSO attach handler
4032  * @txrx_soc: Opaque Dp handle
4033  *
4034  * Reserve TSO descriptor buffers
4035  *
4036  * Return: QDF_STATUS_E_FAILURE on failure or
4037  * QDF_STATUS_SUCCESS on success
4038  */
4039 static
4040 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
4041 {
4042 	return dp_tso_soc_attach(txrx_soc);
4043 }
4044 
4045 /**
4046  * dp_tso_detach_wifi3() - TSO Detach handler
4047  * @txrx_soc: Opaque Dp handle
4048  *
4049  * Deallocate TSO descriptor buffers
4050  *
4051  * Return: QDF_STATUS_E_FAILURE on failure or
4052  * QDF_STATUS_SUCCESS on success
4053  */
4054 static
4055 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
4056 {
4057 	return dp_tso_soc_detach(txrx_soc);
4058 }
4059 #else
4060 static
4061 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
4062 {
4063 	return QDF_STATUS_SUCCESS;
4064 }
4065 
4066 static
4067 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
4068 {
4069 	return QDF_STATUS_SUCCESS;
4070 }
4071 #endif
4072 
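/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: Opaque DP soc handle
 *
 * Free the TSO descriptor pools and the TSO num-segment pools for every
 * configured Tx descriptor pool.
 *
 * Return: QDF_STATUS_SUCCESS
 */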
4073 QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
4074 {
4075 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4076 	uint8_t i;
4077 	uint8_t num_pool;
4078 	uint32_t num_desc;
4079 
4080 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4081 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4082 
4083 	for (i = 0; i < num_pool; i++)
4084 		dp_tx_tso_desc_pool_free(soc, i);
4085 
4086 	dp_info("%s TSO Desc Pool %d Free descs = %d",
4087 		__func__, num_pool, num_desc);
4088 
4089 	for (i = 0; i < num_pool; i++)
4090 		dp_tx_tso_num_seg_pool_free(soc, i);
4091 
4092 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
4093 		__func__, num_pool, num_desc);
4094 
4095 	return QDF_STATUS_SUCCESS;
4096 }
4097 
4098 /**
4099  * dp_tso_soc_attach() - TSO attach handler
4100  * @txrx_soc: Opaque Dp handle
4101  *
4102  * Reserve TSO descriptor buffers
4103  *
4104  * Return: QDF_STATUS_E_FAILURE on failure or
4105  * QDF_STATUS_SUCCESS on success
4106  */
4107 QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
4108 {
4109 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4110 	uint8_t i;
4111 	uint8_t num_pool;
4112 	uint32_t num_desc;
4113 
4114 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4115 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4116 
4117 	for (i = 0; i < num_pool; i++) {
4118 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
4119 			dp_err("TSO Desc Pool alloc %d failed %pK",
4120 			       i, soc);
4121 
4122 			return QDF_STATUS_E_FAILURE;
4123 		}
4124 	}
4125 
4126 	dp_info("%s TSO Desc Alloc %d, descs = %d",
4127 		__func__, num_pool, num_desc);
4128 
4129 	for (i = 0; i < num_pool; i++) {
4130 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
4131 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
4132 			       i, soc);
4133 
4134 			return QDF_STATUS_E_FAILURE;
4135 		}
4136 	}
4137 	return QDF_STATUS_SUCCESS;
4138 }
4139 
4140 /**
4141  * dp_tx_soc_detach() - detach soc from dp tx
4142  * @soc: core txrx main context
4143  *
4144  * This function detaches dp tx from the main device context and
4145  * frees the dp tx resources.
4146  *
4147  * Return: QDF_STATUS_SUCCESS: success
4148  *         QDF_STATUS_E_RESOURCES: Error return
4149  */
4150 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
4151 {
4152 	uint8_t num_pool;
4153 	uint16_t num_desc;
4154 	uint16_t num_ext_desc;
4155 	uint8_t i;
4156 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4157 
4158 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4159 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4160 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4161 
4162 	dp_tx_flow_control_deinit(soc);
4163 	dp_tx_delete_static_pools(soc, num_pool);
4164 
4165 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4166 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
4167 			__func__, num_pool, num_desc);
4168 
4169 	for (i = 0; i < num_pool; i++) {
4170 		if (dp_tx_ext_desc_pool_free(soc, i)) {
4171 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4172 					"%s Tx Ext Desc Pool Free failed",
4173 					__func__);
4174 			return QDF_STATUS_E_RESOURCES;
4175 		}
4176 	}
4177 
4178 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4179 			"%s MSDU Ext Desc Pool %d Free descs = %d",
4180 			__func__, num_pool, num_ext_desc);
4181 
4182 	status = dp_tso_detach_wifi3(soc);
4183 	if (status != QDF_STATUS_SUCCESS)
4184 		return status;
4185 
4186 	return QDF_STATUS_SUCCESS;
4187 }
4188 
4189 /**
4190  * dp_tx_soc_attach() - attach soc to dp tx
4191  * @soc: core txrx main context
4192  *
4193  * This function attaches dp tx to the main device context and
4194  * allocates and initializes the dp tx resources.
4195  *
4196  * Return: QDF_STATUS_SUCCESS: success
4197  *         QDF_STATUS_E_RESOURCES: Error return
4198  */
4199 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
4200 {
4201 	uint8_t i;
4202 	uint8_t num_pool;
4203 	uint32_t num_desc;
4204 	uint32_t num_ext_desc;
4205 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4206 
4207 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4208 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4209 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4210 
4211 	if (num_pool > MAX_TXDESC_POOLS)
4212 		goto fail;
4213 
4214 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4215 		goto fail;
4216 
4217 	dp_tx_flow_control_init(soc);
4218 
4219 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4220 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
4221 			__func__, num_pool, num_desc);
4222 
4223 	/* Allocate extension tx descriptor pools */
4224 	for (i = 0; i < num_pool; i++) {
4225 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
4226 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4227 				"MSDU Ext Desc Pool alloc %d failed %pK",
4228 				i, soc);
4229 
4230 			goto fail;
4231 		}
4232 	}
4233 
4234 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4235 			"%s MSDU Ext Desc Alloc %d, descs = %d",
4236 			__func__, num_pool, num_ext_desc);
4237 
4238 	status = dp_tso_attach_wifi3((void *)soc);
4239 	if (status != QDF_STATUS_SUCCESS)
4240 		goto fail;
4241 
4242 
4243 	/* Initialize descriptors in TCL Rings */
4244 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4245 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4246 			hal_tx_init_data_ring(soc->hal_soc,
4247 					soc->tcl_data_ring[i].hal_srng);
4248 		}
4249 	}
4250 
4251 	/*
4252 	 * todo - Add a runtime config option to enable this.
4253 	 */
4254 	/*
4255 	 * Due to multiple issues on NPR EMU, enable it selectively
4256 	 * only for NPR EMU, should be removed, once NPR platforms
4257 	 * are stable.
4258 	 */
4259 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4260 
4261 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4262 			"%s HAL Tx init Success", __func__);
4263 
4264 	return QDF_STATUS_SUCCESS;
4265 
4266 fail:
4267 	/* Detach will take care of freeing only allocated resources */
4268 	dp_tx_soc_detach(soc);
4269 	return QDF_STATUS_E_RESOURCES;
4270 }
4271