xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 861af9fad3d20cded5a90c420dd1ed901be32691)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #ifdef ATH_SUPPORT_IQUE
41 #include "dp_txrx_me.h"
42 #endif
43 
44 
45 /* TODO Add support in TSO */
46 #define DP_DESC_NUM_FRAG(x) 0
47 
48 /* disable TQM_BYPASS */
49 #define TQM_BYPASS_WAR 0
50 
51 /* invalid peer id for reinject */
52 #define DP_INVALID_PEER 0XFFFE
53 
54 /* mapping between HAL encrypt type and cdp_sec_type */
55 #define MAX_CDP_SEC_TYPE 12
56 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
57 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
58 					HAL_TX_ENCRYPT_TYPE_WEP_128,
59 					HAL_TX_ENCRYPT_TYPE_WEP_104,
60 					HAL_TX_ENCRYPT_TYPE_WEP_40,
61 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
62 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
63 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
64 					HAL_TX_ENCRYPT_TYPE_WAPI,
65 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
66 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
67 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
68 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
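/*
 * Note: sec_type_map is indexed by the cdp_sec_type value taken from the
 * vdev (or from the exception-path metadata) and is consumed in
 * dp_tx_hw_enqueue() to program the TCL encrypt type, e.g.
 * hal_tx_desc_set_encrypt_type(desc, sec_type_map[sec_type]).
 * The ordering is assumed to mirror enum cdp_sec_type.
 */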
69 
70 #ifdef QCA_TX_LIMIT_CHECK
71 /**
72  * dp_tx_limit_check() - Check if allocated tx descriptors reached
73  * soc max limit and pdev max limit
74  * @vdev: DP vdev handle
75  *
76  * Return: true if allocated tx descriptors reached max configured value, else
77  * false
78  */
79 static inline bool
80 dp_tx_limit_check(struct dp_vdev *vdev)
81 {
82 	struct dp_pdev *pdev = vdev->pdev;
83 	struct dp_soc *soc = pdev->soc;
84 
85 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
86 			soc->num_tx_allowed) {
87 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
88 			  "%s: queued packets are more than max tx, drop the frame",
89 			  __func__);
90 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
91 		return true;
92 	}
93 
94 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
95 			pdev->num_tx_allowed) {
96 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
97 			  "%s: queued packets are more than max tx, drop the frame",
98 			  __func__);
99 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
100 		return true;
101 	}
102 	return false;
103 }
104 
105 /**
106  * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
107  * @pdev: DP pdev handle
108  *
109  * Return: void
110  */
111 static inline void
112 dp_tx_outstanding_inc(struct dp_pdev *pdev)
113 {
114 	struct dp_soc *soc = pdev->soc;
115 
116 	qdf_atomic_inc(&pdev->num_tx_outstanding);
117 	qdf_atomic_inc(&soc->num_tx_outstanding);
118 }
119 
120 /**
121  * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
122  * @pdev: DP pdev handle
123  *
124  * Return: void
125  */
126 static inline void
127 dp_tx_outstanding_dec(struct dp_pdev *pdev)
128 {
129 	struct dp_soc *soc = pdev->soc;
130 
131 	qdf_atomic_dec(&pdev->num_tx_outstanding);
132 	qdf_atomic_dec(&soc->num_tx_outstanding);
133 }
134 
135 #else //QCA_TX_LIMIT_CHECK
136 static inline bool
137 dp_tx_limit_check(struct dp_vdev *vdev)
138 {
139 	return false;
140 }
141 
142 static inline void
143 dp_tx_outstanding_inc(struct dp_pdev *pdev)
144 {
145 	qdf_atomic_inc(&pdev->num_tx_outstanding);
146 }
147 
148 static inline void
149 dp_tx_outstanding_dec(struct dp_pdev *pdev)
150 {
151 	qdf_atomic_dec(&pdev->num_tx_outstanding);
152 }
153 #endif //QCA_TX_LIMIT_CHECK
154 
155 #if defined(FEATURE_TSO)
156 /**
157  * dp_tx_tso_unmap_segment() - Unmap TSO segment
158  *
159  * @soc - core txrx main context
160  * @seg_desc - tso segment descriptor
161  * @num_seg_desc - tso number segment descriptor
162  */
163 static void dp_tx_tso_unmap_segment(
164 		struct dp_soc *soc,
165 		struct qdf_tso_seg_elem_t *seg_desc,
166 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
167 {
168 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
169 	if (qdf_unlikely(!seg_desc)) {
170 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
171 			 __func__, __LINE__);
172 		qdf_assert(0);
173 	} else if (qdf_unlikely(!num_seg_desc)) {
174 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
175 			 __func__, __LINE__);
176 		qdf_assert(0);
177 	} else {
178 		bool is_last_seg;
179 		/* no tso segment left to do dma unmap */
180 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
181 			return;
182 
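		/*
		 * Unmapping the last outstanding segment tears down the DMA
		 * mapping of the whole jumbo nbuf, so flag it once the common
		 * segment count is about to drop to zero.
		 */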
183 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
184 					true : false;
185 		qdf_nbuf_unmap_tso_segment(soc->osdev,
186 					   seg_desc, is_last_seg);
187 		num_seg_desc->num_seg.tso_cmn_num_seg--;
188 	}
189 }
190 
191 /**
192  * dp_tx_tso_desc_release() - Release the tso segment and the tso num-seg
193  *                            descriptor back to their freelists
194  *
195  * @soc: soc device handle
196  * @tx_desc: Tx software descriptor
197  */
198 static void dp_tx_tso_desc_release(struct dp_soc *soc,
199 				   struct dp_tx_desc_s *tx_desc)
200 {
201 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
202 	if (qdf_unlikely(!tx_desc->tso_desc)) {
203 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
204 			  "%s %d TSO desc is NULL!",
205 			  __func__, __LINE__);
206 		qdf_assert(0);
207 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
208 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
209 			  "%s %d TSO num desc is NULL!",
210 			  __func__, __LINE__);
211 		qdf_assert(0);
212 	} else {
213 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
214 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
215 
216 		/* Add the tso num segment into the free list */
217 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
218 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
219 					    tx_desc->tso_num_desc);
220 			tx_desc->tso_num_desc = NULL;
221 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
222 		}
223 
224 		/* Add the tso segment into the free list*/
225 		dp_tx_tso_desc_free(soc,
226 				    tx_desc->pool_id, tx_desc->tso_desc);
227 		tx_desc->tso_desc = NULL;
228 	}
229 }
230 #else
231 static void dp_tx_tso_unmap_segment(
232 		struct dp_soc *soc,
233 		struct qdf_tso_seg_elem_t *seg_desc,
234 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
235 
236 {
237 }
238 
239 static void dp_tx_tso_desc_release(struct dp_soc *soc,
240 				   struct dp_tx_desc_s *tx_desc)
241 {
242 }
243 #endif
244 /**
245  * dp_tx_desc_release() - Release Tx Descriptor
246  * @tx_desc : Tx Descriptor
247  * @desc_pool_id: Descriptor Pool ID
248  *
249  * Deallocate all resources attached to Tx descriptor and free the Tx
250  * descriptor.
251  *
252  * Return: None
253  */
254 static void
255 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
256 {
257 	struct dp_pdev *pdev = tx_desc->pdev;
258 	struct dp_soc *soc;
259 	uint8_t comp_status = 0;
260 
261 	qdf_assert(pdev);
262 
263 	soc = pdev->soc;
264 
265 	dp_tx_outstanding_dec(pdev);
266 
267 	if (tx_desc->frm_type == dp_tx_frm_tso)
268 		dp_tx_tso_desc_release(soc, tx_desc);
269 
270 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
271 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
272 
273 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
274 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
275 
276 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
277 		qdf_atomic_dec(&pdev->num_tx_exception);
278 
279 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
280 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
281 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
282 							     soc->hal_soc);
283 	else
284 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
285 
286 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
287 		"Tx Completion Release desc %d status %d outstanding %d",
288 		tx_desc->id, comp_status,
289 		qdf_atomic_read(&pdev->num_tx_outstanding));
290 
291 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
292 	return;
293 }
294 
295 /**
296  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
297  * @vdev: DP vdev Handle
298  * @nbuf: skb
299  * @msdu_info: msdu_info required to create HTT metadata
300  *
301  * Prepares and fills HTT metadata in the frame pre-header for special frames
302  * that should be transmitted using varying transmit parameters.
303  * There are 2 VDEV modes that currently need this special metadata -
304  *  1) Mesh Mode
305  *  2) DSRC Mode
306  *
307  * Return: HTT metadata size
308  *
309  */
310 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
311 					  struct dp_tx_msdu_info_s *msdu_info)
312 {
313 	uint32_t *meta_data = msdu_info->meta_data;
314 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
315 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
316 
317 	uint8_t htt_desc_size;
318 
319 	/* Size rounded up to a multiple of 8 bytes */
320 	uint8_t htt_desc_size_aligned;
321 
322 	uint8_t *hdr = NULL;
323 
324 	/*
325 	 * Metadata - HTT MSDU Extension header
326 	 */
327 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
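	/*
	 * Round up to the next multiple of 8, e.g. a 30-byte descriptor
	 * becomes 32 bytes: (30 + 7) & ~0x7 = 32.
	 */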
328 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
329 
330 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
331 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
332 							   meta_data[0])) {
333 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
334 				 htt_desc_size_aligned)) {
335 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
336 							 htt_desc_size_aligned);
337 			if (!nbuf) {
338 				/*
339 				 * qdf_nbuf_realloc_headroom won't clone the skb
340 				 * the way skb_realloc_headroom does, so no free
341 				 * is needed here.
342 				 */
343 				DP_STATS_INC(vdev,
344 					     tx_i.dropped.headroom_insufficient,
345 					     1);
346 				qdf_print(" %s[%d] skb_realloc_headroom failed",
347 					  __func__, __LINE__);
348 				return 0;
349 			}
350 		}
351 		/* Fill and add HTT metaheader */
352 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
353 		if (!hdr) {
354 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
355 					"Error in filling HTT metadata");
356 
357 			return 0;
358 		}
359 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
360 
361 	} else if (vdev->opmode == wlan_op_mode_ocb) {
362 		/* Todo - Add support for DSRC */
363 	}
364 
365 	return htt_desc_size_aligned;
366 }
367 
368 /**
369  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
370  * @tso_seg: TSO segment to process
371  * @ext_desc: Pointer to MSDU extension descriptor
372  *
373  * Return: void
374  */
375 #if defined(FEATURE_TSO)
376 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
377 		void *ext_desc)
378 {
379 	uint8_t num_frag;
380 	uint32_t tso_flags;
381 
382 	/*
383 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
384 	 * tcp_flag_mask
385 	 *
386 	 * Checksum enable flags are set in TCL descriptor and not in Extension
387 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
388 	 */
389 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
390 
391 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
392 
393 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
394 		tso_seg->tso_flags.ip_len);
395 
396 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
397 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
398 
399 
400 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
401 		uint32_t lo = 0;
402 		uint32_t hi = 0;
403 
404 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
405 				  (tso_seg->tso_frags[num_frag].length));
406 
407 		qdf_dmaaddr_to_32s(
408 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
409 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
410 			tso_seg->tso_frags[num_frag].length);
411 	}
412 
413 	return;
414 }
415 #else
416 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
417 		void *ext_desc)
418 {
419 	return;
420 }
421 #endif
422 
423 #if defined(FEATURE_TSO)
424 /**
425  * dp_tx_free_tso_seg_list() - Loop through the tso segments
426  *                             allocated and free them
427  *
428  * @soc: soc handle
429  * @free_seg: list of tso segments
430  * @msdu_info: msdu descriptor
431  *
432  * Return - void
433  */
434 static void dp_tx_free_tso_seg_list(
435 		struct dp_soc *soc,
436 		struct qdf_tso_seg_elem_t *free_seg,
437 		struct dp_tx_msdu_info_s *msdu_info)
438 {
439 	struct qdf_tso_seg_elem_t *next_seg;
440 
441 	while (free_seg) {
442 		next_seg = free_seg->next;
443 		dp_tx_tso_desc_free(soc,
444 				    msdu_info->tx_queue.desc_pool_id,
445 				    free_seg);
446 		free_seg = next_seg;
447 	}
448 }
449 
450 /**
451  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
452  *                                 allocated and free them
453  *
454  * @soc:  soc handle
455  * @free_num_seg: list of tso number segments
456  * @msdu_info: msdu descriptor
457  * Return - void
458  */
459 static void dp_tx_free_tso_num_seg_list(
460 		struct dp_soc *soc,
461 		struct qdf_tso_num_seg_elem_t *free_num_seg,
462 		struct dp_tx_msdu_info_s *msdu_info)
463 {
464 	struct qdf_tso_num_seg_elem_t *next_num_seg;
465 
466 	while (free_num_seg) {
467 		next_num_seg = free_num_seg->next;
468 		dp_tso_num_seg_free(soc,
469 				    msdu_info->tx_queue.desc_pool_id,
470 				    free_num_seg);
471 		free_num_seg = next_num_seg;
472 	}
473 }
474 
475 /**
476  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
477  *                              do dma unmap for each segment
478  *
479  * @soc: soc handle
480  * @free_seg: list of tso segments
481  * @num_seg_desc: tso number segment descriptor
482  *
483  * Return - void
484  */
485 static void dp_tx_unmap_tso_seg_list(
486 		struct dp_soc *soc,
487 		struct qdf_tso_seg_elem_t *free_seg,
488 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
489 {
490 	struct qdf_tso_seg_elem_t *next_seg;
491 
492 	if (qdf_unlikely(!num_seg_desc)) {
493 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
494 		return;
495 	}
496 
497 	while (free_seg) {
498 		next_seg = free_seg->next;
499 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
500 		free_seg = next_seg;
501 	}
502 }
503 
504 #ifdef FEATURE_TSO_STATS
505 /**
506  * dp_tso_get_stats_idx() - Retrieve the tso packet id
507  * @pdev: pdev handle
508  *
509  * Return: id
510  */
511 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
512 {
513 	uint32_t stats_idx;
514 
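	/*
	 * tso_idx is a free-running counter wrapped to CDP_MAX_TSO_PACKETS,
	 * so the per-pdev TSO stats history behaves as a circular buffer and
	 * the oldest entry is overwritten.
	 */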
515 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
516 						% CDP_MAX_TSO_PACKETS);
517 	return stats_idx;
518 }
519 #else
520 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
521 {
522 	return 0;
523 }
524 #endif /* FEATURE_TSO_STATS */
525 
526 /**
527  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
528  *				     free the tso segment descriptors and
529  *				     the tso num-seg descriptor
530  *
531  * @soc:  soc handle
532  * @msdu_info: msdu descriptor
533  * @tso_seg_unmap: flag to show if dma unmap is necessary
534  *
535  * Return - void
536  */
537 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
538 					  struct dp_tx_msdu_info_s *msdu_info,
539 					  bool tso_seg_unmap)
540 {
541 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
542 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
543 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
544 					tso_info->tso_num_seg_list;
545 
546 	/* do dma unmap for each segment */
547 	if (tso_seg_unmap)
548 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
549 
550 	/* free all tso num-seg descriptors (typically there is only one) */
551 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
552 
553 	/* free all tso segment descriptors */
554 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
555 }
556 
557 /**
558  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
559  * @vdev: virtual device handle
560  * @msdu: network buffer
561  * @msdu_info: meta data associated with the msdu
562  *
563  * Return: QDF_STATUS_SUCCESS on success
564  */
565 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
566 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
567 {
568 	struct qdf_tso_seg_elem_t *tso_seg;
569 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
570 	struct dp_soc *soc = vdev->pdev->soc;
571 	struct dp_pdev *pdev = vdev->pdev;
572 	struct qdf_tso_info_t *tso_info;
573 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
574 	tso_info = &msdu_info->u.tso_info;
575 	tso_info->curr_seg = NULL;
576 	tso_info->tso_seg_list = NULL;
577 	tso_info->num_segs = num_seg;
578 	msdu_info->frm_type = dp_tx_frm_tso;
579 	tso_info->tso_num_seg_list = NULL;
580 
581 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
582 
583 	while (num_seg) {
584 		tso_seg = dp_tx_tso_desc_alloc(
585 				soc, msdu_info->tx_queue.desc_pool_id);
586 		if (tso_seg) {
587 			tso_seg->next = tso_info->tso_seg_list;
588 			tso_info->tso_seg_list = tso_seg;
589 			num_seg--;
590 		} else {
591 			dp_err_rl("Failed to alloc tso seg desc");
592 			DP_STATS_INC_PKT(vdev->pdev,
593 					 tso_stats.tso_no_mem_dropped, 1,
594 					 qdf_nbuf_len(msdu));
595 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
596 
597 			return QDF_STATUS_E_NOMEM;
598 		}
599 	}
600 
601 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
602 
603 	tso_num_seg = dp_tso_num_seg_alloc(soc,
604 			msdu_info->tx_queue.desc_pool_id);
605 
606 	if (tso_num_seg) {
607 		tso_num_seg->next = tso_info->tso_num_seg_list;
608 		tso_info->tso_num_seg_list = tso_num_seg;
609 	} else {
610 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
611 			 __func__);
612 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
613 
614 		return QDF_STATUS_E_NOMEM;
615 	}
616 
617 	msdu_info->num_seg =
618 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
619 
620 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
621 			msdu_info->num_seg);
622 
623 	if (!(msdu_info->num_seg)) {
624 		/*
625 		 * Free allocated TSO seg desc and number seg desc,
626 		 * do unmap for segments if dma map has done.
627 		 */
628 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
629 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
630 
631 		return QDF_STATUS_E_INVAL;
632 	}
633 
634 	tso_info->curr_seg = tso_info->tso_seg_list;
635 
636 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
637 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
638 			     msdu, msdu_info->num_seg);
639 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
640 				    tso_info->msdu_stats_idx);
641 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
642 	return QDF_STATUS_SUCCESS;
643 }
644 #else
645 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
646 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
647 {
648 	return QDF_STATUS_E_NOMEM;
649 }
650 #endif
651 
652 /**
653  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
654  * @vdev: DP Vdev handle
655  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
656  * @desc_pool_id: Descriptor Pool ID
657  *
658  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
659  */
660 static
661 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
662 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
663 {
664 	uint8_t i;
665 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
666 	struct dp_tx_seg_info_s *seg_info;
667 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
668 	struct dp_soc *soc = vdev->pdev->soc;
669 
670 	/* Allocate an extension descriptor */
671 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
672 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
673 
674 	if (!msdu_ext_desc) {
675 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
676 		return NULL;
677 	}
678 
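	/*
	 * For mesh frames routed through FW, the HTT metadata is appended
	 * right after the fixed extension descriptor words so that a single
	 * cache-synced block carries both (see hal_tx_ext_desc_sync below).
	 */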
679 	if (msdu_info->exception_fw &&
680 			qdf_unlikely(vdev->mesh_vdev)) {
681 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
682 				&msdu_info->meta_data[0],
683 				sizeof(struct htt_tx_msdu_desc_ext2_t));
684 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
685 	}
686 
687 	switch (msdu_info->frm_type) {
688 	case dp_tx_frm_sg:
689 	case dp_tx_frm_me:
690 	case dp_tx_frm_raw:
691 		seg_info = msdu_info->u.sg_info.curr_seg;
692 		/* Update the buffer pointers in MSDU Extension Descriptor */
693 		for (i = 0; i < seg_info->frag_cnt; i++) {
694 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
695 				seg_info->frags[i].paddr_lo,
696 				seg_info->frags[i].paddr_hi,
697 				seg_info->frags[i].len);
698 		}
699 
700 		break;
701 
702 	case dp_tx_frm_tso:
703 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
704 				&cached_ext_desc[0]);
705 		break;
706 
707 
708 	default:
709 		break;
710 	}
711 
712 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
713 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
714 
715 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
716 			msdu_ext_desc->vaddr);
717 
718 	return msdu_ext_desc;
719 }
720 
721 /**
722  * dp_tx_trace_pkt() - Trace TX packet at DP layer
723  *
724  * @skb: skb to be traced
725  * @msdu_id: msdu_id of the packet
726  * @vdev_id: vdev_id of the packet
727  *
728  * Return: None
729  */
730 #ifdef DP_DISABLE_TX_PKT_TRACE
731 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
732 			    uint8_t vdev_id)
733 {
734 }
735 #else
736 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
737 			    uint8_t vdev_id)
738 {
739 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
740 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
741 	DPTRACE(qdf_dp_trace_ptr(skb,
742 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
743 				 QDF_TRACE_DEFAULT_PDEV_ID,
744 				 qdf_nbuf_data_addr(skb),
745 				 sizeof(qdf_nbuf_data(skb)),
746 				 msdu_id, vdev_id));
747 
748 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
749 
750 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
751 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
752 				      msdu_id, QDF_TX));
753 }
754 #endif
755 
756 /**
757  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
758  * @vdev: DP vdev handle
759  * @nbuf: skb
760  * @desc_pool_id: Descriptor pool ID
761  * @msdu_info: MSDU info containing metadata to the fw
762  * @tx_exc_metadata: Handle that holds exception path metadata
763  * Allocate and prepare Tx descriptor with msdu information.
764  *
765  * Return: Pointer to Tx Descriptor on success,
766  *         NULL on failure
767  */
768 static
769 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
770 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
771 		struct dp_tx_msdu_info_s *msdu_info,
772 		struct cdp_tx_exception_metadata *tx_exc_metadata)
773 {
774 	uint8_t align_pad;
775 	uint8_t is_exception = 0;
776 	uint8_t htt_hdr_size;
777 	struct dp_tx_desc_s *tx_desc;
778 	struct dp_pdev *pdev = vdev->pdev;
779 	struct dp_soc *soc = pdev->soc;
780 
781 	if (dp_tx_limit_check(vdev))
782 		return NULL;
783 
784 	/* Allocate software Tx descriptor */
785 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
786 	if (qdf_unlikely(!tx_desc)) {
787 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
788 		return NULL;
789 	}
790 
791 	dp_tx_outstanding_inc(pdev);
792 
793 	/* Initialize the SW tx descriptor */
794 	tx_desc->nbuf = nbuf;
795 	tx_desc->frm_type = dp_tx_frm_std;
796 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
797 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
798 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
799 	tx_desc->vdev = vdev;
800 	tx_desc->pdev = pdev;
801 	tx_desc->msdu_ext_desc = NULL;
802 	tx_desc->pkt_offset = 0;
803 
804 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
805 
806 	if (qdf_unlikely(vdev->multipass_en)) {
807 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
808 			goto failure;
809 	}
810 
811 	/*
812 	 * For special modes (vdev_type == ocb or mesh), data frames should be
813 	 * transmitted using varying transmit parameters (tx spec) which include
814 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
815 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
816 	 * These frames are sent as exception packets to firmware.
817 	 *
818 	 * HW requirement is that metadata should always point to a
819 	 * HW requirement is that metadata should always point to an
820 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
821 	 *  to get 8-byte aligned start address along with align_pad added
822 	 *
823 	 *  |-----------------------------|
824 	 *  |                             |
825 	 *  |-----------------------------| <-----Buffer Pointer Address given
826 	 *  |                             |  ^    in HW descriptor (aligned)
827 	 *  |       HTT Metadata          |  |
828 	 *  |                             |  |
829 	 *  |                             |  | Packet Offset given in descriptor
830 	 *  |                             |  |
831 	 *  |-----------------------------|  |
832 	 *  |       Alignment Pad         |  v
833 	 *  |-----------------------------| <----- Actual buffer start address
834 	 *  |        SKB Data             |           (Unaligned)
835 	 *  |                             |
836 	 *  |                             |
837 	 *  |                             |
838 	 *  |                             |
839 	 *  |                             |
840 	 *  |-----------------------------|
841 	 */
842 	if (qdf_unlikely((msdu_info->exception_fw)) ||
843 				(vdev->opmode == wlan_op_mode_ocb) ||
844 				(tx_exc_metadata &&
845 				tx_exc_metadata->is_tx_sniffer)) {
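		/*
		 * Pushing (addr & 0x7) bytes moves the buffer start back onto
		 * an 8-byte boundary, e.g. a data pointer ending in ...0x5
		 * gets a 5-byte alignment pad.
		 */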
846 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
847 
848 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
849 			DP_STATS_INC(vdev,
850 				     tx_i.dropped.headroom_insufficient, 1);
851 			goto failure;
852 		}
853 
854 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
855 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
856 					"qdf_nbuf_push_head failed");
857 			goto failure;
858 		}
859 
860 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
861 				msdu_info);
862 		if (htt_hdr_size == 0)
863 			goto failure;
864 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
865 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
866 		is_exception = 1;
867 	}
868 
869 #if !TQM_BYPASS_WAR
870 	if (is_exception || tx_exc_metadata)
871 #endif
872 	{
873 		/* Temporary WAR due to TQM VP issues */
874 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
875 		qdf_atomic_inc(&pdev->num_tx_exception);
876 	}
877 
878 	return tx_desc;
879 
880 failure:
881 	dp_tx_desc_release(tx_desc, desc_pool_id);
882 	return NULL;
883 }
884 
885 /**
886  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
887  * @vdev: DP vdev handle
888  * @nbuf: skb
889  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
890  * @desc_pool_id : Descriptor Pool ID
891  *
892  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
893  * information. For frames with fragments, allocate and prepare
894  * an MSDU extension descriptor
895  *
896  * Return: Pointer to Tx Descriptor on success,
897  *         NULL on failure
898  */
899 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
900 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
901 		uint8_t desc_pool_id)
902 {
903 	struct dp_tx_desc_s *tx_desc;
904 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
905 	struct dp_pdev *pdev = vdev->pdev;
906 	struct dp_soc *soc = pdev->soc;
907 
908 	if (dp_tx_limit_check(vdev))
909 		return NULL;
910 
911 	/* Allocate software Tx descriptor */
912 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
913 	if (!tx_desc) {
914 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
915 		return NULL;
916 	}
917 
918 	dp_tx_outstanding_inc(pdev);
919 
920 	/* Initialize the SW tx descriptor */
921 	tx_desc->nbuf = nbuf;
922 	tx_desc->frm_type = msdu_info->frm_type;
923 	tx_desc->tx_encap_type = vdev->tx_encap_type;
924 	tx_desc->vdev = vdev;
925 	tx_desc->pdev = pdev;
926 	tx_desc->pkt_offset = 0;
927 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
928 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
929 
930 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
931 
932 	/* Handle scattered frames - TSO/SG/ME */
933 	/* Allocate and prepare an extension descriptor for scattered frames */
934 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
935 	if (!msdu_ext_desc) {
936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
937 				"%s Tx Extension Descriptor Alloc Fail",
938 				__func__);
939 		goto failure;
940 	}
941 
942 #if TQM_BYPASS_WAR
943 	/* Temporary WAR due to TQM VP issues */
944 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
945 	qdf_atomic_inc(&pdev->num_tx_exception);
946 #endif
947 	if (qdf_unlikely(msdu_info->exception_fw))
948 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
949 
950 	tx_desc->msdu_ext_desc = msdu_ext_desc;
951 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
952 
953 	return tx_desc;
954 failure:
955 	dp_tx_desc_release(tx_desc, desc_pool_id);
956 	return NULL;
957 }
958 
959 /**
960  * dp_tx_prepare_raw() - Prepare RAW packet TX
961  * @vdev: DP vdev handle
962  * @nbuf: buffer pointer
963  * @seg_info: Pointer to Segment info Descriptor to be prepared
964  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
965  *     descriptor
966  *
967  * Return: nbuf on success, NULL on failure
968  */
969 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
970 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
971 {
972 	qdf_nbuf_t curr_nbuf = NULL;
973 	uint16_t total_len = 0;
974 	qdf_dma_addr_t paddr;
975 	int32_t i;
976 	int32_t mapped_buf_num = 0;
977 
978 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
979 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
980 
981 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
982 
983 	/* Continue only if frames are of DATA type */
984 	if (!DP_FRAME_IS_DATA(qos_wh)) {
985 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
986 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
987 			  "Pkt recd is not of data type");
988 		goto error;
989 	}
990 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
991 	if (vdev->raw_mode_war &&
992 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
993 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
994 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
995 
996 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
997 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
998 
999 		if (QDF_STATUS_SUCCESS !=
1000 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1001 						   curr_nbuf,
1002 						   QDF_DMA_TO_DEVICE,
1003 						   curr_nbuf->len)) {
1004 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1005 				"%s dma map error ", __func__);
1006 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1007 			mapped_buf_num = i;
1008 			goto error;
1009 		}
1010 
1011 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1012 		seg_info->frags[i].paddr_lo = paddr;
1013 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1014 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1015 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1016 		total_len += qdf_nbuf_len(curr_nbuf);
1017 	}
1018 
1019 	seg_info->frag_cnt = i;
1020 	seg_info->total_len = total_len;
1021 	seg_info->next = NULL;
1022 
1023 	sg_info->curr_seg = seg_info;
1024 
1025 	msdu_info->frm_type = dp_tx_frm_raw;
1026 	msdu_info->num_seg = 1;
1027 
1028 	return nbuf;
1029 
1030 error:
1031 	i = 0;
1032 	while (nbuf) {
1033 		curr_nbuf = nbuf;
1034 		if (i < mapped_buf_num) {
1035 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1036 						     QDF_DMA_TO_DEVICE,
1037 						     curr_nbuf->len);
1038 			i++;
1039 		}
1040 		nbuf = qdf_nbuf_next(nbuf);
1041 		qdf_nbuf_free(curr_nbuf);
1042 	}
1043 	return NULL;
1044 
1045 }
1046 
1047 /**
1048  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1049  * @soc: DP soc handle
1050  * @nbuf: Buffer pointer
1051  *
1052  * unmap the chain of nbufs that belong to this RAW frame.
1053  *
1054  * Return: None
1055  */
1056 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1057 				    qdf_nbuf_t nbuf)
1058 {
1059 	qdf_nbuf_t cur_nbuf = nbuf;
1060 
1061 	do {
1062 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1063 					     QDF_DMA_TO_DEVICE,
1064 					     cur_nbuf->len);
1065 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1066 	} while (cur_nbuf);
1067 }
1068 
1069 #ifdef VDEV_PEER_PROTOCOL_COUNT
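/*
 * Per-peer protocol counting is attempted only for linear, non-TSO,
 * non-raw frames; anything else is skipped below since the classifier is
 * assumed to parse an Ethernet header in place and cannot walk scattered
 * or 802.11-encapsulated payloads.
 */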
1070 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1071 { \
1072 	qdf_nbuf_t nbuf_local; \
1073 	struct dp_vdev *vdev_local = vdev_hdl; \
1074 	do { \
1075 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1076 			break; \
1077 		nbuf_local = nbuf; \
1078 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1079 			 htt_cmn_pkt_type_raw)) \
1080 			break; \
1081 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1082 			break; \
1083 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1084 			break; \
1085 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1086 						       (nbuf_local), \
1087 						       NULL, 1, 0); \
1088 	} while (0); \
1089 }
1090 #else
1091 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1092 #endif
1093 
1094 /**
1095  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1096  * @soc: DP Soc Handle
1097  * @vdev: DP vdev handle
1098  * @tx_desc: Tx Descriptor Handle
1099  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1100  * @fw_metadata: Metadata to send to Target Firmware along with frame
1101  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1102  * @tx_exc_metadata: Handle that holds exception path meta data
1103  *
1104  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1105  *  from software Tx descriptor
1106  *
1107  * Return: QDF_STATUS_SUCCESS: success
1108  *         QDF_STATUS_E_RESOURCES: Error return
1109  */
1110 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1111 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
1112 				   uint16_t fw_metadata, uint8_t ring_id,
1113 				   struct cdp_tx_exception_metadata
1114 					*tx_exc_metadata)
1115 {
1116 	uint8_t type;
1117 	void *hal_tx_desc;
1118 	uint32_t *hal_tx_desc_cached;
1119 
1120 	/*
1121 	 * Initialize the cached descriptor statically here to avoid
1122 	 * a separate memset/qdf_mem_set call
1123 	 */
1124 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1125 
1126 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1127 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1128 			tx_exc_metadata->sec_type : vdev->sec_type);
1129 
1130 	/* Return Buffer Manager ID */
1131 	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
1132 
1133 	hal_ring_handle_t hal_ring_hdl = NULL;
1134 
1135 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1136 
1137 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1138 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1139 		return QDF_STATUS_E_RESOURCES;
1140 	}
1141 
1142 	hal_tx_desc_cached = (void *) cached_desc;
1143 
1144 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1145 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1146 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1147 		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
1148 	} else {
1149 		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
1150 					tx_desc->pkt_offset;
1151 		type = HAL_TX_BUF_TYPE_BUFFER;
1152 		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1153 	}
1154 
1155 	qdf_assert_always(tx_desc->dma_addr);
1156 
1157 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
1158 				 tx_desc->dma_addr, bm_id, tx_desc->id,
1159 				 type);
1160 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1161 				vdev->lmac_id);
1162 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1163 				    vdev->search_type);
1164 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1165 				     vdev->bss_ast_idx);
1166 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1167 					  vdev->dscp_tid_map_id);
1168 
1169 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1170 			sec_type_map[sec_type]);
1171 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1172 				      (vdev->bss_ast_hash & 0xF));
1173 
1174 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1175 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1176 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1177 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1178 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1179 					  vdev->hal_desc_addr_search_flags);
1180 
1181 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1182 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1183 
1184 	/* verify checksum offload configuration */
1185 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1186 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1187 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1188 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1189 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1190 	}
1191 
1192 	if (tid != HTT_TX_EXT_TID_INVALID)
1193 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1194 
1195 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1196 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1197 
1198 	if (qdf_unlikely(vdev->pdev->delay_stats_flag))
1199 		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
1200 
1201 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1202 			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
1203 			 tx_desc->pkt_offset, tx_desc->id);
1204 
1205 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1206 
1207 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1208 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1209 			  "%s %d : HAL RING Access Failed -- %pK",
1210 			 __func__, __LINE__, hal_ring_hdl);
1211 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1212 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1213 		return status;
1214 	}
1215 
1216 	/* Sync cached descriptor with HW */
1217 
1218 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1219 	if (qdf_unlikely(!hal_tx_desc)) {
1220 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1221 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1222 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1223 		goto ring_access_fail;
1224 	}
1225 
1226 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1227 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1228 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1229 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1230 	status = QDF_STATUS_SUCCESS;
1231 
1232 ring_access_fail:
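	/*
	 * If the bus is runtime-resumed, end the SRNG access normally (which
	 * updates the hardware head pointer); otherwise end it without the
	 * doorbell and mark the ring for a deferred flush once runtime PM
	 * brings the link back up.
	 */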
1233 	if (hif_pm_runtime_get(soc->hif_handle,
1234 			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
1235 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1236 		hif_pm_runtime_put(soc->hif_handle,
1237 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1238 	} else {
1239 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1240 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1241 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1242 	}
1243 
1244 	return status;
1245 }
1246 
1247 
1248 /**
1249  * dp_cce_classify() - Classify the frame based on CCE rules
1250  * @vdev: DP vdev handle
1251  * @nbuf: skb
1252  *
1253  * Classify frames based on CCE rules
1254  * Return: bool (true if classified,
1255  *               else false)
1256  */
1257 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1258 {
1259 	qdf_ether_header_t *eh = NULL;
1260 	uint16_t   ether_type;
1261 	qdf_llc_t *llcHdr;
1262 	qdf_nbuf_t nbuf_clone = NULL;
1263 	qdf_dot3_qosframe_t *qos_wh = NULL;
1264 
1265 	/* for mesh packets don't do any classification */
1266 	if (qdf_unlikely(vdev->mesh_vdev))
1267 		return false;
1268 
1269 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1270 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1271 		ether_type = eh->ether_type;
1272 		llcHdr = (qdf_llc_t *)(nbuf->data +
1273 					sizeof(qdf_ether_header_t));
1274 	} else {
1275 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1276 		/* For encrypted packets don't do any classification */
1277 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1278 			return false;
1279 
1280 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1281 			if (qdf_unlikely(
1282 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1283 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1284 
1285 				ether_type = *(uint16_t *)(nbuf->data
1286 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1287 						+ sizeof(qdf_llc_t)
1288 						- sizeof(ether_type));
1289 				llcHdr = (qdf_llc_t *)(nbuf->data +
1290 						QDF_IEEE80211_4ADDR_HDR_LEN);
1291 			} else {
1292 				ether_type = *(uint16_t *)(nbuf->data
1293 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1294 						+ sizeof(qdf_llc_t)
1295 						- sizeof(ether_type));
1296 				llcHdr = (qdf_llc_t *)(nbuf->data +
1297 					QDF_IEEE80211_3ADDR_HDR_LEN);
1298 			}
1299 
1300 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1301 				&& (ether_type ==
1302 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1303 
1304 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1305 				return true;
1306 			}
1307 		}
1308 
1309 		return false;
1310 	}
1311 
1312 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1313 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1314 				sizeof(*llcHdr));
1315 		nbuf_clone = qdf_nbuf_clone(nbuf);
1316 		if (qdf_unlikely(nbuf_clone)) {
1317 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1318 
1319 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1320 				qdf_nbuf_pull_head(nbuf_clone,
1321 						sizeof(qdf_net_vlanhdr_t));
1322 			}
1323 		}
1324 	} else {
1325 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1326 			nbuf_clone = qdf_nbuf_clone(nbuf);
1327 			if (qdf_unlikely(nbuf_clone)) {
1328 				qdf_nbuf_pull_head(nbuf_clone,
1329 					sizeof(qdf_net_vlanhdr_t));
1330 			}
1331 		}
1332 	}
1333 
1334 	if (qdf_unlikely(nbuf_clone))
1335 		nbuf = nbuf_clone;
1336 
1337 
1338 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1339 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1340 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1341 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1342 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1343 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1344 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1345 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1346 		if (qdf_unlikely(nbuf_clone))
1347 			qdf_nbuf_free(nbuf_clone);
1348 		return true;
1349 	}
1350 
1351 	if (qdf_unlikely(nbuf_clone))
1352 		qdf_nbuf_free(nbuf_clone);
1353 
1354 	return false;
1355 }
1356 
1357 /**
1358  * dp_tx_get_tid() - Obtain TID to be used for this frame
1359  * @vdev: DP vdev handle
1360  * @nbuf: skb
1361  *
1362  * Extract the DSCP or PCP information from frame and map into TID value.
1363  *
1364  * Return: void
1365  */
1366 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1367 			  struct dp_tx_msdu_info_s *msdu_info)
1368 {
1369 	uint8_t tos = 0, dscp_tid_override = 0;
1370 	uint8_t *hdr_ptr, *L3datap;
1371 	uint8_t is_mcast = 0;
1372 	qdf_ether_header_t *eh = NULL;
1373 	qdf_ethervlan_header_t *evh = NULL;
1374 	uint16_t   ether_type;
1375 	qdf_llc_t *llcHdr;
1376 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1377 
1378 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1379 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1380 		eh = (qdf_ether_header_t *)nbuf->data;
1381 		hdr_ptr = eh->ether_dhost;
1382 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1383 	} else {
1384 		qdf_dot3_qosframe_t *qos_wh =
1385 			(qdf_dot3_qosframe_t *) nbuf->data;
1386 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1387 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1388 		return;
1389 	}
1390 
1391 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1392 	ether_type = eh->ether_type;
1393 
1394 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1395 	/*
1396 	 * Check if packet is dot3 or eth2 type.
1397 	 */
1398 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1399 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1400 				sizeof(*llcHdr));
1401 
1402 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1403 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1404 				sizeof(*llcHdr);
1405 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1406 					+ sizeof(*llcHdr) +
1407 					sizeof(qdf_net_vlanhdr_t));
1408 		} else {
1409 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1410 				sizeof(*llcHdr);
1411 		}
1412 	} else {
1413 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1414 			evh = (qdf_ethervlan_header_t *) eh;
1415 			ether_type = evh->ether_type;
1416 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1417 		}
1418 	}
1419 
1420 	/*
1421 	 * Find priority from IP TOS DSCP field
1422 	 */
1423 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1424 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1425 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1426 			/* Only for unicast frames */
1427 			if (!is_mcast) {
1428 				/* send it on VO queue */
1429 				msdu_info->tid = DP_VO_TID;
1430 			}
1431 		} else {
1432 			/*
1433 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1434 			 * from TOS byte.
1435 			 */
1436 			tos = ip->ip_tos;
1437 			dscp_tid_override = 1;
1438 
1439 		}
1440 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1441 		/* TODO
1442 		 * use flowlabel
1443 		 *igmpmld cases to be handled in phase 2
1444 		 * IGMP/MLD cases to be handled in phase 2
1445 		unsigned long ver_pri_flowlabel;
1446 		unsigned long pri;
1447 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1448 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1449 			DP_IPV6_PRIORITY_SHIFT;
1450 		tos = pri;
1451 		dscp_tid_override = 1;
1452 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1453 		msdu_info->tid = DP_VO_TID;
1454 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1455 		/* Only for unicast frames */
1456 		if (!is_mcast) {
1457 			/* send ucast arp on VO queue */
1458 			msdu_info->tid = DP_VO_TID;
1459 		}
1460 	}
1461 
1462 	/*
1463 	 * Assign all MCAST packets to BE
1464 	 */
1465 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1466 		if (is_mcast) {
1467 			tos = 0;
1468 			dscp_tid_override = 1;
1469 		}
1470 	}
1471 
1472 	if (dscp_tid_override == 1) {
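		/*
		 * Drop the 2 ECN bits and keep the 6-bit DSCP, e.g. a TOS of
		 * 0xB8 (DSCP 46, EF) selects entry 46 of the DSCP-TID map.
		 */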
1473 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1474 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1475 	}
1476 
1477 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1478 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1479 
1480 	return;
1481 }
1482 
1483 /**
1484  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1485  * @vdev: DP vdev handle
1486  * @nbuf: skb
1487  *
1488  * Software based TID classification is required when more than 2 DSCP-TID
1489  * mapping tables are needed.
1490  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1491  *
1492  * Return: void
1493  */
1494 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1495 				      struct dp_tx_msdu_info_s *msdu_info)
1496 {
1497 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1498 
1499 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1500 
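	/*
	 * If the vdev's map id fits within the hardware DSCP-TID tables, the
	 * TID is resolved by hardware and software classification is skipped;
	 * only overflow map ids fall through to dp_tx_get_tid().
	 */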
1501 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1502 		return;
1503 
1504 	/* for mesh packets don't do any classification */
1505 	if (qdf_unlikely(vdev->mesh_vdev))
1506 		return;
1507 
1508 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1509 }
1510 
1511 #ifdef FEATURE_WLAN_TDLS
1512 /**
1513  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1514  * @tx_desc: TX descriptor
1515  *
1516  * Return: None
1517  */
1518 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1519 {
1520 	if (tx_desc->vdev) {
1521 		if (tx_desc->vdev->is_tdls_frame) {
1522 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1523 			tx_desc->vdev->is_tdls_frame = false;
1524 		}
1525 	}
1526 }
1527 
1528 /**
1529  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1530  * @soc: dp_soc handle
1531  * @tx_desc: TX descriptor
1532  * @vdev: datapath vdev handle
1533  *
1534  * Return: None
1535  */
1536 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1537 					 struct dp_tx_desc_s *tx_desc,
1538 					 struct dp_vdev *vdev)
1539 {
1540 	struct hal_tx_completion_status ts = {0};
1541 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1542 
1543 	if (qdf_unlikely(!vdev)) {
1544 		dp_err_rl("vdev is null!");
1545 		goto error;
1546 	}
1547 
1548 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1549 	if (vdev->tx_non_std_data_callback.func) {
1550 		qdf_nbuf_set_next(nbuf, NULL);
1551 		vdev->tx_non_std_data_callback.func(
1552 				vdev->tx_non_std_data_callback.ctxt,
1553 				nbuf, ts.status);
1554 		return;
1555 	} else {
1556 		dp_err_rl("callback func is null");
1557 	}
1558 
1559 error:
1560 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1561 	qdf_nbuf_free(nbuf);
1562 }
1563 
1564 /**
1565  * dp_tx_msdu_single_map() - do nbuf map
1566  * @vdev: DP vdev handle
1567  * @tx_desc: DP TX descriptor pointer
1568  * @nbuf: skb pointer
1569  *
1570  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1571  * operation done in other components.
1572  *
1573  * Return: QDF_STATUS
1574  */
1575 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1576 					       struct dp_tx_desc_s *tx_desc,
1577 					       qdf_nbuf_t nbuf)
1578 {
1579 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1580 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1581 						  nbuf,
1582 						  QDF_DMA_TO_DEVICE,
1583 						  nbuf->len);
1584 	else
1585 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1586 					   QDF_DMA_TO_DEVICE);
1587 }
1588 #else
1589 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1590 {
1591 }
1592 
1593 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1594 						struct dp_tx_desc_s *tx_desc,
1595 						struct dp_vdev *vdev)
1596 {
1597 }
1598 
1599 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1600 					       struct dp_tx_desc_s *tx_desc,
1601 					       qdf_nbuf_t nbuf)
1602 {
1603 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1604 					  nbuf,
1605 					  QDF_DMA_TO_DEVICE,
1606 					  nbuf->len);
1607 }
1608 #endif
1609 
1610 /**
1611  * dp_tx_frame_is_drop() - checks if the packet is loopback
1612  * @vdev: DP vdev handle
1613  * @srcmac: source MAC address, @dstmac: destination MAC address
1614  *
1615  * Return: 1 if frame needs to be dropped else 0
1616  */
1617 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1618 {
1619 	struct dp_pdev *pdev = NULL;
1620 	struct dp_ast_entry *src_ast_entry = NULL;
1621 	struct dp_ast_entry *dst_ast_entry = NULL;
1622 	struct dp_soc *soc = NULL;
1623 
1624 	qdf_assert(vdev);
1625 	pdev = vdev->pdev;
1626 	qdf_assert(pdev);
1627 	soc = pdev->soc;
1628 
1629 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1630 				(soc, dstmac, vdev->pdev->pdev_id);
1631 
1632 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1633 				(soc, srcmac, vdev->pdev->pdev_id);
1634 	if (dst_ast_entry && src_ast_entry) {
1635 		if (dst_ast_entry->peer->peer_ids[0] ==
1636 				src_ast_entry->peer->peer_ids[0])
1637 			return 1;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 /**
1644  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1645  * @vdev: DP vdev handle
1646  * @nbuf: skb
1647  * @msdu_info: MSDU info carrying the TID (for overriding the default
1648  *             DSCP-TID mapping), the metadata to the fw and the Tx queue
1649  *             to be used for this frame
1650  * @peer_id: peer_id of the peer in case of NAWDS frames
1651  * @tx_exc_metadata: Handle that holds exception path metadata
1652  *
1653  * Return: NULL on success,
1654  *         nbuf when it fails to send
1655  */
1656 qdf_nbuf_t
1657 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1658 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1659 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1660 {
1661 	struct dp_pdev *pdev = vdev->pdev;
1662 	struct dp_soc *soc = pdev->soc;
1663 	struct dp_tx_desc_s *tx_desc;
1664 	QDF_STATUS status;
1665 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1666 	uint16_t htt_tcl_metadata = 0;
1667 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1668 	uint8_t tid = msdu_info->tid;
1669 	struct cdp_tid_tx_stats *tid_stats = NULL;
1670 
1671 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1672 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1673 			msdu_info, tx_exc_metadata);
1674 	if (!tx_desc) {
1675 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1676 			  vdev, tx_q->desc_pool_id);
1677 		drop_code = TX_DESC_ERR;
1678 		goto fail_return;
1679 	}
1680 
1681 	if (qdf_unlikely(soc->cce_disable)) {
1682 		if (dp_cce_classify(vdev, nbuf) == true) {
1683 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1684 			tid = DP_VO_TID;
1685 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1686 		}
1687 	}
1688 
1689 	dp_tx_update_tdls_flags(tx_desc);
1690 
1691 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1692 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1693 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1694 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1695 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1696 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1697 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1698 				peer_id);
1699 	} else
1700 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1701 
1702 	if (msdu_info->exception_fw)
1703 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1704 
1705 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
1706 					 !pdev->enhanced_stats_en);
1707 
1708 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
1709 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
1710 		/* Handle failure */
1711 		dp_err("qdf_nbuf_map failed");
1712 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1713 		drop_code = TX_DMA_MAP_ERR;
1714 		goto release_desc;
1715 	}
1716 
1717 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1718 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1719 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1720 
1721 	if (status != QDF_STATUS_SUCCESS) {
1722 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1723 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1724 			  __func__, tx_desc, tx_q->ring_id);
1725 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
1726 					     QDF_DMA_TO_DEVICE,
1727 					     nbuf->len);
1728 		drop_code = TX_HW_ENQUEUE;
1729 		goto release_desc;
1730 	}
1731 
1732 	return NULL;
1733 
1734 release_desc:
1735 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1736 
1737 fail_return:
1738 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1739 	tid_stats = &pdev->stats.tid_stats.
1740 		    tid_tx_stats[tx_q->ring_id][tid];
1741 	tid_stats->swdrop_cnt[drop_code]++;
1742 	return nbuf;
1743 }
1744 
1745 /**
1746  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1747  * @vdev: DP vdev handle
1748  * @nbuf: skb
1749  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1750  *
1751  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1752  *
1753  * Return: NULL on success,
1754  *         nbuf when it fails to send
1755  */
1756 #if QDF_LOCK_STATS
1757 noinline
1758 #else
1759 #endif
1760 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1761 				    struct dp_tx_msdu_info_s *msdu_info)
1762 {
1763 	uint8_t i;
1764 	struct dp_pdev *pdev = vdev->pdev;
1765 	struct dp_soc *soc = pdev->soc;
1766 	struct dp_tx_desc_s *tx_desc;
1767 	bool is_cce_classified = false;
1768 	QDF_STATUS status;
1769 	uint16_t htt_tcl_metadata = 0;
1770 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1771 	struct cdp_tid_tx_stats *tid_stats = NULL;
1772 
1773 	if (qdf_unlikely(soc->cce_disable)) {
1774 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1775 		if (is_cce_classified) {
1776 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1777 			msdu_info->tid = DP_VO_TID;
1778 		}
1779 	}
1780 
1781 	if (msdu_info->frm_type == dp_tx_frm_me)
1782 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1783 
1784 	i = 0;
1785 	/* Print statement to track i and num_seg */
1786 	/*
1787 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1788 	 * descriptors using information in msdu_info
1789 	 */
1790 	while (i < msdu_info->num_seg) {
1791 		/*
1792 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1793 		 * descriptor
1794 		 */
1795 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1796 				tx_q->desc_pool_id);
1797 
1798 		if (!tx_desc) {
1799 			if (msdu_info->frm_type == dp_tx_frm_me) {
1800 				dp_tx_me_free_buf(pdev,
1801 					(void *)(msdu_info->u.sg_info
1802 						.curr_seg->frags[0].vaddr));
1803 				i++;
1804 				continue;
1805 			}
1806 			goto done;
1807 		}
1808 
1809 		if (msdu_info->frm_type == dp_tx_frm_me) {
1810 			tx_desc->me_buffer =
1811 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1812 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1813 		}
1814 
1815 		if (is_cce_classified)
1816 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1817 
1818 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1819 		if (msdu_info->exception_fw) {
1820 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1821 		}
1822 
1823 		/*
1824 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1825 		 */
1826 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1827 			htt_tcl_metadata, tx_q->ring_id, NULL);
1828 
1829 		if (status != QDF_STATUS_SUCCESS) {
1830 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1831 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1832 					__func__, tx_desc, tx_q->ring_id);
1833 
1834 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1835 			tid_stats = &pdev->stats.tid_stats.
1836 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1837 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1838 
1839 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1840 			if (msdu_info->frm_type == dp_tx_frm_me) {
1841 				i++;
1842 				continue;
1843 			}
1844 			goto done;
1845 		}
1846 
1847 		/*
1848 		 * TODO
1849 		 * if tso_info structure can be modified to have curr_seg
1850 		 * as first element, following 2 blocks of code (for TSO and SG)
1851 		 * can be combined into 1
1852 		 */
1853 
1854 		/*
1855 		 * For frames with multiple segments (TSO, ME), jump to next
1856 		 * segment.
1857 		 */
1858 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1859 			if (msdu_info->u.tso_info.curr_seg->next) {
1860 				msdu_info->u.tso_info.curr_seg =
1861 					msdu_info->u.tso_info.curr_seg->next;
1862 
1863 				/*
1864 				 * If this is a jumbo nbuf, then increment the number of
1865 				 * nbuf users for each additional segment of the msdu.
1866 				 * This will ensure that the skb is freed only after
1867 				 * receiving tx completion for all segments of an nbuf
1868 				 */
1869 				qdf_nbuf_inc_users(nbuf);
1870 
1871 				/* Check with MCL if this is needed */
1872 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1873 			}
1874 		}
1875 
1876 		/*
1877 		 * For Multicast-Unicast converted packets,
1878 		 * each converted frame (for a client) is represented as
1879 		 * 1 segment
1880 		 */
1881 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1882 				(msdu_info->frm_type == dp_tx_frm_me)) {
1883 			if (msdu_info->u.sg_info.curr_seg->next) {
1884 				msdu_info->u.sg_info.curr_seg =
1885 					msdu_info->u.sg_info.curr_seg->next;
1886 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1887 			}
1888 		}
1889 		i++;
1890 	}
1891 
1892 	nbuf = NULL;
1893 
1894 done:
1895 	return nbuf;
1896 }
1897 
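/*
 * Standalone sketch (not driver code): rationale for the qdf_nbuf_inc_users()
 * call in the TSO branch above. With N segments sharing one underlying skb,
 * each tx completion drops one user reference and the skb can be freed only
 * when the last reference goes away. The non-atomic helper below is only an
 * illustration; the driver relies on the nbuf's own reference counting.
 */
static inline bool dp_tx_seg_last_ref_sketch(uint32_t *users)
{
	/* returns true when the caller dropped the last user reference */
	return (--(*users) == 0);
}
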
1898 /**
1899  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1900  *                     for SG frames
1901  * @vdev: DP vdev handle
1902  * @nbuf: skb
1903  * @seg_info: Pointer to Segment info Descriptor to be prepared
1904  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1905  *
1906  * Return: nbuf on success,
1907  *         NULL on DMA map failure (nbuf is freed internally)
1908  */
1909 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1910 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1911 {
1912 	uint32_t cur_frag, nr_frags;
1913 	qdf_dma_addr_t paddr;
1914 	struct dp_tx_sg_info_s *sg_info;
1915 
1916 	sg_info = &msdu_info->u.sg_info;
1917 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1918 
1919 	if (QDF_STATUS_SUCCESS !=
1920 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
1921 					   QDF_DMA_TO_DEVICE, nbuf->len)) {
1922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1923 				"dma map error");
1924 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1925 
1926 		qdf_nbuf_free(nbuf);
1927 		return NULL;
1928 	}
1929 
1930 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
1931 	seg_info->frags[0].paddr_lo = paddr;
1932 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1933 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1934 	seg_info->frags[0].vaddr = (void *) nbuf;
1935 
1936 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1937 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1938 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1939 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1940 					"frag dma map error");
1941 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1942 			qdf_nbuf_free(nbuf);
1943 			return NULL;
1944 		}
1945 
1946 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
1947 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1948 		seg_info->frags[cur_frag + 1].paddr_hi =
1949 			((uint64_t) paddr) >> 32;
1950 		seg_info->frags[cur_frag + 1].len =
1951 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1952 	}
1953 
1954 	seg_info->frag_cnt = (cur_frag + 1);
1955 	seg_info->total_len = qdf_nbuf_len(nbuf);
1956 	seg_info->next = NULL;
1957 
1958 	sg_info->curr_seg = seg_info;
1959 
1960 	msdu_info->frm_type = dp_tx_frm_sg;
1961 	msdu_info->num_seg = 1;
1962 
1963 	return nbuf;
1964 }
1965 
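/*
 * Standalone sketch (not driver code): how dp_tx_prepare_sg() above splits a
 * DMA address into the 32-bit paddr_lo/paddr_hi pair stored per fragment.
 * The struct below is a hypothetical stand-in for the frag fields it fills.
 */
struct dp_tx_frag_paddr_sketch {
	uint32_t paddr_lo;
	uint32_t paddr_hi;
};

static inline void
dp_tx_frag_paddr_split_sketch(qdf_dma_addr_t paddr,
			      struct dp_tx_frag_paddr_sketch *frag)
{
	frag->paddr_lo = (uint32_t)paddr;                      /* low 32 bits */
	frag->paddr_hi = (uint32_t)(((uint64_t)paddr) >> 32);  /* upper bits  */
}
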
1966 /**
1967  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
1968  * @vdev: DP vdev handle
1969  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1970  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
1971  *
1972  * Return: None
1974  */
1975 static
1976 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
1977 				    struct dp_tx_msdu_info_s *msdu_info,
1978 				    uint16_t ppdu_cookie)
1979 {
1980 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1981 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1982 
1983 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1984 
1985 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
1986 				(msdu_info->meta_data[5], 1);
1987 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
1988 				(msdu_info->meta_data[5], 1);
1989 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
1990 				(msdu_info->meta_data[6], ppdu_cookie);
1991 
1992 	msdu_info->exception_fw = 1;
1993 	msdu_info->is_tx_sniffer = 1;
1994 }
1995 
1996 #ifdef MESH_MODE_SUPPORT
1997 
1998 /**
1999  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2000  *				    and prepare msdu_info for mesh frames.
2001  * @vdev: DP vdev handle
2002  * @nbuf: skb
2003  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2004  *
2005  * Return: NULL on failure,
2006  *         nbuf when extracted successfully
2007  */
2008 static
2009 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2010 				struct dp_tx_msdu_info_s *msdu_info)
2011 {
2012 	struct meta_hdr_s *mhdr;
2013 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2014 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2015 
2016 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2017 
2018 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2019 		msdu_info->exception_fw = 0;
2020 		goto remove_meta_hdr;
2021 	}
2022 
2023 	msdu_info->exception_fw = 1;
2024 
2025 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2026 
2027 	meta_data->host_tx_desc_pool = 1;
2028 	meta_data->update_peer_cache = 1;
2029 	meta_data->learning_frame = 1;
2030 
2031 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2032 		meta_data->power = mhdr->power;
2033 
2034 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2035 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2036 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2037 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2038 
2039 		meta_data->dyn_bw = 1;
2040 
2041 		meta_data->valid_pwr = 1;
2042 		meta_data->valid_mcs_mask = 1;
2043 		meta_data->valid_nss_mask = 1;
2044 		meta_data->valid_preamble_type  = 1;
2045 		meta_data->valid_retries = 1;
2046 		meta_data->valid_bw_info = 1;
2047 	}
2048 
2049 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2050 		meta_data->encrypt_type = 0;
2051 		meta_data->valid_encrypt_type = 1;
2052 		meta_data->learning_frame = 0;
2053 	}
2054 
2055 	meta_data->valid_key_flags = 1;
2056 	meta_data->key_flags = (mhdr->keyix & 0x3);
2057 
2058 remove_meta_hdr:
2059 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2060 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2061 				"qdf_nbuf_pull_head failed");
2062 		qdf_nbuf_free(nbuf);
2063 		return NULL;
2064 	}
2065 
2066 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2067 
2068 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2069 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2070 			" tid %d to_fw %d",
2071 			__func__, msdu_info->meta_data[0],
2072 			msdu_info->meta_data[1],
2073 			msdu_info->meta_data[2],
2074 			msdu_info->meta_data[3],
2075 			msdu_info->meta_data[4],
2076 			msdu_info->meta_data[5],
2077 			msdu_info->tid, msdu_info->exception_fw);
2078 
2079 	return nbuf;
2080 }
2081 #else
2082 static
2083 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2084 				struct dp_tx_msdu_info_s *msdu_info)
2085 {
2086 	return nbuf;
2087 }
2088 
2089 #endif
2090 
2091 /**
2092  * dp_check_exc_metadata() - Checks if parameters are valid
2093  * @tx_exc: holds all exception path parameters
2094  *
2095  * Return: true when all the parameters are valid, else false
2096  *
2097  */
2098 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2099 {
2100 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2101 			    HTT_INVALID_TID);
2102 	bool invalid_encap_type =
2103 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2104 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2105 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2106 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2107 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2108 			       tx_exc->ppdu_cookie == 0);
2109 
2110 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2111 	    invalid_cookie) {
2112 		return false;
2113 	}
2114 
2115 	return true;
2116 }
2117 
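/*
 * Illustrative sketch (not driver code): the cdp_tx_exception_metadata fields
 * that dp_check_exc_metadata() above validates, filled the way a caller of
 * dp_tx_send_exception() might fill them when no per-frame overrides are
 * needed. The helper name is hypothetical; the CDP_INVALID_* and HTT_INVALID_*
 * markers mean "field not used".
 */
static inline void
dp_tx_exc_metadata_init_sketch(struct cdp_tx_exception_metadata *tx_exc,
			       uint16_t peer_id)
{
	tx_exc->peer_id = peer_id;
	tx_exc->tid = HTT_INVALID_TID;                     /* no TID override */
	tx_exc->tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE; /* no encap override */
	tx_exc->sec_type = CDP_INVALID_SEC_TYPE;           /* no cipher override */
	tx_exc->is_tx_sniffer = 0;
	tx_exc->ppdu_cookie = 0;
}
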
2118 /**
2119  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2120  * @soc: DP soc handle
2121  * @vdev_id: id of DP vdev handle
2122  * @nbuf: skb
2123  * @tx_exc_metadata: Handle that holds exception path meta data
2124  *
2125  * Entry point for Core Tx layer (DP_TX) invoked from
2126  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2127  *
2128  * Return: NULL on success,
2129  *         nbuf when it fails to send
2130  */
2131 qdf_nbuf_t
2132 dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
2133 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2134 {
2135 	qdf_ether_header_t *eh = NULL;
2136 	struct dp_tx_msdu_info_s msdu_info;
2137 	struct dp_vdev *vdev =
2138 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2139 						   vdev_id);
2140 
2141 	if (qdf_unlikely(!vdev))
2142 		goto fail;
2143 
2144 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2145 
2146 	if (!tx_exc_metadata)
2147 		goto fail;
2148 
2149 	msdu_info.tid = tx_exc_metadata->tid;
2150 
2151 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2152 	dp_verbose_debug("skb %pM", nbuf->data);
2153 
2154 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2155 
2156 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2157 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2158 			"Invalid parameters in exception path");
2159 		goto fail;
2160 	}
2161 
2162 	/* Basic sanity checks for unsupported packets */
2163 
2164 	/* MESH mode */
2165 	if (qdf_unlikely(vdev->mesh_vdev)) {
2166 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2167 			"Mesh mode is not supported in exception path");
2168 		goto fail;
2169 	}
2170 
2171 	/* TSO or SG */
2172 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2173 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2174 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2175 			  "TSO and SG are not supported in exception path");
2176 
2177 		goto fail;
2178 	}
2179 
2180 	/* RAW */
2181 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2182 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2183 			  "Raw frame is not supported in exception path");
2184 		goto fail;
2185 	}
2186 
2187 
2188 	/* Mcast enhancement*/
2189 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2190 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2191 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2192 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2193 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2194 		}
2195 	}
2196 
2197 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2198 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2199 				 qdf_nbuf_len(nbuf));
2200 
2201 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2202 					       tx_exc_metadata->ppdu_cookie);
2203 	}
2204 
2205 	/*
2206 	 * Get HW Queue to use for this frame.
2207 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2208 	 * dedicated for data and 1 for command.
2209 	 * "queue_id" maps to one hardware ring.
2210 	 *  With each ring, we also associate a unique Tx descriptor pool
2211 	 *  to minimize lock contention for these resources.
2212 	 */
2213 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2214 
2215 	/*  Single linear frame */
2216 	/*
2217 	 * If nbuf is a simple linear frame, use send_single function to
2218 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2219 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2220 	 */
2221 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2222 			tx_exc_metadata->peer_id, tx_exc_metadata);
2223 
2224 	return nbuf;
2225 
2226 fail:
2227 	dp_verbose_debug("pkt send failed");
2228 	return nbuf;
2229 }
2230 
2231 /**
2232  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2233  * @soc: DP soc handle
2234  * @vdev_id: id of DP vdev handle
2235  * @nbuf: skb
2236  *
2237  * Entry point for Core Tx layer (DP_TX) invoked from
2238  * hard_start_xmit in OSIF/HDD
2239  *
2240  * Return: NULL on success,
2241  *         nbuf when it fails to send
2242  */
2243 #ifdef MESH_MODE_SUPPORT
2244 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2245 			   qdf_nbuf_t nbuf)
2246 {
2247 	struct meta_hdr_s *mhdr;
2248 	qdf_nbuf_t nbuf_mesh = NULL;
2249 	qdf_nbuf_t nbuf_clone = NULL;
2250 	struct dp_vdev *vdev;
2251 	uint8_t no_enc_frame = 0;
2252 
2253 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2254 	if (!nbuf_mesh) {
2255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2256 				"qdf_nbuf_unshare failed");
2257 		return nbuf;
2258 	}
2259 
2260 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2261 						  vdev_id);
2262 	if (!vdev) {
2263 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2264 				"vdev is NULL for vdev_id %d", vdev_id);
2265 		return nbuf;
2266 	}
2267 
2268 	nbuf = nbuf_mesh;
2269 
2270 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2271 
2272 	if ((vdev->sec_type != cdp_sec_type_none) &&
2273 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2274 		no_enc_frame = 1;
2275 
2276 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2277 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2278 
2279 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2280 		       !no_enc_frame) {
2281 		nbuf_clone = qdf_nbuf_clone(nbuf);
2282 		if (!nbuf_clone) {
2283 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2284 				"qdf_nbuf_clone failed");
2285 			return nbuf;
2286 		}
2287 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2288 	}
2289 
2290 	if (nbuf_clone) {
2291 		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
2292 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2293 		} else {
2294 			qdf_nbuf_free(nbuf_clone);
2295 		}
2296 	}
2297 
2298 	if (no_enc_frame)
2299 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2300 	else
2301 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2302 
2303 	nbuf = dp_tx_send(soc, vdev_id, nbuf);
2304 	if ((!nbuf) && no_enc_frame) {
2305 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2306 	}
2307 
2308 	return nbuf;
2309 }
2310 
2311 #else
2312 
2313 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2314 			   qdf_nbuf_t nbuf)
2315 {
2316 	return dp_tx_send(soc, vdev_id, nbuf);
2317 }
2318 
2319 #endif
2320 
2321 /**
2322  * dp_tx_nawds_handler() - NAWDS handler
2323  *
2324  * @soc: DP soc handle
2325  * @vdev: DP vdev handle
2326  * @msdu_info: msdu_info required to create HTT metadata
2327  * @nbuf: skb
2328  *
2329  * This API transmits a copy of the multicast frame to each
2330  * NAWDS-enabled peer, using that peer's peer id.
2331  *
2332  * Return: none
2333  */
2334 
2335 static inline
2336 void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
2337 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2338 {
2339 	struct dp_peer *peer = NULL;
2340 	qdf_nbuf_t nbuf_clone = NULL;
2341 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
2342 	uint16_t peer_id = DP_INVALID_PEER;
2343 	struct dp_peer *sa_peer = NULL;
2344 	struct dp_ast_entry *ast_entry = NULL;
2345 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2346 
2347 	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
2348 		qdf_spin_lock_bh(&dp_soc->ast_lock);
2349 
2350 		ast_entry = dp_peer_ast_hash_find_by_pdevid
2351 					(dp_soc,
2352 					 (uint8_t *)(eh->ether_shost),
2353 					 vdev->pdev->pdev_id);
2354 
2355 		if (ast_entry)
2356 			sa_peer = ast_entry->peer;
2357 		qdf_spin_unlock_bh(&dp_soc->ast_lock);
2358 	}
2359 
2360 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
2361 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2362 		if (!peer->bss_peer && peer->nawds_enabled) {
2363 			peer_id = peer->peer_ids[0];
2364 			/* Multicast packets needs to be
2365 			 * dropped in case of intra bss forwarding
2366 			 */
2367 			if (sa_peer == peer) {
2368 				QDF_TRACE(QDF_MODULE_ID_DP,
2369 					  QDF_TRACE_LEVEL_DEBUG,
2370 					  " %s: multicast packet",  __func__);
2371 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2372 				continue;
2373 			}
2374 			nbuf_clone = qdf_nbuf_clone(nbuf);
2375 
2376 			if (!nbuf_clone) {
2377 				QDF_TRACE(QDF_MODULE_ID_DP,
2378 					  QDF_TRACE_LEVEL_ERROR,
2379 					  FL("nbuf clone failed"));
2380 				break;
2381 			}
2382 
2383 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2384 							    msdu_info, peer_id,
2385 							    NULL);
2386 
2387 			if (nbuf_clone) {
2388 				QDF_TRACE(QDF_MODULE_ID_DP,
2389 					  QDF_TRACE_LEVEL_DEBUG,
2390 					  FL("pkt send failed"));
2391 				qdf_nbuf_free(nbuf_clone);
2392 			} else {
2393 				if (peer_id != DP_INVALID_PEER)
2394 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2395 							 1, qdf_nbuf_len(nbuf));
2396 			}
2397 		}
2398 	}
2399 
2400 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
2401 }
2402 
2403 /**
2404  * dp_tx_send() - Transmit a frame on a given VAP
2405  * @soc: DP soc handle
2406  * @vdev_id: id of DP vdev handle
2407  * @nbuf: skb
2408  *
2409  * Entry point for Core Tx layer (DP_TX) invoked from
2410  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2411  * cases
2412  *
2413  * Return: NULL on success,
2414  *         nbuf when it fails to send
2415  */
2416 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
2417 {
2418 	uint16_t peer_id = HTT_INVALID_PEER;
2419 	/*
2420 	 * doing a memzero causes additional function call overhead,
2421 	 * so the structure is cleared via static initialization instead
2422 	 */
2423 	struct dp_tx_msdu_info_s msdu_info = {0};
2424 	struct dp_vdev *vdev =
2425 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2426 						   vdev_id);
2427 	if (qdf_unlikely(!vdev))
2428 		return nbuf;
2429 
2430 	dp_verbose_debug("skb %pM", nbuf->data);
2431 
2432 	/*
2433 	 * Set Default Host TID value to invalid TID
2434 	 * (TID override disabled)
2435 	 */
2436 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2437 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2438 
2439 	if (qdf_unlikely(vdev->mesh_vdev)) {
2440 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2441 								&msdu_info);
2442 		if (!nbuf_mesh) {
2443 			dp_verbose_debug("Extracting mesh metadata failed");
2444 			return nbuf;
2445 		}
2446 		nbuf = nbuf_mesh;
2447 	}
2448 
2449 	/*
2450 	 * Get HW Queue to use for this frame.
2451 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2452 	 * dedicated for data and 1 for command.
2453 	 * "queue_id" maps to one hardware ring.
2454 	 *  With each ring, we also associate a unique Tx descriptor pool
2455 	 *  to minimize lock contention for these resources.
2456 	 */
2457 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2458 
2459 	/*
2460 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2461 	 *  Table 1 - Default DSCP-TID mapping table
2462 	 *  Table 2 - 1 DSCP-TID override table
2463 	 *
2464 	 * If we need a different DSCP-TID mapping for this vap,
2465 	 * call tid_classify to extract DSCP/ToS from frame and
2466 	 * map to a TID and store in msdu_info. This is later used
2467 	 * to fill in TCL Input descriptor (per-packet TID override).
2468 	 */
2469 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2470 
2471 	/*
2472 	 * Classify the frame and call corresponding
2473 	 * "prepare" function which extracts the segment (TSO)
2474 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2475 	 * into MSDU_INFO structure which is later used to fill
2476 	 * SW and HW descriptors.
2477 	 */
2478 	if (qdf_nbuf_is_tso(nbuf)) {
2479 		dp_verbose_debug("TSO frame %pK", vdev);
2480 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2481 				 qdf_nbuf_len(nbuf));
2482 
2483 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2484 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2485 					 qdf_nbuf_len(nbuf));
2486 			return nbuf;
2487 		}
2488 
2489 		goto send_multiple;
2490 	}
2491 
2492 	/* SG */
2493 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2494 		struct dp_tx_seg_info_s seg_info = {0};
2495 
2496 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2497 		if (!nbuf)
2498 			return NULL;
2499 
2500 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2501 
2502 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2503 				qdf_nbuf_len(nbuf));
2504 
2505 		goto send_multiple;
2506 	}
2507 
2508 #ifdef ATH_SUPPORT_IQUE
2509 	/* Mcast to Ucast Conversion*/
2510 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2511 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2512 					  qdf_nbuf_data(nbuf);
2513 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2514 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2515 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2516 
2517 			DP_STATS_INC_PKT(vdev,
2518 					tx_i.mcast_en.mcast_pkt, 1,
2519 					qdf_nbuf_len(nbuf));
2520 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2521 					QDF_STATUS_SUCCESS) {
2522 				return NULL;
2523 			}
2524 		}
2525 	}
2526 #endif
2527 
2528 	/* RAW */
2529 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2530 		struct dp_tx_seg_info_s seg_info = {0};
2531 
2532 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2533 		if (!nbuf)
2534 			return NULL;
2535 
2536 		dp_verbose_debug("Raw frame %pK", vdev);
2537 
2538 		goto send_multiple;
2539 
2540 	}
2541 
2542 	if (qdf_unlikely(vdev->nawds_enabled)) {
2543 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2544 					  qdf_nbuf_data(nbuf);
2545 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
2546 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
2547 
2548 		peer_id = DP_INVALID_PEER;
2549 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2550 				 1, qdf_nbuf_len(nbuf));
2551 	}
2552 
2553 	/*  Single linear frame */
2554 	/*
2555 	 * If nbuf is a simple linear frame, use send_single function to
2556 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2557 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2558 	 */
2559 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2560 
2561 	return nbuf;
2562 
2563 send_multiple:
2564 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2565 
2566 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2567 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2568 
2569 	return nbuf;
2570 }
2571 
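/*
 * Standalone sketch (not the driver's dp_tx_classify_tid()): one way a DSCP
 * value taken from the IP ToS / Traffic Class byte can be mapped to a TID
 * through an override table, as described in the DSCP-TID comment inside
 * dp_tx_send() above. The table contents below are hypothetical placeholders.
 */
static inline uint8_t dp_tx_dscp_to_tid_sketch(uint8_t tos)
{
	/* hypothetical DSCP -> TID map; DSCP is the upper 6 bits of ToS */
	static const uint8_t dscp_tid_map_sketch[64] = {
		[0 ... 7]   = 0,	/* best effort */
		[8 ... 15]  = 1,	/* background  */
		[40 ... 47] = 5,	/* video       */
		[48 ... 63] = 6,	/* voice       */
	};

	return dscp_tid_map_sketch[tos >> 2];
}
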
2572 /**
2573  * dp_tx_reinject_handler() - Tx Reinject Handler
2574  * @tx_desc: software descriptor head pointer
2575  * @status : Tx completion status from HTT descriptor
2576  *
2577  * This function reinjects frames back to Target.
2578  * Todo - Host queue needs to be added
2579  *
2580  * Return: none
2581  */
2582 static
2583 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2584 {
2585 	struct dp_vdev *vdev;
2586 	struct dp_peer *peer = NULL;
2587 	uint32_t peer_id = HTT_INVALID_PEER;
2588 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2589 	qdf_nbuf_t nbuf_copy = NULL;
2590 	struct dp_tx_msdu_info_s msdu_info;
2591 	struct dp_soc *soc = NULL;
2592 #ifdef WDS_VENDOR_EXTENSION
2593 	int is_mcast = 0, is_ucast = 0;
2594 	int num_peers_3addr = 0;
2595 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2596 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2597 #endif
2598 
2599 	vdev = tx_desc->vdev;
2600 	soc = vdev->pdev->soc;
2601 
2602 	qdf_assert(vdev);
2603 
2604 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2605 
2606 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2607 
2608 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2609 			"%s Tx reinject path", __func__);
2610 
2611 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2612 			qdf_nbuf_len(tx_desc->nbuf));
2613 
2614 #ifdef WDS_VENDOR_EXTENSION
2615 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2616 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2617 	} else {
2618 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2619 	}
2620 	is_ucast = !is_mcast;
2621 
2622 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2623 		if (peer->bss_peer)
2624 			continue;
2625 
2626 		/* Detect wds peers that use 3-addr framing for mcast.
2627 		 * if there are any, the bss_peer is used to send the
2628 		 * mcast frame using 3-addr format. all wds enabled
2629 		 * peers that use 4-addr framing for mcast frames will
2630 		 * be duplicated and sent as 4-addr frames below.
2631 		 */
2632 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2633 			num_peers_3addr = 1;
2634 			break;
2635 		}
2636 	}
2637 #endif
2638 
2639 	if (qdf_unlikely(vdev->mesh_vdev)) {
2640 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2641 	} else {
2642 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2643 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2644 #ifdef WDS_VENDOR_EXTENSION
2645 			/*
2646 			 * . if 3-addr STA, then send on BSS Peer
2647 			 * . if Peer WDS enabled and accept 4-addr mcast,
2648 			 * send mcast on that peer only
2649 			 * . if Peer WDS enabled and accept 4-addr ucast,
2650 			 * send ucast on that peer only
2651 			 */
2652 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2653 			 (peer->wds_enabled &&
2654 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2655 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2656 #else
2657 			((peer->bss_peer &&
2658 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
2659 #endif
2660 				peer_id = DP_INVALID_PEER;
2661 
2662 				nbuf_copy = qdf_nbuf_copy(nbuf);
2663 
2664 				if (!nbuf_copy) {
2665 					QDF_TRACE(QDF_MODULE_ID_DP,
2666 						QDF_TRACE_LEVEL_DEBUG,
2667 						FL("nbuf copy failed"));
2668 					break;
2669 				}
2670 
2671 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2672 						nbuf_copy,
2673 						&msdu_info,
2674 						peer_id,
2675 						NULL);
2676 
2677 				if (nbuf_copy) {
2678 					QDF_TRACE(QDF_MODULE_ID_DP,
2679 						QDF_TRACE_LEVEL_DEBUG,
2680 						FL("pkt send failed"));
2681 					qdf_nbuf_free(nbuf_copy);
2682 				} else {
2683 					if (peer_id != DP_INVALID_PEER)
2684 						DP_STATS_INC_PKT(peer,
2685 							tx.nawds_mcast,
2686 							1, qdf_nbuf_len(nbuf));
2687 				}
2688 			}
2689 		}
2690 	}
2691 
2692 	qdf_nbuf_free(nbuf);
2693 
2694 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2695 }
2696 
2697 /**
2698  * dp_tx_inspect_handler() - Tx Inspect Handler
2699  * @tx_desc: software descriptor head pointer
2700  * @status : Tx completion status from HTT descriptor
2701  *
2702  * Handles Tx frames sent back to Host for inspection
2703  * (ProxyARP)
2704  *
2705  * Return: none
2706  */
2707 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2708 {
2709 
2710 	struct dp_soc *soc;
2711 	struct dp_pdev *pdev = tx_desc->pdev;
2712 
2713 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2714 			"%s Tx inspect path",
2715 			__func__);
2716 
2717 	qdf_assert(pdev);
2718 
2719 	soc = pdev->soc;
2720 
2721 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2722 			qdf_nbuf_len(tx_desc->nbuf));
2723 
2724 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2725 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2726 }
2727 
2728 #ifdef FEATURE_PERPKT_INFO
2729 /**
2730  * dp_get_completion_indication_for_stack() - send completion to stack
2731  * @soc : dp_soc handle
2732  * @pdev: dp_pdev handle
2733  * @peer: dp peer handle
2734  * @ts: transmit completion status structure
2735  * @netbuf: Buffer pointer for free
2736  * @time_latency: latency of the msdu, used when latency capture is enabled
2737  * This function is used to indicate whether the buffer needs to be
2738  * sent to the stack for freeing or not
2739  */
2740 QDF_STATUS
2741 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2742 				       struct dp_pdev *pdev,
2743 				       struct dp_peer *peer,
2744 				       struct hal_tx_completion_status *ts,
2745 				       qdf_nbuf_t netbuf,
2746 				       uint64_t time_latency)
2747 {
2748 	struct tx_capture_hdr *ppdu_hdr;
2749 	uint16_t peer_id = ts->peer_id;
2750 	uint32_t ppdu_id = ts->ppdu_id;
2751 	uint8_t first_msdu = ts->first_msdu;
2752 	uint8_t last_msdu = ts->last_msdu;
2753 
2754 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2755 			 !pdev->latency_capture_enable))
2756 		return QDF_STATUS_E_NOSUPPORT;
2757 
2758 	if (!peer) {
2759 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2760 				FL("Peer Invalid"));
2761 		return QDF_STATUS_E_INVAL;
2762 	}
2763 
2764 	if (pdev->mcopy_mode) {
2765 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2766 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2767 			return QDF_STATUS_E_INVAL;
2768 		}
2769 
2770 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2771 		pdev->m_copy_id.tx_peer_id = peer_id;
2772 	}
2773 
2774 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2775 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2776 				FL("No headroom"));
2777 		return QDF_STATUS_E_NOMEM;
2778 	}
2779 
2780 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2781 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2782 		     QDF_MAC_ADDR_SIZE);
2783 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2784 		     QDF_MAC_ADDR_SIZE);
2785 	ppdu_hdr->ppdu_id = ppdu_id;
2786 	ppdu_hdr->peer_id = peer_id;
2787 	ppdu_hdr->first_msdu = first_msdu;
2788 	ppdu_hdr->last_msdu = last_msdu;
2789 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2790 		ppdu_hdr->tsf = ts->tsf;
2791 		ppdu_hdr->time_latency = time_latency;
2792 	}
2793 
2794 	return QDF_STATUS_SUCCESS;
2795 }
2796 
2797 
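/*
 * Illustrative buffer layout (not authoritative) after the headroom push done
 * in dp_get_completion_indication_for_stack() above:
 *
 *   +----------------------------+---------------------------- ...
 *   | struct tx_capture_hdr      | original MSDU data
 *   |  ta, ra, ppdu_id, peer_id, |
 *   |  first_msdu, last_msdu,    |
 *   |  tsf, time_latency         |
 *   +----------------------------+---------------------------- ...
 *
 * The prepended header travels with the nbuf that is handed to the WDI
 * consumer via dp_send_completion_to_stack() below.
 */
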
2798 /**
2799  * dp_send_completion_to_stack() - send completion to stack
2800  * @soc :  dp_soc handle
2801  * @pdev:  dp_pdev handle
2802  * @peer_id: peer_id of the peer for which completion came
2803  * @ppdu_id: ppdu_id
2804  * @netbuf: Buffer pointer for free
2805  *
2806  * This function is used to send a completion to the stack
2807  * to free the buffer
2808  */
2809 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2810 					uint16_t peer_id, uint32_t ppdu_id,
2811 					qdf_nbuf_t netbuf)
2812 {
2813 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2814 				netbuf, peer_id,
2815 				WDI_NO_VAL, pdev->pdev_id);
2816 }
2817 #else
2818 static QDF_STATUS
2819 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2820 				       struct dp_pdev *pdev,
2821 				       struct dp_peer *peer,
2822 				       struct hal_tx_completion_status *ts,
2823 				       qdf_nbuf_t netbuf,
2824 				       uint64_t time_latency)
2825 {
2826 	return QDF_STATUS_E_NOSUPPORT;
2827 }
2828 
2829 static void
2830 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2831 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2832 {
2833 }
2834 #endif
2835 
2836 /**
2837  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2838  * @soc: Soc handle
2839  * @desc: software Tx descriptor to be processed
2840  *
2841  * Return: none
2842  */
2843 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2844 				       struct dp_tx_desc_s *desc)
2845 {
2846 	struct dp_vdev *vdev = desc->vdev;
2847 	qdf_nbuf_t nbuf = desc->nbuf;
2848 
2849 	/* nbuf already freed in vdev detach path */
2850 	if (!nbuf)
2851 		return;
2852 
2853 	/* If it is TDLS mgmt, don't unmap or free the frame */
2854 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2855 		return dp_non_std_tx_comp_free_buff(soc, desc, vdev);
2856 
2857 	/* 0 : MSDU buffer, 1 : MLE */
2858 	if (desc->msdu_ext_desc) {
2859 		/* TSO free */
2860 		if (hal_tx_ext_desc_get_tso_enable(
2861 					desc->msdu_ext_desc->vaddr)) {
2862 			/* unmap each TSO seg before freeing the nbuf */
2863 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2864 						desc->tso_num_desc);
2865 			qdf_nbuf_free(nbuf);
2866 			return;
2867 		}
2868 	}
2869 
2870 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2871 				     QDF_DMA_TO_DEVICE, nbuf->len);
2872 
2873 	if (qdf_unlikely(!vdev)) {
2874 		qdf_nbuf_free(nbuf);
2875 		return;
2876 	}
2877 
2878 	if (qdf_likely(!vdev->mesh_vdev))
2879 		qdf_nbuf_free(nbuf);
2880 	else {
2881 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2882 			qdf_nbuf_free(nbuf);
2883 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2884 		} else
2885 			vdev->osif_tx_free_ext((nbuf));
2886 	}
2887 }
2888 
2889 #ifdef MESH_MODE_SUPPORT
2890 /**
2891  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2892  *                                         in mesh meta header
2893  * @tx_desc: software descriptor head pointer
2894  * @ts: pointer to tx completion stats
2895  * Return: none
2896  */
2897 static
2898 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2899 		struct hal_tx_completion_status *ts)
2900 {
2901 	struct meta_hdr_s *mhdr;
2902 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2903 
2904 	if (!tx_desc->msdu_ext_desc) {
2905 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2906 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2907 				"netbuf %pK offset %d",
2908 				netbuf, tx_desc->pkt_offset);
2909 			return;
2910 		}
2911 	}
2912 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2913 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2914 			"netbuf %pK offset %lu", netbuf,
2915 			sizeof(struct meta_hdr_s));
2916 		return;
2917 	}
2918 
2919 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2920 	mhdr->rssi = ts->ack_frame_rssi;
2921 	mhdr->band = tx_desc->pdev->operating_channel.band;
2922 	mhdr->channel = tx_desc->pdev->operating_channel.num;
2923 }
2924 
2925 #else
2926 static
2927 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2928 		struct hal_tx_completion_status *ts)
2929 {
2930 }
2931 
2932 #endif
2933 
2934 /**
2935  * dp_tx_compute_delay() - Compute per-packet sw enqueue, hw transmit
2936  *				and interframe delays and update delay stats
2937  *
2938  * @vdev: vdev handle
2939  * @tx_desc: tx descriptor
2940  * @tid: tid value
2941  * @ring_id: TCL or WBM ring number for transmit path
2942  * Return: none
2943  */
2944 static void dp_tx_compute_delay(struct dp_vdev *vdev,
2945 				struct dp_tx_desc_s *tx_desc,
2946 				uint8_t tid, uint8_t ring_id)
2947 {
2948 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
2949 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
2950 
2951 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
2952 		return;
2953 
2954 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
2955 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
2956 	timestamp_hw_enqueue = tx_desc->timestamp;
2957 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
2958 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
2959 					 timestamp_hw_enqueue);
2960 	interframe_delay = (uint32_t)(timestamp_ingress -
2961 				      vdev->prev_tx_enq_tstamp);
2962 
2963 	/*
2964 	 * Delay in software enqueue
2965 	 */
2966 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
2967 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
2968 	/*
2969 	 * Delay between packet enqueued to HW and Tx completion
2970 	 */
2971 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
2972 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
2973 
2974 	/*
2975 	 * Update interframe delay stats calculated at hardstart receive point.
2976 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
2977 	 * interframe delay will not be calculated correctly for the 1st frame.
2978 	 * On the other hand, this helps in avoiding an extra per-packet check
2979 	 * of !vdev->prev_tx_enq_tstamp.
2980 	 */
2981 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
2982 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
2983 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
2984 }
2985 
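/*
 * Worked example for the delay computation above (illustrative numbers only):
 * if a frame is timestamped at ingress t_in = 100 ms, enqueued to HW at
 * t_hw = 103 ms and completed at t_now = 110 ms, and the previous frame's
 * ingress timestamp was t_prev = 96 ms, then
 *   sw_enqueue_delay    = t_hw  - t_in   = 3 ms
 *   fwhw_transmit_delay = t_now - t_hw   = 7 ms
 *   interframe_delay    = t_in  - t_prev = 4 ms
 */
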
2986 #ifdef DISABLE_DP_STATS
2987 static
2988 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
2989 {
2990 }
2991 #else
2992 static
2993 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
2994 {
2995 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
2996 
2997 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
2998 	if (subtype != QDF_PROTO_INVALID)
2999 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3000 }
3001 #endif
3002 
3003 /**
3004  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3005  *				per wbm ring
3006  *
3007  * @tx_desc: software descriptor head pointer
3008  * @ts: Tx completion status
3009  * @peer: peer handle
3010  * @ring_id: ring number
3011  *
3012  * Return: None
3013  */
3014 static inline void
3015 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3016 			struct hal_tx_completion_status *ts,
3017 			struct dp_peer *peer, uint8_t ring_id)
3018 {
3019 	struct dp_pdev *pdev = peer->vdev->pdev;
3020 	struct dp_soc *soc = NULL;
3021 	uint8_t mcs, pkt_type;
3022 	uint8_t tid = ts->tid;
3023 	uint32_t length;
3024 	struct cdp_tid_tx_stats *tid_stats;
3025 
3026 	if (!pdev)
3027 		return;
3028 
3029 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3030 		tid = CDP_MAX_DATA_TIDS - 1;
3031 
3032 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3033 	soc = pdev->soc;
3034 
3035 	mcs = ts->mcs;
3036 	pkt_type = ts->pkt_type;
3037 
3038 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3039 		dp_err("Release source is not from TQM");
3040 		return;
3041 	}
3042 
3043 	length = qdf_nbuf_len(tx_desc->nbuf);
3044 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3045 
3046 	if (qdf_unlikely(pdev->delay_stats_flag))
3047 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3048 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3049 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3050 
3051 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3052 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3053 
3054 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3055 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3056 
3057 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3058 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3059 
3060 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3061 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3062 
3063 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3064 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3065 
3066 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3067 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3068 
3069 	/*
3070 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3071 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3072 	 * are no completions for failed cases. Hence updating tx_failed from
3073 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3074 	 * then this has to be removed
3075 	 */
3076 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3077 				peer->stats.tx.dropped.fw_rem_notx +
3078 				peer->stats.tx.dropped.fw_rem_tx +
3079 				peer->stats.tx.dropped.age_out +
3080 				peer->stats.tx.dropped.fw_reason1 +
3081 				peer->stats.tx.dropped.fw_reason2 +
3082 				peer->stats.tx.dropped.fw_reason3;
3083 
3084 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3085 		tid_stats->tqm_status_cnt[ts->status]++;
3086 	}
3087 
3088 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3089 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3090 		return;
3091 	}
3092 
3093 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3094 
3095 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3096 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3097 
3098 	/*
3099 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3100 	 * Return from here if HTT PPDU events are enabled.
3101 	 */
3102 	if (!(soc->process_tx_status))
3103 		return;
3104 
3105 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3106 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3107 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3108 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3109 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3110 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3111 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3112 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3113 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3114 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3115 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3116 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3117 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3118 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3119 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3120 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3121 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3122 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3123 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3124 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3125 
3126 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3127 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3128 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3129 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3130 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3131 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3132 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3133 
3134 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3135 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3136 			     &peer->stats, ts->peer_id,
3137 			     UPDATE_PEER_STATS, pdev->pdev_id);
3138 #endif
3139 }
3140 
3141 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3142 /**
3143  * dp_tx_flow_pool_lock() - take flow pool lock
3144  * @soc: core txrx main context
3145  * @tx_desc: tx desc
3146  *
3147  * Return: None
3148  */
3149 static inline
3150 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3151 			  struct dp_tx_desc_s *tx_desc)
3152 {
3153 	struct dp_tx_desc_pool_s *pool;
3154 	uint8_t desc_pool_id;
3155 
3156 	desc_pool_id = tx_desc->pool_id;
3157 	pool = &soc->tx_desc[desc_pool_id];
3158 
3159 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3160 }
3161 
3162 /**
3163  * dp_tx_flow_pool_unlock() - release flow pool lock
3164  * @soc: core txrx main context
3165  * @tx_desc: tx desc
3166  *
3167  * Return: None
3168  */
3169 static inline
3170 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3171 			    struct dp_tx_desc_s *tx_desc)
3172 {
3173 	struct dp_tx_desc_pool_s *pool;
3174 	uint8_t desc_pool_id;
3175 
3176 	desc_pool_id = tx_desc->pool_id;
3177 	pool = &soc->tx_desc[desc_pool_id];
3178 
3179 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3180 }
3181 #else
3182 static inline
3183 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3184 {
3185 }
3186 
3187 static inline
3188 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3189 {
3190 }
3191 #endif
3192 
3193 /**
3194  * dp_tx_notify_completion() - Notify tx completion for this desc
3195  * @soc: core txrx main context
3196  * @tx_desc: tx desc
3197  * @netbuf:  buffer
3198  *
3199  * Return: none
3200  */
3201 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3202 					   struct dp_tx_desc_s *tx_desc,
3203 					   qdf_nbuf_t netbuf)
3204 {
3205 	void *osif_dev;
3206 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3207 
3208 	qdf_assert(tx_desc);
3209 
3210 	dp_tx_flow_pool_lock(soc, tx_desc);
3211 
3212 	if (!tx_desc->vdev ||
3213 	    !tx_desc->vdev->osif_vdev) {
3214 		dp_tx_flow_pool_unlock(soc, tx_desc);
3215 		return;
3216 	}
3217 
3218 	osif_dev = tx_desc->vdev->osif_vdev;
3219 	tx_compl_cbk = tx_desc->vdev->tx_comp;
3220 	dp_tx_flow_pool_unlock(soc, tx_desc);
3221 
3222 	if (tx_compl_cbk)
3223 		tx_compl_cbk(netbuf, osif_dev);
3224 }
3225 
3226 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
3227  * @pdev: pdev handle
 * @peer: peer handle
3228  * @tid: tid value
3229  * @txdesc_ts: timestamp from txdesc
3230  * @ppdu_id: ppdu id
3231  *
3232  * Return: none
3233  */
3234 #ifdef FEATURE_PERPKT_INFO
3235 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3236 					       struct dp_peer *peer,
3237 					       uint8_t tid,
3238 					       uint64_t txdesc_ts,
3239 					       uint32_t ppdu_id)
3240 {
3241 	uint64_t delta_ms;
3242 	struct cdp_tx_sojourn_stats *sojourn_stats;
3243 
3244 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3245 		return;
3246 
3247 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3248 			 tid >= CDP_DATA_TID_MAX))
3249 		return;
3250 
3251 	if (qdf_unlikely(!pdev->sojourn_buf))
3252 		return;
3253 
3254 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3255 		qdf_nbuf_data(pdev->sojourn_buf);
3256 
3257 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
3258 
3259 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3260 				txdesc_ts;
3261 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3262 			    delta_ms);
3263 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3264 	sojourn_stats->num_msdus[tid] = 1;
3265 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3266 		peer->avg_sojourn_msdu[tid].internal;
3267 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3268 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3269 			     WDI_NO_VAL, pdev->pdev_id);
3270 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3271 	sojourn_stats->num_msdus[tid] = 0;
3272 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3273 }
3274 #else
3275 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3276 					       struct dp_peer *peer,
3277 					       uint8_t tid,
3278 					       uint64_t txdesc_ts,
3279 					       uint32_t ppdu_id)
3280 {
3281 }
3282 #endif
3283 
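/*
 * Standalone sketch (not qdf_ewma_tx_lag_add()): avg_sojourn_msdu above is an
 * exponentially weighted moving average, i.e. a running mean that weights
 * recent samples more heavily. A minimal fixed-weight variant, assuming a
 * hypothetical 1/8 weight for the newest sample, looks like this:
 */
static inline uint64_t
dp_tx_sojourn_ewma_sketch(uint64_t avg_ms, uint64_t sample_ms)
{
	/* new_avg = 7/8 * old_avg + 1/8 * new_sample */
	return (avg_ms * 7 + sample_ms) / 8;
}
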
3284 /**
3285  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3286  * @soc: DP Soc handle
3287  * @tx_desc: software Tx descriptor
3288  * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
3289  *
3290  * Return: none
3291  */
3292 static inline void
3293 dp_tx_comp_process_desc(struct dp_soc *soc,
3294 			struct dp_tx_desc_s *desc,
3295 			struct hal_tx_completion_status *ts,
3296 			struct dp_peer *peer)
3297 {
3298 	uint64_t time_latency = 0;
3299 	/*
3300 	 * m_copy/tx_capture modes are not supported for
3301 	 * scatter gather packets
3302 	 */
3303 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3304 		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
3305 				desc->timestamp);
3306 	}
3307 	if (!(desc->msdu_ext_desc)) {
3308 		if (QDF_STATUS_SUCCESS ==
3309 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3310 			return;
3311 		}
3312 
3313 		if (QDF_STATUS_SUCCESS ==
3314 		    dp_get_completion_indication_for_stack(soc,
3315 							   desc->pdev,
3316 							   peer, ts,
3317 							   desc->nbuf,
3318 							   time_latency)) {
3319 			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
3320 						     QDF_DMA_TO_DEVICE,
3321 						     desc->nbuf->len);
3322 			dp_send_completion_to_stack(soc,
3323 						    desc->pdev,
3324 						    ts->peer_id,
3325 						    ts->ppdu_id,
3326 						    desc->nbuf);
3327 			return;
3328 		}
3329 	}
3330 
3331 	dp_tx_comp_free_buf(soc, desc);
3332 }
3333 
3334 /**
3335  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3336  * @tx_desc: software descriptor head pointer
3337  * @ts: Tx completion status
3338  * @peer: peer handle
3339  * @ring_id: ring number
3340  *
3341  * Return: none
3342  */
3343 static inline
3344 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
3345 				  struct hal_tx_completion_status *ts,
3346 				  struct dp_peer *peer, uint8_t ring_id)
3347 {
3348 	uint32_t length;
3349 	qdf_ether_header_t *eh;
3350 	struct dp_soc *soc = NULL;
3351 	struct dp_vdev *vdev = tx_desc->vdev;
3352 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3353 
3354 	if (!vdev || !nbuf) {
3355 		dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
3356 		goto out;
3357 	}
3358 
3359 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3360 
3361 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3362 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3363 				 QDF_TRACE_DEFAULT_PDEV_ID,
3364 				 qdf_nbuf_data_addr(nbuf),
3365 				 sizeof(qdf_nbuf_data(nbuf)),
3366 				 tx_desc->id,
3367 				 ts->status));
3368 
3369 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3370 				"-------------------- \n"
3371 				"Tx Completion Stats: \n"
3372 				"-------------------- \n"
3373 				"ack_frame_rssi = %d \n"
3374 				"first_msdu = %d \n"
3375 				"last_msdu = %d \n"
3376 				"msdu_part_of_amsdu = %d \n"
3377 				"rate_stats valid = %d \n"
3378 				"bw = %d \n"
3379 				"pkt_type = %d \n"
3380 				"stbc = %d \n"
3381 				"ldpc = %d \n"
3382 				"sgi = %d \n"
3383 				"mcs = %d \n"
3384 				"ofdma = %d \n"
3385 				"tones_in_ru = %d \n"
3386 				"tsf = %d \n"
3387 				"ppdu_id = %d \n"
3388 				"transmit_cnt = %d \n"
3389 				"tid = %d \n"
3390 				"peer_id = %d\n",
3391 				ts->ack_frame_rssi, ts->first_msdu,
3392 				ts->last_msdu, ts->msdu_part_of_amsdu,
3393 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3394 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3395 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3396 				ts->transmit_cnt, ts->tid, ts->peer_id);
3397 
3398 	soc = vdev->pdev->soc;
3399 
3400 	/* Update SoC level stats */
3401 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3402 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3403 
3404 	/* Update per-packet stats for mesh mode */
3405 	if (qdf_unlikely(vdev->mesh_vdev) &&
3406 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3407 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3408 
3409 	length = qdf_nbuf_len(nbuf);
3410 	/* Update peer level stats */
3411 	if (!peer) {
3412 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
3413 				   "peer is null or deletion in progress");
3414 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3415 		goto out;
3416 	}
3417 
3418 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
3419 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3420 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3421 
3422 			if ((peer->vdev->tx_encap_type ==
3423 				htt_cmn_pkt_type_ethernet) &&
3424 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3425 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3426 			}
3427 		}
3428 	} else {
3429 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3430 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3431 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3432 	}
3433 
3434 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
3435 
3436 #ifdef QCA_SUPPORT_RDK_STATS
3437 	if (soc->wlanstats_enabled)
3438 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3439 					    tx_desc->timestamp,
3440 					    ts->ppdu_id);
3441 #endif
3442 
3443 out:
3444 	return;
3445 }

3446 /**
3447  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3448  * @soc: core txrx main context
3449  * @comp_head: software descriptor head pointer
3450  * @ring_id: ring number
3451  *
3452  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3453  * and release the software descriptors after processing is complete
3454  *
3455  * Return: none
3456  */
3457 static void
3458 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3459 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
3460 {
3461 	struct dp_tx_desc_s *desc;
3462 	struct dp_tx_desc_s *next;
3463 	struct hal_tx_completion_status ts;
3464 	struct dp_peer *peer;
3465 	qdf_nbuf_t netbuf;
3466 
3467 	desc = comp_head;
3468 
3469 	while (desc) {
3470 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
3471 			struct dp_pdev *pdev = desc->pdev;
3472 
3473 			peer = dp_peer_find_by_id(soc, desc->peer_id);
3474 			if (qdf_likely(peer)) {
3475 				/*
3476 				 * Increment peer statistics
3477 				 * Minimal statistics update done here
3478 				 */
3479 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
3480 						 desc->length);
3481 
3482 				if (desc->tx_status !=
3483 						HAL_TX_TQM_RR_FRAME_ACKED)
3484 					peer->stats.tx.tx_failed++;
3485 
3486 				dp_peer_unref_del_find_by_id(peer);
3487 			}
3488 
3489 			qdf_assert(pdev);
3490 			dp_tx_outstanding_dec(pdev);
3491 
3492 			/*
3493 			 * Calling a QDF wrapper here creates a significant
3494 			 * performance impact, so the wrapper call is avoided here
3495 			 */
3496 			next = desc->next;
3497 			qdf_mem_unmap_nbytes_single(soc->osdev,
3498 						    desc->dma_addr,
3499 						    QDF_DMA_TO_DEVICE,
3500 						    desc->length);
3501 			qdf_nbuf_free(desc->nbuf);
3502 			dp_tx_desc_free(soc, desc, desc->pool_id);
3503 			desc = next;
3504 			continue;
3505 		}
3506 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3507 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3508 		dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);
3509 
3510 		netbuf = desc->nbuf;
3511 		/* check tx complete notification */
3512 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3513 			dp_tx_notify_completion(soc, desc, netbuf);
3514 
3515 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3516 
3517 		if (peer)
3518 			dp_peer_unref_del_find_by_id(peer);
3519 
3520 		next = desc->next;
3521 
3522 		dp_tx_desc_release(desc, desc->pool_id);
3523 		desc = next;
3524 	}
3525 
3526 }
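
/*
 * A minimal standalone sketch of the list-walk pattern relied on above:
 * the next pointer is cached before the current node is released, since
 * the descriptor memory goes back to the pool inside the loop. The
 * example_desc type and helpers are assumptions for illustration only,
 * not driver APIs; the block is kept under #if 0 so it is never built.
 */
#if 0
#include <stdlib.h>

struct example_desc {
	struct example_desc *next;
	void *payload;
};

static void example_process_list(struct example_desc *head)
{
	struct example_desc *desc = head;
	struct example_desc *next;

	while (desc) {
		next = desc->next;	/* cache before the node is freed */
		/* ... per-descriptor completion processing ... */
		free(desc);		/* desc must not be touched after this */
		desc = next;
	}
}
#endif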
3527 
3528 /**
3529  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3530  * @tx_desc: software tx descriptor
3531  * @status: Tx completion status from HTT descriptor
3532  * @ring_id: ring number
3533  *
3534  * This function will process HTT Tx indication messages from Target
3535  *
3536  * Return: none
3537  */
3538 static
3539 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
3540 				  uint8_t ring_id)
3541 {
3542 	uint8_t tx_status;
3543 	struct dp_pdev *pdev;
3544 	struct dp_vdev *vdev;
3545 	struct dp_soc *soc;
3546 	struct hal_tx_completion_status ts = {0};
3547 	uint32_t *htt_desc = (uint32_t *)status;
3548 	struct dp_peer *peer;
3549 	struct cdp_tid_tx_stats *tid_stats = NULL;
3550 	struct htt_soc *htt_handle;
3551 
3552 	/*
3553 	 * If the descriptor was already freed in vdev_detach,
3554 	 * there is nothing left to do; bail out
3555 	 */
3556 	if (!tx_desc->vdev && !tx_desc->flags) {
3557 		QDF_TRACE(QDF_MODULE_ID_DP,
3558 			  QDF_TRACE_LEVEL_INFO,
3559 			  "Descriptor freed in vdev_detach %d",
3560 			  tx_desc->id);
3561 		return;
3562 	}
3563 
3564 	pdev = tx_desc->pdev;
3565 	soc = pdev->soc;
3566 
3567 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3568 		QDF_TRACE(QDF_MODULE_ID_DP,
3569 			  QDF_TRACE_LEVEL_INFO,
3570 			  "pdev in down state %d",
3571 			  tx_desc->id);
3572 		dp_tx_comp_free_buf(soc, tx_desc);
3573 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3574 		return;
3575 	}
3576 
3577 	qdf_assert(tx_desc->pdev);
3578 
3579 	vdev = tx_desc->vdev;
3580 
3581 	if (!vdev)
3582 		return;
3583 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3584 	htt_handle = (struct htt_soc *)soc->htt_handle;
3585 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
3586 
3587 	switch (tx_status) {
3588 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3589 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3590 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3591 	{
3592 		uint8_t tid;
3593 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3594 			ts.peer_id =
3595 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3596 						htt_desc[2]);
3597 			ts.tid =
3598 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3599 						htt_desc[2]);
3600 		} else {
3601 			ts.peer_id = HTT_INVALID_PEER;
3602 			ts.tid = HTT_INVALID_TID;
3603 		}
3604 		ts.ppdu_id =
3605 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3606 					htt_desc[1]);
3607 		ts.ack_frame_rssi =
3608 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3609 					htt_desc[1]);
3610 
3611 		ts.tsf = htt_desc[3];
3612 		ts.first_msdu = 1;
3613 		ts.last_msdu = 1;
3614 		tid = ts.tid;
3615 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3616 			tid = CDP_MAX_DATA_TIDS - 1;
3617 
3618 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3619 
3620 		if (qdf_unlikely(pdev->delay_stats_flag))
3621 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
3622 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
3623 			tid_stats->htt_status_cnt[tx_status]++;
3624 		}
3625 
3626 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3627 
3628 		if (qdf_likely(peer))
3629 			dp_peer_unref_del_find_by_id(peer);
3630 
3631 		dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
3632 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3633 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3634 
3635 		break;
3636 	}
3637 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3638 	{
3639 		dp_tx_reinject_handler(tx_desc, status);
3640 		break;
3641 	}
3642 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3643 	{
3644 		dp_tx_inspect_handler(tx_desc, status);
3645 		break;
3646 	}
3647 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3648 	{
3649 		dp_tx_mec_handler(vdev, status);
3650 		break;
3651 	}
3652 	default:
3653 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3654 			  "%s Invalid HTT tx_status %d\n",
3655 			  __func__, tx_status);
3656 		break;
3657 	}
3658 }
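
/*
 * A minimal sketch of the mask/shift accessor style behind the
 * HTT_TX_WBM_COMPLETION_V2_*_GET() macros used above: each field is
 * extracted from a 32-bit completion word with a mask and a shift.
 * The EXAMPLE_* names and the field layout are assumptions for
 * illustration, not the real HTT definitions; kept under #if 0.
 */
#if 0
#include <stdint.h>

#define EXAMPLE_PEER_ID_M	0x0000ffff
#define EXAMPLE_PEER_ID_S	0
#define EXAMPLE_TID_NUM_M	0x000f0000
#define EXAMPLE_TID_NUM_S	16

#define EXAMPLE_GET(word, field) \
	(((word) & EXAMPLE_##field##_M) >> EXAMPLE_##field##_S)

static void example_parse_completion_word(uint32_t word,
					  uint16_t *peer_id, uint8_t *tid)
{
	*peer_id = (uint16_t)EXAMPLE_GET(word, PEER_ID);
	*tid = (uint8_t)EXAMPLE_GET(word, TID_NUM);
}
#endif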
3659 
3660 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3661 static inline
3662 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3663 {
3664 	bool limit_hit = false;
3665 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3666 
3667 	limit_hit =
3668 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
3669 
3670 	if (limit_hit)
3671 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3672 
3673 	return limit_hit;
3674 }
3675 
3676 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3677 {
3678 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3679 }
3680 #else
3681 static inline
3682 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3683 {
3684 	return false;
3685 }
3686 
3687 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3688 {
3689 	return false;
3690 }
3691 #endif
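
/*
 * A minimal sketch of how a per-loop packet limit of this kind is
 * typically consumed by a reap loop: reaping stops early once the
 * configured limit is hit, even if more ring entries remain, so one
 * ring cannot monopolise the softirq context. example_reap() and its
 * arguments are assumptions for illustration, not driver code.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool example_limit_hit(uint32_t count, uint32_t limit)
{
	return count >= limit;
}

static uint32_t example_reap(uint32_t num_pending, uint32_t limit)
{
	uint32_t count = 0;

	while (num_pending--) {
		/* ... process one completion ring entry ... */
		count++;
		if (example_limit_hit(count, limit))
			break;	/* leave the rest for the next pass */
	}

	return count;
}
#endif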
3692 
3693 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3694 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
3695 			    uint32_t quota)
3696 {
3697 	void *tx_comp_hal_desc;
3698 	uint8_t buffer_src;
3699 	uint8_t pool_id;
3700 	uint32_t tx_desc_id;
3701 	struct dp_tx_desc_s *tx_desc = NULL;
3702 	struct dp_tx_desc_s *head_desc = NULL;
3703 	struct dp_tx_desc_s *tail_desc = NULL;
3704 	uint32_t num_processed = 0;
3705 	uint32_t count = 0;
3706 	uint32_t num_avail_for_reap = 0;
3707 	bool force_break = false;
3708 
3709 	DP_HIST_INIT();
3710 
3711 more_data:
3712 	/* Re-initialize local variables to be re-used */
3713 	head_desc = NULL;
3714 	tail_desc = NULL;
3715 
3716 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
3717 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
3718 		return 0;
3719 	}
3720 
3721 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
3722 
3723 	if (num_avail_for_reap >= quota)
3724 		num_avail_for_reap = quota;
3725 
3726 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
3727 
3728 	/* Find head descriptor from completion ring */
3729 	while (qdf_likely(num_avail_for_reap)) {
3730 
3731 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
3732 		if (qdf_unlikely(!tx_comp_hal_desc))
3733 			break;
3734 
3735 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3736 
3737 		/* If this buffer was not released by TQM or FW, it is not a
3738 		 * Tx completion indication; log the error and skip it */
3739 		if (qdf_unlikely(buffer_src !=
3740 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3741 				 (qdf_unlikely(buffer_src !=
3742 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
3743 			uint8_t wbm_internal_error;
3744 
3745 			dp_err_rl(
3746 				"Tx comp release_src != TQM | FW but from %d",
3747 				buffer_src);
3748 			hal_dump_comp_desc(tx_comp_hal_desc);
3749 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3750 
3751 			/* When WBM sees a NULL buffer_addr_info in any of
3752 			 * its ingress rings, it sends an error indication,
3753 			 * with wbm_internal_error=1, to a specific ring.
3754 			 * The WBM2SW ring used to indicate these errors is
3755 			 * fixed in HW, and that ring is also used as the Tx
3756 			 * completion ring. These errors are not related to
3757 			 * Tx completions and should simply be ignored.
3758 			 */
3759 			wbm_internal_error = hal_get_wbm_internal_error(
3760 							soc->hal_soc,
3761 							tx_comp_hal_desc);
3762 
3763 			if (wbm_internal_error) {
3764 				dp_err_rl("Tx comp wbm_internal_error!!");
3765 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
3766 
3767 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
3768 								buffer_src)
3769 					dp_handle_wbm_internal_error(
3770 						soc,
3771 						tx_comp_hal_desc,
3772 						hal_tx_comp_get_buffer_type(
3773 							tx_comp_hal_desc));
3774 
3775 			} else {
3776 				dp_err_rl("Tx comp wbm_internal_error false");
3777 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
3778 			}
3779 			continue;
3780 		}
3781 
3782 		/* Get descriptor id */
3783 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3784 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3785 			DP_TX_DESC_ID_POOL_OS;
3786 
3787 		/* Find Tx descriptor */
3788 		tx_desc = dp_tx_desc_find(soc, pool_id,
3789 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3790 				DP_TX_DESC_ID_PAGE_OS,
3791 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3792 				DP_TX_DESC_ID_OFFSET_OS);
3793 
3794 		/*
3795 		 * If the release source is FW, process the HTT status
3796 		 */
3797 		if (qdf_unlikely(buffer_src ==
3798 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3799 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3800 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3801 					htt_tx_status);
3802 			dp_tx_process_htt_completion(tx_desc,
3803 					htt_tx_status, ring_id);
3804 		} else {
3805 			/*
3806 			 * If the fast completion mode is enabled, the extended
3807 			 * metadata from the descriptor is not copied
3808 			 */
3809 			if (qdf_likely(tx_desc->flags &
3810 						DP_TX_DESC_FLAG_SIMPLE)) {
3811 				tx_desc->peer_id =
3812 					hal_tx_comp_get_peer_id(tx_comp_hal_desc);
3813 				tx_desc->tx_status =
3814 					hal_tx_comp_get_tx_status(tx_comp_hal_desc);
3815 				goto add_to_pool;
3816 			}
3817 
3818 			/*
3819 			 * If the descriptor is already freed in vdev_detach,
3820 			 * continue to next descriptor
3821 			 */
3822 			if (qdf_unlikely(!tx_desc->vdev) &&
3823 					 qdf_unlikely(!tx_desc->flags)) {
3824 				QDF_TRACE(QDF_MODULE_ID_DP,
3825 					  QDF_TRACE_LEVEL_INFO,
3826 					  "Descriptor freed in vdev_detach %d",
3827 					  tx_desc_id);
3828 				continue;
3829 			}
3830 
3831 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3832 				QDF_TRACE(QDF_MODULE_ID_DP,
3833 					  QDF_TRACE_LEVEL_INFO,
3834 					  "pdev in down state %d",
3835 					  tx_desc_id);
3836 
3837 				dp_tx_comp_free_buf(soc, tx_desc);
3838 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3839 				goto next_desc;
3840 			}
3841 
3842 			/* Pool id is not matching. Error */
3843 			if (tx_desc->pool_id != pool_id) {
3844 				QDF_TRACE(QDF_MODULE_ID_DP,
3845 					QDF_TRACE_LEVEL_FATAL,
3846 					"Tx Comp pool id %d not matched %d",
3847 					pool_id, tx_desc->pool_id);
3848 
3849 				qdf_assert_always(0);
3850 			}
3851 
3852 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3853 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3854 				QDF_TRACE(QDF_MODULE_ID_DP,
3855 					  QDF_TRACE_LEVEL_FATAL,
3856 					  "Txdesc invalid, flgs = %x,id = %d",
3857 					  "Txdesc invalid, flags = %x, id = %d",
3858 				qdf_assert_always(0);
3859 			}
3860 
3861 			/* Collect hw completion contents */
3862 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3863 					      &tx_desc->comp, 1);
3864 add_to_pool:
3865 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
3866 
3867 			/* First descriptor reaped in this cycle */
3868 			if (!head_desc) {
3869 				head_desc = tx_desc;
3870 				tail_desc = tx_desc;
3871 			}
3872 
3873 			tail_desc->next = tx_desc;
3874 			tx_desc->next = NULL;
3875 			tail_desc = tx_desc;
3876 		}
3877 next_desc:
3878 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3879 
3880 		/*
3881 		 * Stop reaping once the processed packet count reaches
3882 		 * the per-loop packet limit
3883 		 */
3884 
3885 		count++;
3886 
3887 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
3888 			break;
3889 	}
3890 
3891 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3892 
3893 	/* Process the reaped descriptors */
3894 	if (head_desc)
3895 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
3896 
3897 	if (dp_tx_comp_enable_eol_data_check(soc)) {
3898 
3899 		if (num_processed >= quota)
3900 			force_break = true;
3901 
3902 		if (!force_break &&
3903 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
3904 						  hal_ring_hdl)) {
3905 			DP_STATS_INC(soc, tx.hp_oos2, 1);
3906 			if (!hif_exec_should_yield(soc->hif_handle,
3907 						   int_ctx->dp_intr_id))
3908 				goto more_data;
3909 		}
3910 	}
3911 	DP_TX_HIST_STATS_PER_PDEV();
3912 
3913 	return num_processed;
3914 }
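
/*
 * A hedged usage sketch for dp_tx_comp_handler(): a service routine is
 * assumed to invoke it once per enabled Tx completion ring with the
 * remaining budget and to accumulate the work it reports. Only the
 * dp_tx_comp_handler() signature comes from this file; the caller
 * shape, the tx_ring_mask walk and the use of
 * soc->tx_comp_ring[ring].hal_srng as the ring handle are assumptions.
 */
#if 0
static uint32_t example_service_tx_rings(struct dp_intr *int_ctx,
					 struct dp_soc *soc,
					 uint8_t tx_ring_mask,
					 uint32_t budget)
{
	uint32_t work_done = 0;
	uint8_t ring = 0;

	while (tx_ring_mask && budget > work_done) {
		if (tx_ring_mask & 0x1)
			work_done += dp_tx_comp_handler(
					int_ctx, soc,
					soc->tx_comp_ring[ring].hal_srng,
					ring, budget - work_done);
		tx_ring_mask >>= 1;
		ring++;
	}

	return work_done;
}
#endif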
3915 
3916 #ifdef FEATURE_WLAN_TDLS
3917 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3918 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3919 {
3920 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3921 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
3922 
3923 	if (!vdev) {
3924 		dp_err("vdev handle for id %d is NULL", vdev_id);
3925 		return NULL;
3926 	}
3927 
3928 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3929 		vdev->is_tdls_frame = true;
3930 
3931 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
3932 }
3933 #endif
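
/*
 * A hedged usage sketch for dp_tx_non_std(), assuming FEATURE_WLAN_TDLS
 * is enabled: a caller that wants to keep ownership of the frame passes
 * OL_TX_SPEC_NO_FREE, which marks the vdev as carrying a TDLS frame
 * before the buffer goes down the regular dp_tx_send() path. The caller
 * name and how soc_hdl/vdev_id/nbuf are obtained are assumptions.
 */
#if 0
static qdf_nbuf_t example_send_tdls_frame(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	/* A non-NULL return means the frame was not consumed */
	return dp_tx_non_std(soc_hdl, vdev_id, OL_TX_SPEC_NO_FREE, nbuf);
}
#endif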
3934 
3935 /**
3936  * dp_tx_vdev_attach() - attach vdev to dp tx
3937  * @vdev: virtual device instance
3938  *
3939  * Return: QDF_STATUS_SUCCESS: success
3940  *         QDF_STATUS_E_RESOURCES: Error return
3941  */
3942 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3943 {
3944 	int pdev_id;
3945 	/*
3946 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3947 	 */
3948 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3949 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3950 
3951 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3952 			vdev->vdev_id);
3953 
3954 	pdev_id =
3955 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
3956 						       vdev->pdev->pdev_id);
3957 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
3958 
3959 	/*
3960 	 * Set HTT Extension Valid bit to 0 by default
3961 	 */
3962 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3963 
3964 	dp_tx_vdev_update_search_flags(vdev);
3965 
3966 	return QDF_STATUS_SUCCESS;
3967 }
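
/*
 * A minimal sketch of the clear-then-set style behind the
 * HTT_TX_TCL_METADATA_*_SET() macros used above: a field is first
 * cleared with its mask and then OR-ed in at its shift, so updating one
 * field leaves the others intact. The EXAMPLE_* layout is an assumption
 * for illustration, not the real HTT TCL metadata format.
 */
#if 0
#include <stdint.h>

#define EXAMPLE_TYPE_M		0x00000003
#define EXAMPLE_TYPE_S		0
#define EXAMPLE_VDEV_ID_M	0x000003fc
#define EXAMPLE_VDEV_ID_S	2

#define EXAMPLE_SET(word, field, val)					\
	do {								\
		(word) &= ~EXAMPLE_##field##_M;				\
		(word) |= (((uint32_t)(val) << EXAMPLE_##field##_S) &	\
			   EXAMPLE_##field##_M);			\
	} while (0)

static uint32_t example_build_tcl_metadata(uint8_t type, uint8_t vdev_id)
{
	uint32_t metadata = 0;

	EXAMPLE_SET(metadata, TYPE, type);
	EXAMPLE_SET(metadata, VDEV_ID, vdev_id);
	return metadata;
}
#endif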
3968 
3969 #ifndef FEATURE_WDS
3970 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3971 {
3972 	return false;
3973 }
3974 #endif
3975 
3976 /**
3977  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3978  * @vdev: virtual device instance
3979  *
3980  * Return: void
3981  *
3982  */
3983 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3984 {
3985 	struct dp_soc *soc = vdev->pdev->soc;
3986 
3987 	/*
3988 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3989 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3990 	 *
3991 	 * Enable AddrY (SA based search) only for non-WDS STA and
3992 	 * ProxySTA VAP (in HKv1) modes.
3993 	 *
3994 	 * In all other VAP modes, only DA based search should be
3995 	 * enabled
3996 	 */
3997 	if (vdev->opmode == wlan_op_mode_sta &&
3998 	    vdev->tdls_link_connected)
3999 		vdev->hal_desc_addr_search_flags =
4000 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4001 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4002 		 !dp_tx_da_search_override(vdev))
4003 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4004 	else
4005 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4006 
4007 	/* Set search type only when peer map v2 messaging is enabled
4008 	 * as we will have the search index (AST hash) only when v2 is
4009 	 * enabled
4010 	 */
4011 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4012 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4013 	else
4014 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4015 }
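
/*
 * Worked examples of the selection above, assuming the flag semantics
 * described in the preceding comment:
 *   - STA vdev with a TDLS link:          ADDRX_EN | ADDRY_EN
 *   - STA vdev without a DA override:     ADDRY_EN (SA based search)
 *   - STA vdev with a WDS/DA override,
 *     or any other vdev opmode:           ADDRX_EN (DA based search)
 * search_type additionally becomes HAL_TX_ADDR_INDEX_SEARCH only for
 * STA vdevs on a soc with peer map/unmap v2 enabled.
 */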
4016 
4017 static inline bool
4018 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4019 			  struct dp_vdev *vdev,
4020 			  struct dp_tx_desc_s *tx_desc)
4021 {
4022 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4023 		return false;
4024 
4025 	/*
4026 	 * If a vdev is given, only check whether the descriptor's
4027 	 * vdev matches. If vdev is NULL, check whether the
4028 	 * descriptor's pdev matches.
4029 	 */
4030 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
4031 }
4032 
4033 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4034 /**
4035  * dp_tx_desc_flush() - release resources associated
4036  *                      with TX descs
4037  *
4038  * @pdev: Handle to DP pdev structure
4039  * @vdev: virtual device instance
4040  * NULL: no specific vdev is required; check all allocated TX descs
4041  * on this pdev.
4042  * Non-NULL: only check the allocated TX descs associated with this vdev.
4043  *
4044  * @force_free:
4045  * true: flush the TX descs.
4046  * false: only reset the vdev pointer in each allocated TX desc
4047  * associated with the current vdev.
4048  *
4049  * This function walks the TX desc pools to flush the outstanding
4050  * TX data or to reset the vdev pointer to NULL in the matching
4051  * TX descs.
4052  */
4053 static void dp_tx_desc_flush(struct dp_pdev *pdev,
4054 			     struct dp_vdev *vdev,
4055 			     bool force_free)
4056 {
4057 	uint8_t i;
4058 	uint32_t j;
4059 	uint32_t num_desc, page_id, offset;
4060 	uint16_t num_desc_per_page;
4061 	struct dp_soc *soc = pdev->soc;
4062 	struct dp_tx_desc_s *tx_desc = NULL;
4063 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4064 
4065 	if (!vdev && !force_free) {
4066 		dp_err("Reset TX desc vdev, Vdev param is required!");
4067 		return;
4068 	}
4069 
4070 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4071 		tx_desc_pool = &soc->tx_desc[i];
4072 		if (!(tx_desc_pool->pool_size) ||
4073 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4074 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4075 			continue;
4076 
4077 		/*
4078 		 * Take the flow pool lock in case the pool is freed
4079 		 * because all tx_descs get recycled while handling TX
4080 		 * completions. This is not necessary for a force flush as:
4081 		 * a. a double lock would occur if dp_tx_desc_release is
4082 		 *    also trying to acquire it.
4083 		 * b. dp interrupts have already been disabled before the
4084 		 *    force TX desc flush in dp_pdev_deinit().
4085 		 */
4086 		if (!force_free)
4087 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4088 		num_desc = tx_desc_pool->pool_size;
4089 		num_desc_per_page =
4090 			tx_desc_pool->desc_pages.num_element_per_page;
4091 		for (j = 0; j < num_desc; j++) {
4092 			page_id = j / num_desc_per_page;
4093 			offset = j % num_desc_per_page;
4094 
4095 			if (qdf_unlikely(!(tx_desc_pool->
4096 					 desc_pages.cacheable_pages)))
4097 				break;
4098 
4099 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4100 
4101 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4102 				/*
4103 				 * Free the TX desc if force free is
4104 				 * required, otherwise only reset the
4105 				 * vdev in this TX desc.
4106 				 */
4107 				if (force_free) {
4108 					dp_tx_comp_free_buf(soc, tx_desc);
4109 					dp_tx_desc_release(tx_desc, i);
4110 				} else {
4111 					tx_desc->vdev = NULL;
4112 				}
4113 			}
4114 		}
4115 		if (!force_free)
4116 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
4117 	}
4118 }
4119 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4120 /**
4121  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
4122  *
4123  * @soc: Handle to DP soc structure
4124  * @tx_desc: pointer of one TX desc
4125  * @desc_pool_id: TX Desc pool id
4126  */
4127 static inline void
4128 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
4129 		      uint8_t desc_pool_id)
4130 {
4131 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
4132 
4133 	tx_desc->vdev = NULL;
4134 
4135 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
4136 }
4137 
4138 static void dp_tx_desc_flush(struct dp_pdev *pdev,
4139 			     struct dp_vdev *vdev,
4140 			     bool force_free)
4141 {
4142 	uint8_t i, num_pool;
4143 	uint32_t j;
4144 	uint32_t num_desc, page_id, offset;
4145 	uint16_t num_desc_per_page;
4146 	struct dp_soc *soc = pdev->soc;
4147 	struct dp_tx_desc_s *tx_desc = NULL;
4148 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4149 
4150 	if (!vdev && !force_free) {
4151 		dp_err("Reset TX desc vdev, Vdev param is required!");
4152 		return;
4153 	}
4154 
4155 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4156 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4157 
4158 	for (i = 0; i < num_pool; i++) {
4159 		tx_desc_pool = &soc->tx_desc[i];
4160 		if (!tx_desc_pool->desc_pages.cacheable_pages)
4161 			continue;
4162 
4163 		num_desc_per_page =
4164 			tx_desc_pool->desc_pages.num_element_per_page;
4165 		for (j = 0; j < num_desc; j++) {
4166 			page_id = j / num_desc_per_page;
4167 			offset = j % num_desc_per_page;
4168 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4169 
4170 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4171 				if (force_free) {
4172 					dp_tx_comp_free_buf(soc, tx_desc);
4173 					dp_tx_desc_release(tx_desc, i);
4174 				} else {
4175 					dp_tx_desc_reset_vdev(soc, tx_desc,
4176 							      i);
4177 				}
4178 			}
4179 		}
4180 	}
4181 }
4182 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
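
/*
 * A small worked example of the page_id/offset arithmetic used when
 * walking a descriptor pool in both dp_tx_desc_flush() variants above:
 * with num_element_per_page = 64 (a value assumed purely for
 * illustration), flat index j = 130 maps to page_id = 130 / 64 = 2 and
 * offset = 130 % 64 = 2, i.e. the third element of the third page.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void example_page_math(void)
{
	uint16_t num_desc_per_page = 64;	/* assumed page capacity */
	uint32_t j = 130;			/* flat descriptor index */
	uint32_t page_id = j / num_desc_per_page;
	uint32_t offset = j % num_desc_per_page;

	assert(page_id == 2 && offset == 2);
}
#endif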
4183 
4184 /**
4185  * dp_tx_vdev_detach() - detach vdev from dp tx
4186  * @vdev: virtual device instance
4187  *
4188  * Return: QDF_STATUS_SUCCESS: success
4189  *         QDF_STATUS_E_RESOURCES: Error return
4190  */
4191 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
4192 {
4193 	struct dp_pdev *pdev = vdev->pdev;
4194 
4195 	/* Reset TX desc associated to this Vdev as NULL */
4196 	dp_tx_desc_flush(pdev, vdev, false);
4197 	dp_tx_vdev_multipass_deinit(vdev);
4198 
4199 	return QDF_STATUS_SUCCESS;
4200 }
4201 
4202 /**
4203  * dp_tx_pdev_init() - initialize dp tx for the pdev
4204  * @pdev: physical device instance
4205  *
4206  * Return: QDF_STATUS_SUCCESS: success
4207  *         QDF_STATUS_E_RESOURCES: Error return
4208  */
4209 QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
4210 {
4211 	struct dp_soc *soc = pdev->soc;
4212 
4213 	/* Initialize Flow control counters */
4214 	qdf_atomic_init(&pdev->num_tx_exception);
4215 	qdf_atomic_init(&pdev->num_tx_outstanding);
4216 
4217 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4218 		/* Initialize descriptors in TCL Ring */
4219 		hal_tx_init_data_ring(soc->hal_soc,
4220 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
4221 	}
4222 
4223 	return QDF_STATUS_SUCCESS;
4224 }
4225 
4226 /**
4227  * dp_tx_pdev_detach() - detach pdev from dp tx
4228  * @pdev: physical device instance
4229  *
4230  * Return: QDF_STATUS_SUCCESS: success
4231  *         QDF_STATUS_E_RESOURCES: Error return
4232  */
4233 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
4234 {
4235 	/* flush TX outstanding data per pdev */
4236 	dp_tx_desc_flush(pdev, NULL, true);
4237 	dp_tx_me_exit(pdev);
4238 	return QDF_STATUS_SUCCESS;
4239 }
4240 
4241 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4242 /* Pools will be allocated dynamically */
4243 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4244 					   int num_desc)
4245 {
4246 	uint8_t i;
4247 
4248 	for (i = 0; i < num_pool; i++) {
4249 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
4250 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
4251 	}
4252 
4253 	return QDF_STATUS_SUCCESS;
4254 }
4255 
4256 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4257 					  int num_desc)
4258 {
4259 	return QDF_STATUS_SUCCESS;
4260 }
4261 
4262 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4263 {
4264 }
4265 
4266 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4267 {
4268 	uint8_t i;
4269 
4270 	for (i = 0; i < num_pool; i++)
4271 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
4272 }
4273 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4274 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4275 					   int num_desc)
4276 {
4277 	uint8_t i, count;
4278 
4279 	/* Allocate software Tx descriptor pools */
4280 	for (i = 0; i < num_pool; i++) {
4281 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
4282 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4283 				  FL("Tx Desc Pool alloc %d failed %pK"),
4284 				  i, soc);
4285 			goto fail;
4286 		}
4287 	}
4288 	return QDF_STATUS_SUCCESS;
4289 
4290 fail:
4291 	for (count = 0; count < i; count++)
4292 		dp_tx_desc_pool_free(soc, count);
4293 
4294 	return QDF_STATUS_E_NOMEM;
4295 }
4296 
4297 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4298 					  int num_desc)
4299 {
4300 	uint8_t i;
4301 	for (i = 0; i < num_pool; i++) {
4302 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
4303 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4304 				  FL("Tx Desc Pool init %d failed %pK"),
4305 				  i, soc);
4306 			return QDF_STATUS_E_NOMEM;
4307 		}
4308 	}
4309 	return QDF_STATUS_SUCCESS;
4310 }
4311 
4312 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4313 {
4314 	uint8_t i;
4315 
4316 	for (i = 0; i < num_pool; i++)
4317 		dp_tx_desc_pool_deinit(soc, i);
4318 }
4319 
4320 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4321 {
4322 	uint8_t i;
4323 
4324 	for (i = 0; i < num_pool; i++)
4325 		dp_tx_desc_pool_free(soc, i);
4326 }
4327 
4328 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
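
/*
 * A minimal sketch of the two-phase pool lifecycle the helpers above
 * follow: memory allocation (alloc/free) is kept separate from state
 * initialisation (init/deinit), and teardown mirrors bring-up in
 * reverse order. The example_pool type and helpers are assumptions for
 * illustration, not driver APIs.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct example_pool {
	void *elems;
	int num_elems;
	int free_cnt;
};

static int example_pool_alloc(struct example_pool *p, int n, size_t sz)
{
	p->elems = malloc((size_t)n * sz);	/* alloc phase: memory only */
	p->num_elems = n;
	return p->elems ? 0 : -1;
}

static void example_pool_init(struct example_pool *p, size_t sz)
{
	memset(p->elems, 0, (size_t)p->num_elems * sz);
	p->free_cnt = p->num_elems;		/* init phase: runtime state */
}

static void example_pool_deinit(struct example_pool *p)
{
	p->free_cnt = 0;			/* undo init-time state */
}

static void example_pool_free(struct example_pool *p)
{
	free(p->elems);				/* undo alloc-time memory */
	p->elems = NULL;
}
#endif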
4329 
4330 /**
4331  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
4332  * @soc: core txrx main context
4333  * @num_pool: number of pools
4334  *
4335  */
4336 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
4337 {
4338 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
4339 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
4340 }
4341 
4342 /**
4343  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
4344  * @soc: core txrx main context
4345  * @num_pool: number of pools
4346  *
4347  */
4348 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
4349 {
4350 	dp_tx_tso_desc_pool_free(soc, num_pool);
4351 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
4352 }
4353 
4354 /**
4355  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
4356  * @soc: core txrx main context
4357  *
4358  * This function frees all tx related descriptors as below
4359  * 1. Regular TX descriptors (static pools)
4360  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4361  * 3. TSO descriptors
4362  *
4363  */
4364 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
4365 {
4366 	uint8_t num_pool;
4367 
4368 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4369 
4370 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4371 	dp_tx_ext_desc_pool_free(soc, num_pool);
4372 	dp_tx_delete_static_pools(soc, num_pool);
4373 }
4374 
4375 /**
4376  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
4377  * @soc: core txrx main context
4378  *
4379  * This function de-initializes all tx related descriptors as below
4380  * 1. Regular TX descriptors (static pools)
4381  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4382  * 3. TSO descriptors
4383  *
4384  */
4385 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
4386 {
4387 	uint8_t num_pool;
4388 
4389 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4390 
4391 	dp_tx_flow_control_deinit(soc);
4392 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4393 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4394 	dp_tx_deinit_static_pools(soc, num_pool);
4395 }
4396 
4397 /**
4398  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
4399  * @soc: DP soc handle
4400  * @num_pool: Number of pools
4401  * @num_desc: Number of descriptors per pool
4402  *
4403  * Return: QDF_STATUS_E_FAILURE on failure or
4404  * QDF_STATUS_SUCCESS on success
4405  */
4406 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
4407 					 uint8_t num_pool,
4408 					 uint16_t num_desc)
4409 {
4410 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
4411 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
4412 		return QDF_STATUS_E_FAILURE;
4413 	}
4414 
4415 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
4416 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
4417 		       num_pool, soc);
4418 		return QDF_STATUS_E_FAILURE;
4419 	}
4420 	return QDF_STATUS_SUCCESS;
4421 }
4422 
4423 /**
4424  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
4425  * @soc: DP soc handle
4426  * @num_pool: Number of pools
4427  * @num_desc: Number of descriptors
4428  *
4429  * Initialize TSO descriptor pools
4430  *
4431  * Return: QDF_STATUS_E_FAILURE on failure or
4432  * QDF_STATUS_SUCCESS on success
4433  */
4434 
4435 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
4436 					uint8_t num_pool,
4437 					uint16_t num_desc)
4438 {
4439 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
4440 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
4441 		return QDF_STATUS_E_FAILURE;
4442 	}
4443 
4444 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
4445 		dp_err("TSO Num of seg Pool init %d failed %pK",
4446 		       num_pool, soc);
4447 		return QDF_STATUS_E_FAILURE;
4448 	}
4449 	return QDF_STATUS_SUCCESS;
4450 }
4451 
4452 /**
4453  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
4454  * @soc: core txrx main context
4455  *
4456  * This function allocates memory for following descriptor pools
4457  * 1. regular sw tx descriptor pools (static pools)
4458  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4459  * 3. TSO descriptor pools
4460  *
4461  * Return: QDF_STATUS_SUCCESS: success
4462  *         QDF_STATUS_E_RESOURCES: Error return
4463  */
4464 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
4465 {
4466 	uint8_t num_pool;
4467 	uint32_t num_desc;
4468 	uint32_t num_ext_desc;
4469 
4470 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4471 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4472 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4473 
4474 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4475 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
4476 		  __func__, num_pool, num_desc);
4477 
4478 	if ((num_pool > MAX_TXDESC_POOLS) ||
4479 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
4480 		goto fail1;
4481 
4482 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4483 		goto fail1;
4484 
4485 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
4486 		goto fail2;
4487 
4488 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4489 		return QDF_STATUS_SUCCESS;
4490 
4491 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4492 		goto fail3;
4493 
4494 	return QDF_STATUS_SUCCESS;
4495 
4496 fail3:
4497 	dp_tx_ext_desc_pool_free(soc, num_pool);
4498 fail2:
4499 	dp_tx_delete_static_pools(soc, num_pool);
4500 fail1:
4501 	return QDF_STATUS_E_RESOURCES;
4502 }
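
/*
 * A minimal sketch of the goto-based unwinding used above: each failure
 * label frees exactly the resources acquired before that point, so a
 * failure at any step releases everything already held, in reverse
 * order, and nothing else. The example_* helpers are illustrative
 * stand-ins, not driver calls.
 */
#if 0
#include <stdbool.h>

static bool a_held, b_held;	/* stand-ins for real resources */

static int example_a_alloc(void) { a_held = true;  return 0; }
static void example_a_free(void) { a_held = false; }
static int example_b_alloc(void) { b_held = true;  return 0; }
static void example_b_free(void) { b_held = false; }
static int example_c_alloc(void) { return 0; }

static int example_bring_up(void)
{
	if (example_a_alloc())
		goto fail1;
	if (example_b_alloc())
		goto fail2;
	if (example_c_alloc())
		goto fail3;

	return 0;		/* all three resources are now held */

fail3:
	example_b_free();	/* undo step 2 */
fail2:
	example_a_free();	/* undo step 1 */
fail1:
	return -1;
}
#endif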
4503 
4504 /**
4505  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
4506  * @soc: core txrx main context
4507  *
4508  * This function initializes the following TX descriptor pools
4509  * 1. regular sw tx descriptor pools (static pools)
4510  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4511  * 3. TSO descriptor pools
4512  *
4513  * Return: QDF_STATUS_SUCCESS: success
4514  *	   QDF_STATUS_E_RESOURCES: Error return
4515  */
4516 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
4517 {
4518 	uint8_t num_pool;
4519 	uint32_t num_desc;
4520 	uint32_t num_ext_desc;
4521 
4522 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4523 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4524 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4525 
4526 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
4527 		goto fail1;
4528 
4529 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
4530 		goto fail2;
4531 
4532 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4533 		return QDF_STATUS_SUCCESS;
4534 
4535 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4536 		goto fail3;
4537 
4538 	dp_tx_flow_control_init(soc);
4539 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4540 	return QDF_STATUS_SUCCESS;
4541 
4542 fail3:
4543 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4544 fail2:
4545 	dp_tx_deinit_static_pools(soc, num_pool);
4546 fail1:
4547 	return QDF_STATUS_E_RESOURCES;
4548 }
4549 
4550 /**
4551  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
4552  * @txrx_soc: dp soc handle
4553  *
4554  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4555  *			QDF_STATUS_E_FAILURE
4556  */
4557 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
4558 {
4559 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4560 	uint8_t num_pool;
4561 	uint32_t num_desc;
4562 	uint32_t num_ext_desc;
4563 
4564 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4565 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4566 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4567 
4568 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4569 		return QDF_STATUS_E_FAILURE;
4570 
4571 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4572 		return QDF_STATUS_E_FAILURE;
4573 
4574 	return QDF_STATUS_SUCCESS;
4575 }
4576 
4577 /**
4578  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
4579  * @txrx_soc: dp soc handle
4580  *
4581  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4582  */
4583 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
4584 {
4585 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4586 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4587 
4588 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4589 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4590 
4591 	return QDF_STATUS_SUCCESS;
4592 }
4593 
4594