xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision a90712e15de63f505999ffcf45c1b4d4a0d2b89b)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #ifdef ATH_SUPPORT_IQUE
41 #include "dp_txrx_me.h"
42 #endif
43 #include "dp_hist.h"
44 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
45 #include <dp_swlm.h>
46 #endif
47 
48 /* Flag to skip CCE classify when mesh or tid override enabled */
49 #define DP_TX_SKIP_CCE_CLASSIFY \
50 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
51 
52 /* TODO Add support in TSO */
53 #define DP_DESC_NUM_FRAG(x) 0
54 
55 /* TQM bypass WAR: when set to 1, all frames are flagged to be routed to FW; disabled (0) by default */
56 #define TQM_BYPASS_WAR 0
57 
58 /* invalid peer id for reinject */
59 #define DP_INVALID_PEER 0XFFFE
60 
61 /* mapping from cdp_sec_type (used as the index) to hal encrypt type */
62 #define MAX_CDP_SEC_TYPE 12
63 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
64 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
65 					HAL_TX_ENCRYPT_TYPE_WEP_128,
66 					HAL_TX_ENCRYPT_TYPE_WEP_104,
67 					HAL_TX_ENCRYPT_TYPE_WEP_40,
68 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
69 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
70 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
71 					HAL_TX_ENCRYPT_TYPE_WAPI,
72 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
73 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
74 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
75 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
76 
77 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
78 /**
79  * dp_update_tx_desc_stats - Track the peak number of outstanding tx
80  * descriptors on the pdev and report the current and peak counts to the
81  * sysfs memory stats interface
82  * @pdev: DP pdev handle
83  *
84  * Return: void
85  */
86 static inline void
87 dp_update_tx_desc_stats(struct dp_pdev *pdev)
88 {
89 	int32_t tx_descs_cnt =
90 		qdf_atomic_read(&pdev->num_tx_outstanding);
91 	if (pdev->tx_descs_max < tx_descs_cnt)
92 		pdev->tx_descs_max = tx_descs_cnt;
93 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
94 				   pdev->tx_descs_max);
95 }
96 
97 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
98 
99 static inline void
100 dp_update_tx_desc_stats(struct dp_pdev *pdev)
101 {
102 }
103 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
104 
105 #ifdef QCA_TX_LIMIT_CHECK
106 /**
107  * dp_tx_limit_check - Check if allocated tx descriptors reached
108  * soc max limit and pdev max limit
109  * @vdev: DP vdev handle
110  *
111  * Return: true if allocated tx descriptors reached max configured value, else
112  * false
113  */
114 static inline bool
115 dp_tx_limit_check(struct dp_vdev *vdev)
116 {
117 	struct dp_pdev *pdev = vdev->pdev;
118 	struct dp_soc *soc = pdev->soc;
119 
120 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
121 			soc->num_tx_allowed) {
122 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
123 			  "%s: queued packets are more than max tx, drop the frame",
124 			  __func__);
125 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
126 		return true;
127 	}
128 
129 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
130 			pdev->num_tx_allowed) {
131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
132 			  "%s: queued packets are more than max tx, drop the frame",
133 			  __func__);
134 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
135 		return true;
136 	}
137 	return false;
138 }
139 
140 /**
141  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
142  * reached soc max limit
143  * @vdev: DP vdev handle
144  *
145  * Return: true if allocated tx descriptors reached max configured value, else
146  * false
147  */
148 static inline bool
149 dp_tx_exception_limit_check(struct dp_vdev *vdev)
150 {
151 	struct dp_pdev *pdev = vdev->pdev;
152 	struct dp_soc *soc = pdev->soc;
153 
154 	if (qdf_atomic_read(&soc->num_tx_exception) >=
155 			soc->num_msdu_exception_desc) {
156 		dp_info("exc packets are more than max, drop the exc pkt");
157 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
158 		return true;
159 	}
160 
161 	return false;
162 }
163 
164 /**
165  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
166  * @pdev: DP pdev handle
167  *
168  * Return: void
169  */
170 static inline void
171 dp_tx_outstanding_inc(struct dp_pdev *pdev)
172 {
173 	struct dp_soc *soc = pdev->soc;
174 
175 	qdf_atomic_inc(&pdev->num_tx_outstanding);
176 	qdf_atomic_inc(&soc->num_tx_outstanding);
177 	dp_update_tx_desc_stats(pdev);
178 }
179 
180 /**
181  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
182  * @pdev: DP pdev handle
183  *
184  * Return: void
185  */
186 static inline void
187 dp_tx_outstanding_dec(struct dp_pdev *pdev)
188 {
189 	struct dp_soc *soc = pdev->soc;
190 
191 	qdf_atomic_dec(&pdev->num_tx_outstanding);
192 	qdf_atomic_dec(&soc->num_tx_outstanding);
193 	dp_update_tx_desc_stats(pdev);
194 }
195 
196 #else //QCA_TX_LIMIT_CHECK
197 static inline bool
198 dp_tx_limit_check(struct dp_vdev *vdev)
199 {
200 	return false;
201 }
202 
203 static inline bool
204 dp_tx_exception_limit_check(struct dp_vdev *vdev)
205 {
206 	return false;
207 }
208 
209 static inline void
210 dp_tx_outstanding_inc(struct dp_pdev *pdev)
211 {
212 	qdf_atomic_inc(&pdev->num_tx_outstanding);
213 	dp_update_tx_desc_stats(pdev);
214 }
215 
216 static inline void
217 dp_tx_outstanding_dec(struct dp_pdev *pdev)
218 {
219 	qdf_atomic_dec(&pdev->num_tx_outstanding);
220 	dp_update_tx_desc_stats(pdev);
221 }
222 #endif //QCA_TX_LIMIT_CHECK
223 
224 #if defined(FEATURE_TSO)
225 /**
226  * dp_tx_tso_unmap_segment() - Unmap TSO segment
227  *
228  * @soc - core txrx main context
229  * @seg_desc - tso segment descriptor
230  * @num_seg_desc - tso number segment descriptor
231  */
232 static void dp_tx_tso_unmap_segment(
233 		struct dp_soc *soc,
234 		struct qdf_tso_seg_elem_t *seg_desc,
235 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
236 {
237 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
238 	if (qdf_unlikely(!seg_desc)) {
239 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
240 			 __func__, __LINE__);
241 		qdf_assert(0);
242 	} else if (qdf_unlikely(!num_seg_desc)) {
243 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
244 			 __func__, __LINE__);
245 		qdf_assert(0);
246 	} else {
247 		bool is_last_seg;
248 		/* no tso segment left to do dma unmap */
249 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
250 			return;
251 
252 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
253 					true : false;
254 		qdf_nbuf_unmap_tso_segment(soc->osdev,
255 					   seg_desc, is_last_seg);
256 		num_seg_desc->num_seg.tso_cmn_num_seg--;
257 	}
258 }
259 
260 /**
261  * dp_tx_tso_desc_release() - Release the tso segment and tso num segment
262  *                            descriptors back to the freelist
263  *
264  * @soc - soc device handle
265  * @tx_desc - Tx software descriptor
266  */
267 static void dp_tx_tso_desc_release(struct dp_soc *soc,
268 				   struct dp_tx_desc_s *tx_desc)
269 {
270 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
271 	if (qdf_unlikely(!tx_desc->tso_desc)) {
272 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
273 			  "%s %d TSO desc is NULL!",
274 			  __func__, __LINE__);
275 		qdf_assert(0);
276 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
278 			  "%s %d TSO num desc is NULL!",
279 			  __func__, __LINE__);
280 		qdf_assert(0);
281 	} else {
282 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
283 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
284 
285 		/* Add the tso num segment into the free list */
286 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
287 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
288 					    tx_desc->tso_num_desc);
289 			tx_desc->tso_num_desc = NULL;
290 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
291 		}
292 
293 		/* Add the tso segment into the free list*/
294 		dp_tx_tso_desc_free(soc,
295 				    tx_desc->pool_id, tx_desc->tso_desc);
296 		tx_desc->tso_desc = NULL;
297 	}
298 }
299 #else
300 static void dp_tx_tso_unmap_segment(
301 		struct dp_soc *soc,
302 		struct qdf_tso_seg_elem_t *seg_desc,
303 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
304 
305 {
306 }
307 
308 static void dp_tx_tso_desc_release(struct dp_soc *soc,
309 				   struct dp_tx_desc_s *tx_desc)
310 {
311 }
312 #endif
313 /**
314  * dp_tx_desc_release() - Release Tx Descriptor
315  * @tx_desc: Tx Descriptor
316  * @desc_pool_id: Descriptor Pool ID
317  *
318  * Deallocate all resources attached to Tx descriptor and free the Tx
319  * descriptor.
320  *
321  * Return: None
322  */
323 static void
324 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
325 {
326 	struct dp_pdev *pdev = tx_desc->pdev;
327 	struct dp_soc *soc;
328 	uint8_t comp_status = 0;
329 
330 	qdf_assert(pdev);
331 
332 	soc = pdev->soc;
333 
334 	dp_tx_outstanding_dec(pdev);
335 
336 	if (tx_desc->frm_type == dp_tx_frm_tso)
337 		dp_tx_tso_desc_release(soc, tx_desc);
338 
339 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
340 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
341 
342 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
343 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
344 
345 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
346 		qdf_atomic_dec(&soc->num_tx_exception);
347 
348 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
349 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
350 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
351 							     soc->hal_soc);
352 	else
353 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
354 
355 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
356 		"Tx Completion Release desc %d status %d outstanding %d",
357 		tx_desc->id, comp_status,
358 		qdf_atomic_read(&pdev->num_tx_outstanding));
359 
360 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
361 	return;
362 }
363 
364 /**
365  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
366  * @vdev: DP vdev Handle
367  * @nbuf: skb
368  * @msdu_info: msdu_info required to create HTT metadata
369  *
370  * Prepares and fills HTT metadata in the frame pre-header for special frames
371  * that should be transmitted using varying transmit parameters.
372  * There are 2 VDEV modes that currently need this special metadata -
373  *  1) Mesh Mode
374  *  2) DSRC Mode
375  *
376  * Return: HTT metadata size
377  *
378  */
379 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
380 					  struct dp_tx_msdu_info_s *msdu_info)
381 {
382 	uint32_t *meta_data = msdu_info->meta_data;
383 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
384 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
385 
386 	uint8_t htt_desc_size;
387 
388 	/* Size rounded up to a multiple of 8 bytes */
389 	uint8_t htt_desc_size_aligned;
390 
391 	uint8_t *hdr = NULL;
392 
393 	/*
394 	 * Metadata - HTT MSDU Extension header
395 	 */
396 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
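	/* (size + 7) & ~0x7 rounds up to the next multiple of 8, e.g. 26 -> 32 */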
397 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
398 
399 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
400 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
401 							   meta_data[0])) {
402 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
403 				 htt_desc_size_aligned)) {
404 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
405 							 htt_desc_size_aligned);
406 			if (!nbuf) {
407 				/*
408 				 * qdf_nbuf_realloc_headroom won't do skb_clone
409 				 * as skb_realloc_headroom does. so, no free is
410 				 * needed here.
411 				 */
412 				DP_STATS_INC(vdev,
413 					     tx_i.dropped.headroom_insufficient,
414 					     1);
415 				qdf_print(" %s[%d] skb_realloc_headroom failed",
416 					  __func__, __LINE__);
417 				return 0;
418 			}
419 		}
420 		/* Fill and add HTT metaheader */
421 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
422 		if (!hdr) {
423 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
424 					"Error in filling HTT metadata");
425 
426 			return 0;
427 		}
428 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
429 
430 	} else if (vdev->opmode == wlan_op_mode_ocb) {
431 		/* Todo - Add support for DSRC */
432 	}
433 
434 	return htt_desc_size_aligned;
435 }
436 
437 /**
438  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
439  * @tso_seg: TSO segment to process
440  * @ext_desc: Pointer to MSDU extension descriptor
441  *
442  * Return: void
443  */
444 #if defined(FEATURE_TSO)
445 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
446 		void *ext_desc)
447 {
448 	uint8_t num_frag;
449 	uint32_t tso_flags;
450 
451 	/*
452 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
453 	 * tcp_flag_mask
454 	 *
455 	 * Checksum enable flags are set in TCL descriptor and not in Extension
456 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
457 	 */
458 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
459 
460 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
461 
462 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
463 		tso_seg->tso_flags.ip_len);
464 
465 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
466 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
467 
468 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
469 		uint32_t lo = 0;
470 		uint32_t hi = 0;
471 
472 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
473 				  (tso_seg->tso_frags[num_frag].length));
474 
475 		qdf_dmaaddr_to_32s(
476 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
477 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
478 			tso_seg->tso_frags[num_frag].length);
479 	}
480 
481 	return;
482 }
483 #else
484 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
485 		void *ext_desc)
486 {
487 	return;
488 }
489 #endif
490 
491 #if defined(FEATURE_TSO)
492 /**
493  * dp_tx_free_tso_seg_list() - Loop through the tso segments
494  *                             allocated and free them
495  *
496  * @soc: soc handle
497  * @free_seg: list of tso segments
498  * @msdu_info: msdu descriptor
499  *
500  * Return - void
501  */
502 static void dp_tx_free_tso_seg_list(
503 		struct dp_soc *soc,
504 		struct qdf_tso_seg_elem_t *free_seg,
505 		struct dp_tx_msdu_info_s *msdu_info)
506 {
507 	struct qdf_tso_seg_elem_t *next_seg;
508 
509 	while (free_seg) {
510 		next_seg = free_seg->next;
511 		dp_tx_tso_desc_free(soc,
512 				    msdu_info->tx_queue.desc_pool_id,
513 				    free_seg);
514 		free_seg = next_seg;
515 	}
516 }
517 
518 /**
519  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
520  *                                 allocated and free them
521  *
522  * @soc:  soc handle
523  * @free_num_seg: list of tso number segments
524  * @msdu_info: msdu descriptor
525  * Return - void
526  */
527 static void dp_tx_free_tso_num_seg_list(
528 		struct dp_soc *soc,
529 		struct qdf_tso_num_seg_elem_t *free_num_seg,
530 		struct dp_tx_msdu_info_s *msdu_info)
531 {
532 	struct qdf_tso_num_seg_elem_t *next_num_seg;
533 
534 	while (free_num_seg) {
535 		next_num_seg = free_num_seg->next;
536 		dp_tso_num_seg_free(soc,
537 				    msdu_info->tx_queue.desc_pool_id,
538 				    free_num_seg);
539 		free_num_seg = next_num_seg;
540 	}
541 }
542 
543 /**
544  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
545  *                              do dma unmap for each segment
546  *
547  * @soc: soc handle
548  * @free_seg: list of tso segments
549  * @num_seg_desc: tso number segment descriptor
550  *
551  * Return - void
552  */
553 static void dp_tx_unmap_tso_seg_list(
554 		struct dp_soc *soc,
555 		struct qdf_tso_seg_elem_t *free_seg,
556 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
557 {
558 	struct qdf_tso_seg_elem_t *next_seg;
559 
560 	if (qdf_unlikely(!num_seg_desc)) {
561 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
562 		return;
563 	}
564 
565 	while (free_seg) {
566 		next_seg = free_seg->next;
567 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
568 		free_seg = next_seg;
569 	}
570 }
571 
572 #ifdef FEATURE_TSO_STATS
573 /**
574  * dp_tso_get_stats_idx: Retrieve the tso packet id
575  * @pdev - pdev handle
576  *
577  * Return: id
578  */
579 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
580 {
581 	uint32_t stats_idx;
582 
583 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
584 						% CDP_MAX_TSO_PACKETS);
585 	return stats_idx;
586 }
587 #else
588 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
589 {
590 	return 0;
591 }
592 #endif /* FEATURE_TSO_STATS */
593 
594 /**
595  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
596  *				     free the tso segments descriptor and
597  *				     tso num segments descriptor
598  *
599  * @soc:  soc handle
600  * @msdu_info: msdu descriptor
601  * @tso_seg_unmap: flag to show if dma unmap is necessary
602  *
603  * Return - void
604  */
605 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
606 					  struct dp_tx_msdu_info_s *msdu_info,
607 					  bool tso_seg_unmap)
608 {
609 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
610 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
611 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
612 					tso_info->tso_num_seg_list;
613 
614 	/* do dma unmap for each segment */
615 	if (tso_seg_unmap)
616 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
617 
618 	/* free all tso num segment descriptors (typically there is only one) */
619 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
620 
621 	/* free all tso segment descriptor */
622 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
623 }
624 
625 /**
626  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
627  * @vdev: virtual device handle
628  * @msdu: network buffer
629  * @msdu_info: meta data associated with the msdu
630  *
631  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
632  */
633 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
634 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
635 {
636 	struct qdf_tso_seg_elem_t *tso_seg;
637 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
638 	struct dp_soc *soc = vdev->pdev->soc;
639 	struct dp_pdev *pdev = vdev->pdev;
640 	struct qdf_tso_info_t *tso_info;
641 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
642 	tso_info = &msdu_info->u.tso_info;
643 	tso_info->curr_seg = NULL;
644 	tso_info->tso_seg_list = NULL;
645 	tso_info->num_segs = num_seg;
646 	msdu_info->frm_type = dp_tx_frm_tso;
647 	tso_info->tso_num_seg_list = NULL;
648 
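	/*
	 * Allocate one TSO segment descriptor per segment plus a single
	 * common num-seg descriptor, then let qdf_nbuf_get_tso_info() fill
	 * in the per-segment fragment and header details.
	 */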
649 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
650 
651 	while (num_seg) {
652 		tso_seg = dp_tx_tso_desc_alloc(
653 				soc, msdu_info->tx_queue.desc_pool_id);
654 		if (tso_seg) {
655 			tso_seg->next = tso_info->tso_seg_list;
656 			tso_info->tso_seg_list = tso_seg;
657 			num_seg--;
658 		} else {
659 			dp_err_rl("Failed to alloc tso seg desc");
660 			DP_STATS_INC_PKT(vdev->pdev,
661 					 tso_stats.tso_no_mem_dropped, 1,
662 					 qdf_nbuf_len(msdu));
663 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
664 
665 			return QDF_STATUS_E_NOMEM;
666 		}
667 	}
668 
669 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
670 
671 	tso_num_seg = dp_tso_num_seg_alloc(soc,
672 			msdu_info->tx_queue.desc_pool_id);
673 
674 	if (tso_num_seg) {
675 		tso_num_seg->next = tso_info->tso_num_seg_list;
676 		tso_info->tso_num_seg_list = tso_num_seg;
677 	} else {
678 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
679 			 __func__);
680 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
681 
682 		return QDF_STATUS_E_NOMEM;
683 	}
684 
685 	msdu_info->num_seg =
686 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
687 
688 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
689 			msdu_info->num_seg);
690 
691 	if (!(msdu_info->num_seg)) {
692 		/*
693 		 * Free allocated TSO seg desc and number seg desc,
694 		 * do unmap for segments if dma map has done.
695 		 */
696 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
697 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
698 
699 		return QDF_STATUS_E_INVAL;
700 	}
701 
702 	tso_info->curr_seg = tso_info->tso_seg_list;
703 
704 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
705 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
706 			     msdu, msdu_info->num_seg);
707 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
708 				    tso_info->msdu_stats_idx);
709 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
710 	return QDF_STATUS_SUCCESS;
711 }
712 #else
713 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
714 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
715 {
716 	return QDF_STATUS_E_NOMEM;
717 }
718 #endif
719 
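/*
 * The meta_data[] scratch area in dp_tx_msdu_info_s
 * (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words) must be large enough to
 * hold a struct htt_tx_msdu_desc_ext2_t.
 */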
720 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
721 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
722 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
723 
724 /**
725  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
726  * @vdev: DP Vdev handle
727  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
728  * @desc_pool_id: Descriptor Pool ID
729  *
730  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
731  */
732 static
733 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
734 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
735 {
736 	uint8_t i;
737 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
738 	struct dp_tx_seg_info_s *seg_info;
739 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
740 	struct dp_soc *soc = vdev->pdev->soc;
741 
742 	/* Allocate an extension descriptor */
743 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
744 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
745 
746 	if (!msdu_ext_desc) {
747 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
748 		return NULL;
749 	}
750 
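	/*
	 * For mesh frames taking the FW exception path, append the HTT
	 * metadata right after the fixed extension descriptor fields in the
	 * cached copy and mark the descriptor as carrying valid metadata.
	 */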
751 	if (msdu_info->exception_fw &&
752 			qdf_unlikely(vdev->mesh_vdev)) {
753 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
754 				&msdu_info->meta_data[0],
755 				sizeof(struct htt_tx_msdu_desc_ext2_t));
756 		qdf_atomic_inc(&soc->num_tx_exception);
757 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
758 	}
759 
760 	switch (msdu_info->frm_type) {
761 	case dp_tx_frm_sg:
762 	case dp_tx_frm_me:
763 	case dp_tx_frm_raw:
764 		seg_info = msdu_info->u.sg_info.curr_seg;
765 		/* Update the buffer pointers in MSDU Extension Descriptor */
766 		for (i = 0; i < seg_info->frag_cnt; i++) {
767 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
768 				seg_info->frags[i].paddr_lo,
769 				seg_info->frags[i].paddr_hi,
770 				seg_info->frags[i].len);
771 		}
772 
773 		break;
774 
775 	case dp_tx_frm_tso:
776 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
777 				&cached_ext_desc[0]);
778 		break;
779 
780 
781 	default:
782 		break;
783 	}
784 
785 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
786 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
787 
788 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
789 			msdu_ext_desc->vaddr);
790 
791 	return msdu_ext_desc;
792 }
793 
794 /**
795  * dp_tx_trace_pkt() - Trace TX packet at DP layer
796  *
797  * @skb: skb to be traced
798  * @msdu_id: msdu_id of the packet
799  * @vdev_id: vdev_id of the packet
800  *
801  * Return: None
802  */
803 #ifdef DP_DISABLE_TX_PKT_TRACE
804 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
805 			    uint8_t vdev_id)
806 {
807 }
808 #else
809 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
810 			    uint8_t vdev_id)
811 {
812 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
813 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
814 	DPTRACE(qdf_dp_trace_ptr(skb,
815 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
816 				 QDF_TRACE_DEFAULT_PDEV_ID,
817 				 qdf_nbuf_data_addr(skb),
818 				 sizeof(qdf_nbuf_data(skb)),
819 				 msdu_id, vdev_id, 0));
820 
821 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
822 
823 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
824 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
825 				      msdu_id, QDF_TX));
826 }
827 #endif
828 
829 #ifdef QCA_SUPPORT_WDS_EXTENDED
830 /**
831  * dp_is_tx_extended() - Check whether the frame is destined to a WDS extended peer
832  *
833  * @vdev: DP vdev handle
834  * @tx_exc_metadata: Handle that holds exception path metadata
835  *
836  * Return: whether this packet needs to be sent to FW as an exception or not
837  *	   (false: send as exception to wlan FW, true: do not send as exception)
838  */
839 static inline bool
840 dp_is_tx_extended(struct dp_vdev *vdev, struct cdp_tx_exception_metadata
841 		  *tx_exc_metadata)
842 {
843 	if (qdf_likely(!vdev->wds_ext_enabled))
844 		return false;
845 
846 	if (tx_exc_metadata && !tx_exc_metadata->is_wds_extended)
847 		return false;
848 
849 	return true;
850 }
851 
852 /**
853  * dp_tx_wds_ext() - Configure AST override from peer ast entry
854  *
855  * @soc: DP soc handle
856  * @vdev: DP vdev handle
857  * @peer_id: peer_id of the peer for which packet is destined
858  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
859  *
860  * Return: None
861  */
862 static inline void
863 dp_tx_wds_ext(struct dp_soc *soc, struct dp_vdev *vdev, uint16_t peer_id,
864 	      struct dp_tx_msdu_info_s *msdu_info)
865 {
866 	struct dp_peer *peer = NULL;
867 
868 	msdu_info->search_type = vdev->search_type;
869 	msdu_info->ast_idx = vdev->bss_ast_idx;
870 	msdu_info->ast_hash = vdev->bss_ast_hash;
871 
872 	if (qdf_likely(!vdev->wds_ext_enabled))
873 		return;
874 
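	/*
	 * For WDS extended peers, override the vdev BSS AST defaults set
	 * above with the peer's own AST entry so the HW address search
	 * resolves to the peer directly, and clear the FW exception flag.
	 */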
875 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX);
876 
877 	if (qdf_unlikely(!peer))
878 		return;
879 
880 	msdu_info->search_type = HAL_TX_ADDR_INDEX_SEARCH;
881 	msdu_info->ast_idx = peer->self_ast_entry->ast_idx;
882 	msdu_info->ast_hash = peer->self_ast_entry->ast_hash_value;
883 	dp_peer_unref_delete(peer, DP_MOD_ID_TX);
884 	msdu_info->exception_fw = 0;
885 }
886 #else
887 
888 static inline bool
889 dp_is_tx_extended(struct dp_vdev *vdev, struct cdp_tx_exception_metadata
890 		  *tx_exc_metadata)
891 {
892 	return false;
893 }
894 
895 static inline void
896 dp_tx_wds_ext(struct dp_soc *soc, struct dp_vdev *vdev, uint16_t peer_id,
897 	      struct dp_tx_msdu_info_s *msdu_info)
898 {
899 	msdu_info->search_type = vdev->search_type;
900 	msdu_info->ast_idx = vdev->bss_ast_idx;
901 	msdu_info->ast_hash = vdev->bss_ast_hash;
902 }
903 #endif
904 
905 /**
906  * dp_tx_prepare_desc_single - Allocate and prepare Tx descriptor
907  * @vdev: DP vdev handle
908  * @nbuf: skb
909  * @desc_pool_id: Descriptor pool ID
910  * @msdu_info: MSDU info to be setup in MSDU descriptor
911  * @tx_exc_metadata: Handle that holds exception path metadata
912  *
913  * Allocate and prepare Tx descriptor with msdu information.
914  * Return: Pointer to Tx Descriptor on success,
915  *         NULL on failure
916  */
917 static
918 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
919 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
920 		struct dp_tx_msdu_info_s *msdu_info,
921 		struct cdp_tx_exception_metadata *tx_exc_metadata)
922 {
923 	uint8_t align_pad;
924 	uint8_t is_exception = 0;
925 	uint8_t htt_hdr_size;
926 	struct dp_tx_desc_s *tx_desc;
927 	struct dp_pdev *pdev = vdev->pdev;
928 	struct dp_soc *soc = pdev->soc;
929 
930 	if (dp_tx_limit_check(vdev))
931 		return NULL;
932 
933 	/* Allocate software Tx descriptor */
934 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
935 	if (qdf_unlikely(!tx_desc)) {
936 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
937 		return NULL;
938 	}
939 
940 	dp_tx_outstanding_inc(pdev);
941 
942 	/* Initialize the SW tx descriptor */
943 	tx_desc->nbuf = nbuf;
944 	tx_desc->frm_type = dp_tx_frm_std;
945 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
946 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
947 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
948 	tx_desc->vdev_id = vdev->vdev_id;
949 	tx_desc->pdev = pdev;
950 	tx_desc->msdu_ext_desc = NULL;
951 	tx_desc->pkt_offset = 0;
952 	tx_desc->length = qdf_nbuf_headlen(nbuf);
953 
954 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
955 
956 	if (qdf_unlikely(vdev->multipass_en)) {
957 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
958 			goto failure;
959 	}
960 
961 	if (qdf_unlikely(dp_is_tx_extended(vdev, tx_exc_metadata)))
962 		return tx_desc;
963 
964 	/*
965 	 * For special modes (vdev_type == ocb or mesh), data frames should be
966 	 * transmitted using varying transmit parameters (tx spec) which include
967 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
968 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
969 	 * These frames are sent as exception packets to firmware.
970 	 *
971 	 * HW requirement is that metadata should always point to a
972 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
973 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
974 	 *  to get 8-byte aligned start address along with align_pad added
975 	 *
976 	 *  |-----------------------------|
977 	 *  |                             |
978 	 *  |-----------------------------| <-----Buffer Pointer Address given
979 	 *  |                             |  ^    in HW descriptor (aligned)
980 	 *  |       HTT Metadata          |  |
981 	 *  |                             |  |
982 	 *  |                             |  | Packet Offset given in descriptor
983 	 *  |                             |  |
984 	 *  |-----------------------------|  |
985 	 *  |       Alignment Pad         |  v
986 	 *  |-----------------------------| <----- Actual buffer start address
987 	 *  |        SKB Data             |           (Unaligned)
988 	 *  |                             |
989 	 *  |                             |
990 	 *  |                             |
991 	 *  |                             |
992 	 *  |                             |
993 	 *  |-----------------------------|
994 	 */
995 	if (qdf_unlikely((msdu_info->exception_fw)) ||
996 				(vdev->opmode == wlan_op_mode_ocb) ||
997 				(tx_exc_metadata &&
998 				tx_exc_metadata->is_tx_sniffer)) {
999 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
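		/*
		 * align_pad is the offset of the current data pointer from an
		 * 8-byte boundary; pushing that many bytes makes the new head
		 * (where the HTT metadata will be written) 8-byte aligned.
		 */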
1000 
1001 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1002 			DP_STATS_INC(vdev,
1003 				     tx_i.dropped.headroom_insufficient, 1);
1004 			goto failure;
1005 		}
1006 
1007 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1008 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1009 					"qdf_nbuf_push_head failed");
1010 			goto failure;
1011 		}
1012 
1013 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1014 				msdu_info);
1015 		if (htt_hdr_size == 0)
1016 			goto failure;
1017 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1018 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1019 		is_exception = 1;
1020 		tx_desc->length -= tx_desc->pkt_offset;
1021 	}
1022 
1023 #if !TQM_BYPASS_WAR
1024 	if (is_exception || tx_exc_metadata)
1025 #endif
1026 	{
1027 		/* Temporary WAR due to TQM VP issues */
1028 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1029 		qdf_atomic_inc(&soc->num_tx_exception);
1030 	}
1031 
1032 	return tx_desc;
1033 
1034 failure:
1035 	dp_tx_desc_release(tx_desc, desc_pool_id);
1036 	return NULL;
1037 }
1038 
1039 /**
1040  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1041  * @vdev: DP vdev handle
1042  * @nbuf: skb
1043  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1044  * @desc_pool_id : Descriptor Pool ID
1045  *
1046  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1047  * information. For frames with fragments, allocate and prepare
1048  * an MSDU extension descriptor
1049  *
1050  * Return: Pointer to Tx Descriptor on success,
1051  *         NULL on failure
1052  */
1053 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1054 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1055 		uint8_t desc_pool_id)
1056 {
1057 	struct dp_tx_desc_s *tx_desc;
1058 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1059 	struct dp_pdev *pdev = vdev->pdev;
1060 	struct dp_soc *soc = pdev->soc;
1061 
1062 	if (dp_tx_limit_check(vdev))
1063 		return NULL;
1064 
1065 	/* Allocate software Tx descriptor */
1066 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1067 	if (!tx_desc) {
1068 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1069 		return NULL;
1070 	}
1071 
1072 	dp_tx_outstanding_inc(pdev);
1073 
1074 	/* Initialize the SW tx descriptor */
1075 	tx_desc->nbuf = nbuf;
1076 	tx_desc->frm_type = msdu_info->frm_type;
1077 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1078 	tx_desc->vdev_id = vdev->vdev_id;
1079 	tx_desc->pdev = pdev;
1080 	tx_desc->pkt_offset = 0;
1081 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1082 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1083 
1084 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1085 
1086 	/* Handle scattered frames - TSO/SG/ME */
1087 	/* Allocate and prepare an extension descriptor for scattered frames */
1088 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1089 	if (!msdu_ext_desc) {
1090 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1091 				"%s Tx Extension Descriptor Alloc Fail",
1092 				__func__);
1093 		goto failure;
1094 	}
1095 
1096 #if TQM_BYPASS_WAR
1097 	/* Temporary WAR due to TQM VP issues */
1098 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1099 	qdf_atomic_inc(&soc->num_tx_exception);
1100 #endif
1101 	if (qdf_unlikely(msdu_info->exception_fw))
1102 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1103 
1104 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1105 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1106 
1107 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1108 
1109 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1110 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1111 	else
1112 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1113 
1114 	return tx_desc;
1115 failure:
1116 	dp_tx_desc_release(tx_desc, desc_pool_id);
1117 	return NULL;
1118 }
1119 
1120 /**
1121  * dp_tx_prepare_raw() - Prepare RAW packet TX
1122  * @vdev: DP vdev handle
1123  * @nbuf: buffer pointer
1124  * @seg_info: Pointer to Segment info Descriptor to be prepared
1125  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1126  *     descriptor
1127  *
1128  * Return: nbuf on success, NULL on failure
1129  */
1130 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1131 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1132 {
1133 	qdf_nbuf_t curr_nbuf = NULL;
1134 	uint16_t total_len = 0;
1135 	qdf_dma_addr_t paddr;
1136 	int32_t i;
1137 	int32_t mapped_buf_num = 0;
1138 
1139 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1140 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1141 
1142 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1143 
1144 	/* Continue only if frames are of DATA type */
1145 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1146 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1147 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1148 			  "Pkt recd is not of data type");
1149 		goto error;
1150 	}
1151 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1152 	if (vdev->raw_mode_war &&
1153 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1154 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1155 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1156 
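	/*
	 * A RAW frame arrives as a chain of nbufs; DMA map each buffer in
	 * the chain and record it as one fragment of the scatter-gather
	 * segment built below.
	 */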
1157 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1158 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1159 		/*
1160 		 * Number of nbuf's must not exceed the size of the frags
1161 		 * array in seg_info.
1162 		 */
1163 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1164 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1165 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1166 			goto error;
1167 		}
1168 		if (QDF_STATUS_SUCCESS !=
1169 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1170 						   curr_nbuf,
1171 						   QDF_DMA_TO_DEVICE,
1172 						   curr_nbuf->len)) {
1173 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1174 				"%s dma map error ", __func__);
1175 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1176 			goto error;
1177 		}
1178 		/* Update the count of mapped nbuf's */
1179 		mapped_buf_num++;
1180 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1181 		seg_info->frags[i].paddr_lo = paddr;
1182 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1183 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1184 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1185 		total_len += qdf_nbuf_len(curr_nbuf);
1186 	}
1187 
1188 	seg_info->frag_cnt = i;
1189 	seg_info->total_len = total_len;
1190 	seg_info->next = NULL;
1191 
1192 	sg_info->curr_seg = seg_info;
1193 
1194 	msdu_info->frm_type = dp_tx_frm_raw;
1195 	msdu_info->num_seg = 1;
1196 
1197 	return nbuf;
1198 
1199 error:
1200 	i = 0;
1201 	while (nbuf) {
1202 		curr_nbuf = nbuf;
1203 		if (i < mapped_buf_num) {
1204 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1205 						     QDF_DMA_TO_DEVICE,
1206 						     curr_nbuf->len);
1207 			i++;
1208 		}
1209 		nbuf = qdf_nbuf_next(nbuf);
1210 		qdf_nbuf_free(curr_nbuf);
1211 	}
1212 	return NULL;
1213 
1214 }
1215 
1216 /**
1217  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1218  * @soc: DP soc handle
1219  * @nbuf: Buffer pointer
1220  *
1221  * unmap the chain of nbufs that belong to this RAW frame.
1222  *
1223  * Return: None
1224  */
1225 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1226 				    qdf_nbuf_t nbuf)
1227 {
1228 	qdf_nbuf_t cur_nbuf = nbuf;
1229 
1230 	do {
1231 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1232 					     QDF_DMA_TO_DEVICE,
1233 					     cur_nbuf->len);
1234 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1235 	} while (cur_nbuf);
1236 }
1237 
1238 #ifdef VDEV_PEER_PROTOCOL_COUNT
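/*
 * dp_vdev_peer_stats_update_protocol_cnt_tx() - update per-peer protocol
 * counters for a frame about to be transmitted. Nothing is done unless
 * protocol count tracking is enabled on the vdev; raw-encap, non-linear
 * and TSO frames are skipped.
 */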
1239 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1240 { \
1241 	qdf_nbuf_t nbuf_local; \
1242 	struct dp_vdev *vdev_local = vdev_hdl; \
1243 	do { \
1244 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1245 			break; \
1246 		nbuf_local = nbuf; \
1247 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1248 			 htt_cmn_pkt_type_raw)) \
1249 			break; \
1250 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1251 			break; \
1252 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1253 			break; \
1254 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1255 						       (nbuf_local), \
1256 						       NULL, 1, 0); \
1257 	} while (0); \
1258 }
1259 #else
1260 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1261 #endif
1262 
1263 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1264 /**
1265  * dp_tx_update_stats() - Update soc level tx stats
1266  * @soc: DP soc handle
1267  * @nbuf: packet being transmitted
1268  *
1269  * Returns: none
1270  */
1271 static inline void dp_tx_update_stats(struct dp_soc *soc,
1272 				      qdf_nbuf_t nbuf)
1273 {
1274 	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
1275 }
1276 
1277 /**
1278  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1279  * @soc: Datapath soc handle
1280  * @tx_desc: tx packet descriptor
1281  * @tid: TID for pkt transmission
1282  *
1283  * Returns: 1, if coalescing is to be done
1284  *	    0, if coalescing is not to be done
1285  */
1286 static inline int
1287 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1288 			 struct dp_tx_desc_s *tx_desc,
1289 			 uint8_t tid)
1290 {
1291 	struct dp_swlm *swlm = &soc->swlm;
1292 	union swlm_data swlm_query_data;
1293 	struct dp_swlm_tcl_data tcl_data;
1294 	QDF_STATUS status;
1295 	int ret;
1296 
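	/*
	 * The SW latency manager (SWLM) decides, per TCL ring write, whether
	 * the register update can be coalesced with later enqueues.
	 */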
1297 	if (qdf_unlikely(!swlm->is_enabled))
1298 		return 0;
1299 
1300 	tcl_data.nbuf = tx_desc->nbuf;
1301 	tcl_data.tid = tid;
1302 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1303 	swlm_query_data.tcl_data = &tcl_data;
1304 
1305 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1306 	if (QDF_IS_STATUS_ERROR(status)) {
1307 		dp_swlm_tcl_reset_session_data(soc);
1308 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1309 		return 0;
1310 	}
1311 
1312 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1313 	if (ret) {
1314 		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
1315 	} else {
1316 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1317 	}
1318 
1319 	return ret;
1320 }
1321 
1322 /**
1323  * dp_tx_ring_access_end() - HAL ring access end for data transmission
1324  * @soc: Datapath soc handle
1325  * @hal_ring_hdl: HAL ring handle
1326  * @coalesce: Coalesce the current write or not
1327  *
1328  * Returns: none
1329  */
1330 static inline void
1331 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1332 		      int coalesce)
1333 {
1334 	if (coalesce)
1335 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1336 	else
1337 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1338 }
1339 
1340 #else
1341 static inline void dp_tx_update_stats(struct dp_soc *soc,
1342 				      qdf_nbuf_t nbuf)
1343 {
1344 }
1345 
1346 static inline int
1347 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1348 			 struct dp_tx_desc_s *tx_desc,
1349 			 uint8_t tid)
1350 {
1351 	return 0;
1352 }
1353 
1354 static inline void
1355 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1356 		      int coalesce)
1357 {
1358 	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1359 }
1360 
1361 #endif
1362 /**
1363  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1364  * @soc: DP Soc Handle
1365  * @vdev: DP vdev handle
1366  * @tx_desc: Tx Descriptor Handle
1367  * @fw_metadata: Metadata to send to Target Firmware along with frame
1368  * @tx_exc_metadata: Handle that holds exception path meta data
1369  * @msdu_info: MSDU info with the TX queue (ring id) and the TID from HLOS
1370  *             for overriding the default DSCP-TID mapping
1371  *
1372  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1373  *  from software Tx descriptor
1374  *
1375  * Return: QDF_STATUS_SUCCESS: success
1376  *         QDF_STATUS_E_RESOURCES: Error return
1377  */
1378 static QDF_STATUS
1379 dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1380 		 struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
1381 		 struct cdp_tx_exception_metadata *tx_exc_metadata,
1382 		 struct dp_tx_msdu_info_s *msdu_info)
1383 {
1384 	void *hal_tx_desc;
1385 	uint32_t *hal_tx_desc_cached;
1386 	int coalesce = 0;
1387 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1388 	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
1389 	uint8_t tid = msdu_info->tid;
1390 
1391 	/*
1392 	 * Zero-initialize the cached descriptor statically here to avoid
1393 	 * an extra qdf_mem_set()/memset call
1394 	 */
1395 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1396 
1397 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1398 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1399 			tx_exc_metadata->sec_type : vdev->sec_type);
1400 
1401 	/* Return Buffer Manager ID */
1402 	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
1403 
1404 	hal_ring_handle_t hal_ring_hdl = NULL;
1405 
1406 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1407 
1408 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1409 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1410 		return QDF_STATUS_E_RESOURCES;
1411 	}
1412 
1413 	hal_tx_desc_cached = (void *) cached_desc;
1414 
1415 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
1416 				 tx_desc->dma_addr, bm_id, tx_desc->id,
1417 				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1418 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1419 				vdev->lmac_id);
1420 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1421 				    msdu_info->search_type);
1422 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1423 				     msdu_info->ast_idx);
1424 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1425 					  vdev->dscp_tid_map_id);
1426 
1427 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1428 			sec_type_map[sec_type]);
1429 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1430 				      (msdu_info->ast_hash & 0xF));
1431 
1432 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1433 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1434 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1435 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1436 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1437 					  vdev->hal_desc_addr_search_flags);
1438 
1439 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1440 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1441 
1442 	/* verify checksum offload configuration */
1443 	if (vdev->csum_enabled &&
1444 	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1445 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1446 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1447 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1448 	}
1449 
1450 	if (tid != HTT_TX_EXT_TID_INVALID)
1451 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1452 
1453 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1454 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1455 
1456 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1457 	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
1458 			 soc->wlan_cfg_ctx)))
1459 		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
1460 
1461 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1462 			 tx_desc->length,
1463 			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
1464 			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
1465 			 tx_desc->id);
1466 
1467 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1468 
1469 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1470 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1471 			  "%s %d : HAL RING Access Failed -- %pK",
1472 			 __func__, __LINE__, hal_ring_hdl);
1473 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1474 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1475 		return status;
1476 	}
1477 
1478 	/* Sync cached descriptor with HW */
1479 
1480 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1481 	if (qdf_unlikely(!hal_tx_desc)) {
1482 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1483 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1484 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1485 		goto ring_access_fail;
1486 	}
1487 
1488 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1489 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1490 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1491 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
1492 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1493 	dp_tx_update_stats(soc, tx_desc->nbuf);
1494 	status = QDF_STATUS_SUCCESS;
1495 
1496 ring_access_fail:
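	/*
	 * Complete the HW ring access only if a runtime-PM reference can be
	 * taken; otherwise end the SRNG access without the register write
	 * and mark the ring for a deferred flush.
	 */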
1497 	if (hif_pm_runtime_get(soc->hif_handle,
1498 			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
1499 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1500 		hif_pm_runtime_put(soc->hif_handle,
1501 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1502 	} else {
1503 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1504 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1505 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1506 	}
1507 
1508 	return status;
1509 }
1510 
1511 
1512 /**
1513  * dp_cce_classify() - Classify the frame based on CCE rules
1514  * @vdev: DP vdev handle
1515  * @nbuf: skb
1516  *
1517  * Classify frames based on CCE rules
1518  * Return: true if classified,
1519  *         false otherwise
1520  */
1521 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1522 {
1523 	qdf_ether_header_t *eh = NULL;
1524 	uint16_t   ether_type;
1525 	qdf_llc_t *llcHdr;
1526 	qdf_nbuf_t nbuf_clone = NULL;
1527 	qdf_dot3_qosframe_t *qos_wh = NULL;
1528 
1529 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1530 	/*
1531 	 * In case of mesh packets or hlos tid override enabled,
1532 	 * don't do any classification
1533 	 */
1534 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1535 					& DP_TX_SKIP_CCE_CLASSIFY))
1536 			return false;
1537 	}
1538 
1539 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1540 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1541 		ether_type = eh->ether_type;
1542 		llcHdr = (qdf_llc_t *)(nbuf->data +
1543 					sizeof(qdf_ether_header_t));
1544 	} else {
1545 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1546 		/* For encrypted packets don't do any classification */
1547 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1548 			return false;
1549 
1550 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1551 			if (qdf_unlikely(
1552 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1553 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1554 
1555 				ether_type = *(uint16_t *)(nbuf->data
1556 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1557 						+ sizeof(qdf_llc_t)
1558 						- sizeof(ether_type));
1559 				llcHdr = (qdf_llc_t *)(nbuf->data +
1560 						QDF_IEEE80211_4ADDR_HDR_LEN);
1561 			} else {
1562 				ether_type = *(uint16_t *)(nbuf->data
1563 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1564 						+ sizeof(qdf_llc_t)
1565 						- sizeof(ether_type));
1566 				llcHdr = (qdf_llc_t *)(nbuf->data +
1567 					QDF_IEEE80211_3ADDR_HDR_LEN);
1568 			}
1569 
1570 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1571 				&& (ether_type ==
1572 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1573 
1574 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1575 				return true;
1576 			}
1577 		}
1578 
1579 		return false;
1580 	}
1581 
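	/*
	 * For LLC/SNAP (and VLAN tagged) frames, work on a clone so the
	 * headers can be pulled for inspection without modifying the nbuf
	 * that will actually be transmitted; the clone is freed below.
	 */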
1582 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1583 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1584 				sizeof(*llcHdr));
1585 		nbuf_clone = qdf_nbuf_clone(nbuf);
1586 		if (qdf_unlikely(nbuf_clone)) {
1587 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1588 
1589 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1590 				qdf_nbuf_pull_head(nbuf_clone,
1591 						sizeof(qdf_net_vlanhdr_t));
1592 			}
1593 		}
1594 	} else {
1595 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1596 			nbuf_clone = qdf_nbuf_clone(nbuf);
1597 			if (qdf_unlikely(nbuf_clone)) {
1598 				qdf_nbuf_pull_head(nbuf_clone,
1599 					sizeof(qdf_net_vlanhdr_t));
1600 			}
1601 		}
1602 	}
1603 
1604 	if (qdf_unlikely(nbuf_clone))
1605 		nbuf = nbuf_clone;
1606 
1607 
1608 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1609 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1610 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1611 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1612 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1613 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1614 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1615 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1616 		if (qdf_unlikely(nbuf_clone))
1617 			qdf_nbuf_free(nbuf_clone);
1618 		return true;
1619 	}
1620 
1621 	if (qdf_unlikely(nbuf_clone))
1622 		qdf_nbuf_free(nbuf_clone);
1623 
1624 	return false;
1625 }
1626 
1627 /**
1628  * dp_tx_get_tid() - Obtain TID to be used for this frame
1629  * @vdev: DP vdev handle
1630  * @nbuf: skb
1631  *
1632  * Extract the DSCP or PCP information from frame and map into TID value.
1633  *
1634  * Return: void
1635  */
1636 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1637 			  struct dp_tx_msdu_info_s *msdu_info)
1638 {
1639 	uint8_t tos = 0, dscp_tid_override = 0;
1640 	uint8_t *hdr_ptr, *L3datap;
1641 	uint8_t is_mcast = 0;
1642 	qdf_ether_header_t *eh = NULL;
1643 	qdf_ethervlan_header_t *evh = NULL;
1644 	uint16_t   ether_type;
1645 	qdf_llc_t *llcHdr;
1646 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1647 
1648 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1649 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1650 		eh = (qdf_ether_header_t *)nbuf->data;
1651 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1652 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1653 	} else {
1654 		qdf_dot3_qosframe_t *qos_wh =
1655 			(qdf_dot3_qosframe_t *) nbuf->data;
1656 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1657 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1658 		return;
1659 	}
1660 
1661 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1662 	ether_type = eh->ether_type;
1663 
1664 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1665 	/*
1666 	 * Check if packet is dot3 or eth2 type.
1667 	 */
1668 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1669 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1670 				sizeof(*llcHdr));
1671 
1672 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1673 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1674 				sizeof(*llcHdr);
1675 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1676 					+ sizeof(*llcHdr) +
1677 					sizeof(qdf_net_vlanhdr_t));
1678 		} else {
1679 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1680 				sizeof(*llcHdr);
1681 		}
1682 	} else {
1683 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1684 			evh = (qdf_ethervlan_header_t *) eh;
1685 			ether_type = evh->ether_type;
1686 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1687 		}
1688 	}
1689 
1690 	/*
1691 	 * Find priority from IP TOS DSCP field
1692 	 */
1693 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1694 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1695 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1696 			/* Only for unicast frames */
1697 			if (!is_mcast) {
1698 				/* send it on VO queue */
1699 				msdu_info->tid = DP_VO_TID;
1700 			}
1701 		} else {
1702 			/*
1703 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1704 			 * from TOS byte.
1705 			 */
1706 			tos = ip->ip_tos;
1707 			dscp_tid_override = 1;
1708 
1709 		}
1710 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1711 		/* TODO
1712 		 * use flowlabel
1713 		 * igmp/mld cases to be handled in phase 2
1714 		 */
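		/*
		 * Use the IPv6 Traffic Class (priority) field the same way
		 * the IPv4 TOS/DSCP byte is used below.
		 */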
1715 		unsigned long ver_pri_flowlabel;
1716 		unsigned long pri;
1717 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1718 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1719 			DP_IPV6_PRIORITY_SHIFT;
1720 		tos = pri;
1721 		dscp_tid_override = 1;
1722 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1723 		msdu_info->tid = DP_VO_TID;
1724 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1725 		/* Only for unicast frames */
1726 		if (!is_mcast) {
1727 			/* send ucast arp on VO queue */
1728 			msdu_info->tid = DP_VO_TID;
1729 		}
1730 	}
1731 
1732 	/*
1733 	 * Assign all MCAST packets to BE
1734 	 */
1735 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1736 		if (is_mcast) {
1737 			tos = 0;
1738 			dscp_tid_override = 1;
1739 		}
1740 	}
1741 
1742 	if (dscp_tid_override == 1) {
1743 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1744 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1745 	}
1746 
1747 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1748 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1749 
1750 	return;
1751 }
1752 
1753 /**
1754  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1755  * @vdev: DP vdev handle
1756  * @nbuf: skb
1757  *
1758  * Software based TID classification is required when more than 2 DSCP-TID
1759  * mapping tables are needed.
1760  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1761  *
1762  * Return: void
1763  */
1764 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1765 				      struct dp_tx_msdu_info_s *msdu_info)
1766 {
1767 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1768 
1769 	/*
1770 	 * skip_sw_tid_classification flag will be set in the below cases:
1771 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1772 	 * 2. hlos_tid_override enabled for vdev
1773 	 * 3. mesh mode enabled for vdev
1774 	 */
1775 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1776 		/* Update tid in msdu_info from skb priority */
1777 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1778 			    & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1779 			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1780 			return;
1781 		}
1782 		return;
1783 	}
1784 
1785 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1786 }
1787 
1788 #ifdef FEATURE_WLAN_TDLS
1789 /**
1790  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1791  * @soc: datapath SOC
1792  * @vdev: datapath vdev
1793  * @tx_desc: TX descriptor
1794  *
1795  * Return: None
1796  */
1797 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1798 				    struct dp_vdev *vdev,
1799 				    struct dp_tx_desc_s *tx_desc)
1800 {
1801 	if (vdev) {
1802 		if (vdev->is_tdls_frame) {
1803 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1804 			vdev->is_tdls_frame = false;
1805 		}
1806 	}
1807 }
1808 
1809 /**
1810  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1811  * @soc: dp_soc handle
1812  * @tx_desc: TX descriptor
1814  *
1815  * Return: None
1816  */
1817 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1818 					 struct dp_tx_desc_s *tx_desc)
1819 {
1820 	struct hal_tx_completion_status ts = {0};
1821 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1822 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1823 						     DP_MOD_ID_TDLS);
1824 
1825 	if (qdf_unlikely(!vdev)) {
1826 		dp_err_rl("vdev is null!");
1827 		goto error;
1828 	}
1829 
1830 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1831 	if (vdev->tx_non_std_data_callback.func) {
1832 		qdf_nbuf_set_next(nbuf, NULL);
1833 		vdev->tx_non_std_data_callback.func(
1834 				vdev->tx_non_std_data_callback.ctxt,
1835 				nbuf, ts.status);
1836 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1837 		return;
1838 	} else {
1839 		dp_err_rl("callback func is null");
1840 	}
1841 
1842 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1843 error:
1844 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1845 	qdf_nbuf_free(nbuf);
1846 }
1847 
1848 /**
1849  * dp_tx_msdu_single_map() - do nbuf map
1850  * @vdev: DP vdev handle
1851  * @tx_desc: DP TX descriptor pointer
1852  * @nbuf: skb pointer
1853  *
1854  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1855  * operation done in the other component.
1856  *
1857  * Return: QDF_STATUS
1858  */
1859 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1860 					       struct dp_tx_desc_s *tx_desc,
1861 					       qdf_nbuf_t nbuf)
1862 {
1863 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1864 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1865 						  nbuf,
1866 						  QDF_DMA_TO_DEVICE,
1867 						  nbuf->len);
1868 	else
1869 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1870 					   QDF_DMA_TO_DEVICE);
1871 }
1872 #else
1873 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1874 					   struct dp_vdev *vdev,
1875 					   struct dp_tx_desc_s *tx_desc)
1876 {
1877 }
1878 
1879 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1880 						struct dp_tx_desc_s *tx_desc)
1881 {
1882 }
1883 
1884 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1885 					       struct dp_tx_desc_s *tx_desc,
1886 					       qdf_nbuf_t nbuf)
1887 {
1888 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1889 					  nbuf,
1890 					  QDF_DMA_TO_DEVICE,
1891 					  nbuf->len);
1892 }
1893 #endif
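
/*
 * Pairing note (illustration only): a buffer mapped with the nbytes
 * variant above must be released with the matching nbytes unmap and the
 * same length, as done in dp_tx_comp_free_buf() later in this file,
 * while the TDLS frame mapped via qdf_nbuf_map_single() is released with
 * qdf_nbuf_unmap_single() in dp_non_std_tx_comp_free_buff() above:
 *
 *   qdf_nbuf_map_nbytes_single(osdev, nbuf, QDF_DMA_TO_DEVICE, nbuf->len);
 *   ...
 *   qdf_nbuf_unmap_nbytes_single(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *                                nbuf->len);
 */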
1894 
1895 #ifdef MESH_MODE_SUPPORT
1896 /**
1897  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1898  * @soc: datapath SOC
1899  * @vdev: datapath vdev
1900  * @tx_desc: TX descriptor
1901  *
1902  * Return: None
1903  */
1904 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1905 					   struct dp_vdev *vdev,
1906 					   struct dp_tx_desc_s *tx_desc)
1907 {
1908 	if (qdf_unlikely(vdev->mesh_vdev))
1909 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1910 }
1911 
1912 /**
1913  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
1914  * @soc: dp_soc handle
1915  * @tx_desc: TX descriptor
1917  *
1918  * Return: None
1919  */
1920 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1921 					     struct dp_tx_desc_s *tx_desc)
1922 {
1923 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1924 	struct dp_vdev *vdev = NULL;
1925 
1926 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1927 		qdf_nbuf_free(nbuf);
1928 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1929 	} else {
1930 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1931 					     DP_MOD_ID_MESH);
1932 		if (vdev && vdev->osif_tx_free_ext)
1933 			vdev->osif_tx_free_ext((nbuf));
1934 		else
1935 			qdf_nbuf_free(nbuf);
1936 
1937 		if (vdev)
1938 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1939 	}
1940 }
1941 #else
1942 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1943 					   struct dp_vdev *vdev,
1944 					   struct dp_tx_desc_s *tx_desc)
1945 {
1946 }
1947 
1948 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1949 					     struct dp_tx_desc_s *tx_desc)
1950 {
1951 }
1952 #endif
1953 
1954 /**
1955  * dp_tx_frame_is_drop() - checks if the packet is loopback
1956  * @vdev: DP vdev handle
1957  * @srcmac: source MAC address, @dstmac: destination MAC address
1958  *
1959  * Return: 1 if frame needs to be dropped else 0
1960  */
1961 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1962 {
1963 	struct dp_pdev *pdev = NULL;
1964 	struct dp_ast_entry *src_ast_entry = NULL;
1965 	struct dp_ast_entry *dst_ast_entry = NULL;
1966 	struct dp_soc *soc = NULL;
1967 
1968 	qdf_assert(vdev);
1969 	pdev = vdev->pdev;
1970 	qdf_assert(pdev);
1971 	soc = pdev->soc;
1972 
1973 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1974 				(soc, dstmac, vdev->pdev->pdev_id);
1975 
1976 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1977 				(soc, srcmac, vdev->pdev->pdev_id);
1978 	if (dst_ast_entry && src_ast_entry) {
1979 		if (dst_ast_entry->peer_id ==
1980 				src_ast_entry->peer_id)
1981 			return 1;
1982 	}
1983 
1984 	return 0;
1985 }
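
/*
 * Caller-side sketch (hypothetical helper, not part of the driver):
 * the loopback check above is expected to be run on the SA/DA pair of
 * an outgoing ethernet frame before it is handed to the Tx path.
 */
static inline bool dp_tx_example_is_loopback(struct dp_vdev *vdev,
					     qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	/* drop when SA and DA resolve to the same peer (see above) */
	return dp_tx_frame_is_drop(vdev, eh->ether_shost,
				   eh->ether_dhost) != 0;
}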
1986 
1987 /**
1988  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1989  * @vdev: DP vdev handle
1990  * @nbuf: skb
1991  * @msdu_info: MSDU info holding the TID, Tx queue and metadata to the fw
1994  * @peer_id: peer_id of the peer in case of NAWDS frames
1995  * @tx_exc_metadata: Handle that holds exception path metadata
1996  *
1997  * Return: NULL on success,
1998  *         nbuf when it fails to send
1999  */
2000 qdf_nbuf_t
2001 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2002 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2003 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2004 {
2005 	struct dp_pdev *pdev = vdev->pdev;
2006 	struct dp_soc *soc = pdev->soc;
2007 	struct dp_tx_desc_s *tx_desc;
2008 	QDF_STATUS status;
2009 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2010 	uint16_t htt_tcl_metadata = 0;
2011 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2012 	uint8_t tid = msdu_info->tid;
2013 	struct cdp_tid_tx_stats *tid_stats = NULL;
2014 
2015 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2016 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2017 			msdu_info, tx_exc_metadata);
2018 	if (!tx_desc) {
2019 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
2020 			  vdev, tx_q->desc_pool_id);
2021 		drop_code = TX_DESC_ERR;
2022 		goto fail_return;
2023 	}
2024 
2025 	if (qdf_unlikely(soc->cce_disable)) {
2026 		if (dp_cce_classify(vdev, nbuf) == true) {
2027 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
2028 			tid = DP_VO_TID;
2029 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2030 		}
2031 	}
2032 
2033 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2034 
2035 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2036 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2037 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2038 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2039 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2040 				HTT_TCL_METADATA_TYPE_PEER_BASED);
2041 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2042 				peer_id);
2043 	} else
2044 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2045 
2046 	if (msdu_info->exception_fw)
2047 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2048 
2049 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2050 					 !pdev->enhanced_stats_en);
2051 
2052 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2053 
2054 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
2055 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
2056 		/* Handle failure */
2057 		dp_err("qdf_nbuf_map failed");
2058 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2059 		drop_code = TX_DMA_MAP_ERR;
2060 		goto release_desc;
2061 	}
2062 
2063 	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
2064 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2065 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
2066 				  tx_exc_metadata, msdu_info);
2067 
2068 	if (status != QDF_STATUS_SUCCESS) {
2069 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2070 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
2071 			  __func__, tx_desc, tx_q->ring_id);
2072 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2073 					     QDF_DMA_TO_DEVICE,
2074 					     nbuf->len);
2075 		drop_code = TX_HW_ENQUEUE;
2076 		goto release_desc;
2077 	}
2078 
2079 	return NULL;
2080 
2081 release_desc:
2082 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2083 
2084 fail_return:
2085 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2086 	tid_stats = &pdev->stats.tid_stats.
2087 		    tid_tx_stats[tx_q->ring_id][tid];
2088 	tid_stats->swdrop_cnt[drop_code]++;
2089 	return nbuf;
2090 }
2091 
2092 /**
2093  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2094  * @soc: Soc handle
2095  * @desc: software Tx descriptor to be processed
2096  *
2097  * Return: none
2098  */
2099 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2100 				       struct dp_tx_desc_s *desc)
2101 {
2102 	qdf_nbuf_t nbuf = desc->nbuf;
2103 
2104 	/* nbuf already freed in vdev detach path */
2105 	if (!nbuf)
2106 		return;
2107 
2108 	/* If it is TDLS mgmt, don't unmap or free the frame */
2109 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2110 		return dp_non_std_tx_comp_free_buff(soc, desc);
2111 
2112 	/* 0 : MSDU buffer, 1 : MLE */
2113 	if (desc->msdu_ext_desc) {
2114 		/* TSO free */
2115 		if (hal_tx_ext_desc_get_tso_enable(
2116 					desc->msdu_ext_desc->vaddr)) {
2117 			/* unmap each TSO seg before freeing the nbuf */
2118 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2119 						desc->tso_num_desc);
2120 			qdf_nbuf_free(nbuf);
2121 			return;
2122 		}
2123 	}
2124 	/* If it's ME frame, dont unmap the cloned nbuf's */
2125 	/* If it's an ME frame, don't unmap the cloned nbufs */
2126 		goto nbuf_free;
2127 
2128 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2129 				     QDF_DMA_TO_DEVICE, nbuf->len);
2130 
2131 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2132 		return dp_mesh_tx_comp_free_buff(soc, desc);
2133 nbuf_free:
2134 	qdf_nbuf_free(nbuf);
2135 }
2136 
2137 /**
2138  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2139  * @vdev: DP vdev handle
2140  * @nbuf: skb
2141  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2142  *
2143  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2144  *
2145  * Return: NULL on success,
2146  *         nbuf when it fails to send
2147  */
2148 #if QDF_LOCK_STATS
2149 noinline
2150 #else
2151 #endif
2152 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2153 				    struct dp_tx_msdu_info_s *msdu_info)
2154 {
2155 	uint32_t i;
2156 	struct dp_pdev *pdev = vdev->pdev;
2157 	struct dp_soc *soc = pdev->soc;
2158 	struct dp_tx_desc_s *tx_desc;
2159 	bool is_cce_classified = false;
2160 	QDF_STATUS status;
2161 	uint16_t htt_tcl_metadata = 0;
2162 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2163 	struct cdp_tid_tx_stats *tid_stats = NULL;
2164 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2165 
2166 	if (qdf_unlikely(soc->cce_disable)) {
2167 		is_cce_classified = dp_cce_classify(vdev, nbuf);
2168 		if (is_cce_classified) {
2169 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
2170 			msdu_info->tid = DP_VO_TID;
2171 		}
2172 	}
2173 
2174 	if (msdu_info->frm_type == dp_tx_frm_me)
2175 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2176 
2177 	i = 0;
2178 	/* Print statement to track i and num_seg */
2179 	/*
2180 	 * For each segment (maps to 1 MSDU), prepare software and hardware
2181 	 * descriptors using information in msdu_info
2182 	 */
2183 	while (i < msdu_info->num_seg) {
2184 		/*
2185 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2186 		 * descriptor
2187 		 */
2188 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2189 				tx_q->desc_pool_id);
2190 
2191 		if (!tx_desc) {
2192 			if (msdu_info->frm_type == dp_tx_frm_me) {
2193 				prep_desc_fail++;
2194 				dp_tx_me_free_buf(pdev,
2195 					(void *)(msdu_info->u.sg_info
2196 						.curr_seg->frags[0].vaddr));
2197 				if (prep_desc_fail == msdu_info->num_seg) {
2198 					/*
2199 					 * Unmap is needed only if descriptor
2200 					 * preparation failed for all segments.
2201 					 */
2202 					qdf_nbuf_unmap(soc->osdev,
2203 						       msdu_info->u.sg_info.
2204 						       curr_seg->nbuf,
2205 						       QDF_DMA_TO_DEVICE);
2206 				}
2207 				/*
2208 				 * Free the nbuf for the current segment
2209 				 * and make it point to the next in the list.
2210 				 * For ME, there are as many segments as there
2211 				 * are clients.
2212 				 */
2213 				qdf_nbuf_free(msdu_info->u.sg_info
2214 					      .curr_seg->nbuf);
2215 				if (msdu_info->u.sg_info.curr_seg->next) {
2216 					msdu_info->u.sg_info.curr_seg =
2217 						msdu_info->u.sg_info
2218 						.curr_seg->next;
2219 					nbuf = msdu_info->u.sg_info
2220 					       .curr_seg->nbuf;
2221 				}
2222 				i++;
2223 				continue;
2224 			}
2225 
2226 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2227 				dp_tx_tso_unmap_segment(soc,
2228 							msdu_info->u.tso_info.
2229 							curr_seg,
2230 							msdu_info->u.tso_info.
2231 							tso_num_seg_list);
2232 
2233 				if (msdu_info->u.tso_info.curr_seg->next) {
2234 					msdu_info->u.tso_info.curr_seg =
2235 					msdu_info->u.tso_info.curr_seg->next;
2236 					i++;
2237 					continue;
2238 				}
2239 			}
2240 
2241 			goto done;
2242 		}
2243 
2244 		if (msdu_info->frm_type == dp_tx_frm_me) {
2245 			tx_desc->me_buffer =
2246 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
2247 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2248 		}
2249 
2250 		if (is_cce_classified)
2251 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2252 
2253 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2254 		if (msdu_info->exception_fw) {
2255 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2256 		}
2257 
2258 		/*
2259 		 * For frames with multiple segments (TSO, ME), jump to next
2260 		 * segment.
2261 		 */
2262 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2263 			if (msdu_info->u.tso_info.curr_seg->next) {
2264 				msdu_info->u.tso_info.curr_seg =
2265 					msdu_info->u.tso_info.curr_seg->next;
2266 
2267 				/*
2268 				 * If this is a jumbo nbuf, then increment the
2269 				 * number of nbuf users for each additional
2270 				 * segment of the msdu. This will ensure that
2271 				 * the skb is freed only after receiving tx
2272 				 * completion for all segments of an nbuf
2273 				 */
2274 				qdf_nbuf_inc_users(nbuf);
2275 
2276 				/* Check with MCL if this is needed */
2277 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2278 				 */
2279 			}
2280 		}
2281 
2282 		/*
2283 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2284 		 */
2285 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
2286 					  NULL, msdu_info);
2287 
2288 		if (status != QDF_STATUS_SUCCESS) {
2289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2290 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
2291 					__func__, tx_desc, tx_q->ring_id);
2292 
2293 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2294 			tid_stats = &pdev->stats.tid_stats.
2295 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2296 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2297 
2298 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2299 			if (msdu_info->frm_type == dp_tx_frm_me) {
2300 				hw_enq_fail++;
2301 				if (hw_enq_fail == msdu_info->num_seg) {
2302 					/*
2303 					 * Unmap is needed only if enqueue
2304 					 * failed for all segments.
2305 					 */
2306 					qdf_nbuf_unmap(soc->osdev,
2307 						       msdu_info->u.sg_info.
2308 						       curr_seg->nbuf,
2309 						       QDF_DMA_TO_DEVICE);
2310 				}
2311 				/*
2312 				 * Free the nbuf for the current segment
2313 				 * and make it point to the next in the list.
2314 				 * For ME, there are as many segments as there
2315 				 * are clients.
2316 				 */
2317 				qdf_nbuf_free(msdu_info->u.sg_info
2318 					      .curr_seg->nbuf);
2319 				if (msdu_info->u.sg_info.curr_seg->next) {
2320 					msdu_info->u.sg_info.curr_seg =
2321 						msdu_info->u.sg_info
2322 						.curr_seg->next;
2323 					nbuf = msdu_info->u.sg_info
2324 					       .curr_seg->nbuf;
2325 				}
2326 				i++;
2327 				continue;
2328 			}
2329 
2330 			/*
2331 			 * For TSO frames, the nbuf users increment done for
2332 			 * the current segment has to be reverted, since the
2333 			 * hw enqueue for this segment failed
2334 			 */
2335 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2336 			    msdu_info->u.tso_info.curr_seg) {
2337 				/*
2338 				 * unmap and free current,
2339 				 * retransmit remaining segments
2340 				 */
2341 				dp_tx_comp_free_buf(soc, tx_desc);
2342 				i++;
2343 				continue;
2344 			}
2345 
2346 			goto done;
2347 		}
2348 
2349 		/*
2350 		 * TODO
2351 		 * if tso_info structure can be modified to have curr_seg
2352 		 * as first element, following 2 blocks of code (for TSO and SG)
2353 		 * can be combined into 1
2354 		 */
2355 
2356 		/*
2357 		 * For Multicast-Unicast converted packets,
2358 		 * each converted frame (for a client) is represented as
2359 		 * 1 segment
2360 		 */
2361 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2362 				(msdu_info->frm_type == dp_tx_frm_me)) {
2363 			if (msdu_info->u.sg_info.curr_seg->next) {
2364 				msdu_info->u.sg_info.curr_seg =
2365 					msdu_info->u.sg_info.curr_seg->next;
2366 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2367 			}
2368 		}
2369 		i++;
2370 	}
2371 
2372 	nbuf = NULL;
2373 
2374 done:
2375 	return nbuf;
2376 }
2377 
2378 /**
2379  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2380  *                     for SG frames
2381  * @vdev: DP vdev handle
2382  * @nbuf: skb
2383  * @seg_info: Pointer to Segment info Descriptor to be prepared
2384  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2385  *
2386  * Return: nbuf with seg_info/msdu_info populated on success,
2387  *         NULL when DMA mapping of the nbuf fails
2388  */
2389 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2390 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2391 {
2392 	uint32_t cur_frag, nr_frags, i;
2393 	qdf_dma_addr_t paddr;
2394 	struct dp_tx_sg_info_s *sg_info;
2395 
2396 	sg_info = &msdu_info->u.sg_info;
2397 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2398 
2399 	if (QDF_STATUS_SUCCESS !=
2400 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2401 					   QDF_DMA_TO_DEVICE,
2402 					   qdf_nbuf_headlen(nbuf))) {
2403 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2404 				"dma map error");
2405 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2406 
2407 		qdf_nbuf_free(nbuf);
2408 		return NULL;
2409 	}
2410 
2411 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2412 	seg_info->frags[0].paddr_lo = paddr;
2413 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2414 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2415 	seg_info->frags[0].vaddr = (void *) nbuf;
2416 
2417 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2418 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
2419 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
2420 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2421 					"frag dma map error");
2422 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2423 			goto map_err;
2424 		}
2425 
2426 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2427 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2428 		seg_info->frags[cur_frag + 1].paddr_hi =
2429 			((uint64_t) paddr) >> 32;
2430 		seg_info->frags[cur_frag + 1].len =
2431 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2432 	}
2433 
2434 	seg_info->frag_cnt = (cur_frag + 1);
2435 	seg_info->total_len = qdf_nbuf_len(nbuf);
2436 	seg_info->next = NULL;
2437 
2438 	sg_info->curr_seg = seg_info;
2439 
2440 	msdu_info->frm_type = dp_tx_frm_sg;
2441 	msdu_info->num_seg = 1;
2442 
2443 	return nbuf;
2444 map_err:
2445 	/* restore paddr into nbuf before calling unmap */
2446 	qdf_nbuf_mapped_paddr_set(nbuf,
2447 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2448 				  ((uint64_t)
2449 				  seg_info->frags[0].paddr_hi) << 32));
2450 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2451 				     QDF_DMA_TO_DEVICE,
2452 				     seg_info->frags[0].len);
2453 	for (i = 1; i <= cur_frag; i++) {
2454 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2455 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2456 				   seg_info->frags[i].paddr_hi) << 32),
2457 				   seg_info->frags[i].len,
2458 				   QDF_DMA_TO_DEVICE);
2459 	}
2460 	qdf_nbuf_free(nbuf);
2461 	return NULL;
2462 }
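
/*
 * Worked example (illustration only) of the paddr_lo/paddr_hi split used
 * above, assuming 32-bit frag address fields on a 64-bit DMA address:
 *
 *   paddr              = 0x0000000123456780
 *   frags[i].paddr_lo  = (uint32_t)paddr        = 0x23456780
 *   frags[i].paddr_hi  = (uint64_t)paddr >> 32  = 0x00000001
 *
 * and the reassembly performed in the map_err path:
 *
 *   paddr_lo | ((uint64_t)paddr_hi << 32)       = 0x0000000123456780
 */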
2463 
2464 /**
2465  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2466  * @vdev: DP vdev handle
2467  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2468  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2469  *
2470  *
2471  * Return: None
2472  */
2473 static
2474 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2475 				    struct dp_tx_msdu_info_s *msdu_info,
2476 				    uint16_t ppdu_cookie)
2477 {
2478 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2479 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2480 
2481 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2482 
2483 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2484 				(msdu_info->meta_data[5], 1);
2485 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2486 				(msdu_info->meta_data[5], 1);
2487 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2488 				(msdu_info->meta_data[6], ppdu_cookie);
2489 
2490 	msdu_info->exception_fw = 1;
2491 	msdu_info->is_tx_sniffer = 1;
2492 }
2493 
2494 #ifdef MESH_MODE_SUPPORT
2495 
2496 /**
2497  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2498  *				and prepare msdu_info for mesh frames.
2499  * @vdev: DP vdev handle
2500  * @nbuf: skb
2501  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2502  *
2503  * Return: NULL on failure,
2504  *         nbuf when extracted successfully
2505  */
2506 static
2507 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2508 				struct dp_tx_msdu_info_s *msdu_info)
2509 {
2510 	struct meta_hdr_s *mhdr;
2511 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2512 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2513 
2514 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2515 
2516 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2517 		msdu_info->exception_fw = 0;
2518 		goto remove_meta_hdr;
2519 	}
2520 
2521 	msdu_info->exception_fw = 1;
2522 
2523 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2524 
2525 	meta_data->host_tx_desc_pool = 1;
2526 	meta_data->update_peer_cache = 1;
2527 	meta_data->learning_frame = 1;
2528 
2529 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2530 		meta_data->power = mhdr->power;
2531 
2532 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2533 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2534 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2535 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2536 
2537 		meta_data->dyn_bw = 1;
2538 
2539 		meta_data->valid_pwr = 1;
2540 		meta_data->valid_mcs_mask = 1;
2541 		meta_data->valid_nss_mask = 1;
2542 		meta_data->valid_preamble_type  = 1;
2543 		meta_data->valid_retries = 1;
2544 		meta_data->valid_bw_info = 1;
2545 	}
2546 
2547 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2548 		meta_data->encrypt_type = 0;
2549 		meta_data->valid_encrypt_type = 1;
2550 		meta_data->learning_frame = 0;
2551 	}
2552 
2553 	meta_data->valid_key_flags = 1;
2554 	meta_data->key_flags = (mhdr->keyix & 0x3);
2555 
2556 remove_meta_hdr:
2557 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2558 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2559 				"qdf_nbuf_pull_head failed");
2560 		qdf_nbuf_free(nbuf);
2561 		return NULL;
2562 	}
2563 
2564 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2565 
2566 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2567 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2568 			" tid %d to_fw %d",
2569 			__func__, msdu_info->meta_data[0],
2570 			msdu_info->meta_data[1],
2571 			msdu_info->meta_data[2],
2572 			msdu_info->meta_data[3],
2573 			msdu_info->meta_data[4],
2574 			msdu_info->meta_data[5],
2575 			msdu_info->tid, msdu_info->exception_fw);
2576 
2577 	return nbuf;
2578 }
2579 #else
2580 static
2581 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2582 				struct dp_tx_msdu_info_s *msdu_info)
2583 {
2584 	return nbuf;
2585 }
2586 
2587 #endif
2588 
2589 /**
2590  * dp_check_exc_metadata() - Checks if parameters are valid
2591  * @tx_exc - holds all exception path parameters
2592  * @tx_exc: holds all exception path parameters
2593  *
2594  * Return: true when all the parameters are valid, else false
2595  */
2596 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2597 {
2598 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2599 			    HTT_INVALID_TID);
2600 	bool invalid_encap_type =
2601 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2602 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2603 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2604 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2605 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2606 			       tx_exc->ppdu_cookie == 0);
2607 
2608 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2609 	    invalid_cookie) {
2610 		return false;
2611 	}
2612 
2613 	return true;
2614 }
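
/*
 * Caller-side sketch (hypothetical; field and macro names are taken from
 * the checks above): a minimal exception-path descriptor that passes
 * dp_check_exc_metadata(), leaving the optional knobs at their
 * "invalid / don't care" values.
 *
 *   struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *   tx_exc.tid           = HTT_INVALID_TID;           // no TID override
 *   tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE; // keep vdev encap
 *   tx_exc.sec_type      = CDP_INVALID_SEC_TYPE;      // keep vdev sec type
 *   tx_exc.peer_id       = peer_id;                   // target peer
 *
 *   nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &tx_exc);
 */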
2615 
2616 #ifdef ATH_SUPPORT_IQUE
2617 /**
2618  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2619  * @vdev: vdev handle
2620  * @nbuf: skb
2621  *
2622  * Return: true on success,
2623  *         false on failure
2624  */
2625 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2626 {
2627 	qdf_ether_header_t *eh;
2628 
2629 	/* Mcast to Ucast Conversion*/
2630 	if (qdf_likely(!vdev->mcast_enhancement_en))
2631 		return true;
2632 
2633 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2634 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2635 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2636 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2637 
2638 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2639 				 qdf_nbuf_len(nbuf));
2640 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2641 				QDF_STATUS_SUCCESS) {
2642 			return false;
2643 		}
2644 
2645 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2646 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2647 					QDF_STATUS_SUCCESS) {
2648 				return false;
2649 			}
2650 		}
2651 	}
2652 
2653 	return true;
2654 }
2655 #else
2656 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2657 {
2658 	return true;
2659 }
2660 #endif
2661 
2662 /**
2663  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2664  * @nbuf: qdf_nbuf_t
2665  * @vdev: struct dp_vdev *
2666  *
2667  * Allow packet for processing only if it is for peer client which is
2668  * connected with same vap. Drop packet if client is connected to
2669  * different vap.
2670  *
2671  * Return: QDF_STATUS
2672  */
2673 static inline QDF_STATUS
2674 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2675 {
2676 	struct dp_ast_entry *dst_ast_entry = NULL;
2677 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2678 
2679 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2680 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2681 		return QDF_STATUS_SUCCESS;
2682 
2683 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2684 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2685 							eh->ether_dhost,
2686 							vdev->vdev_id);
2687 
2688 	/* If there is no ast entry, return failure */
2689 	if (qdf_unlikely(!dst_ast_entry)) {
2690 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2691 		return QDF_STATUS_E_FAILURE;
2692 	}
2693 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2694 
2695 	return QDF_STATUS_SUCCESS;
2696 }
2697 
2698 /**
2699  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2700  * @soc: DP soc handle
2701  * @vdev_id: id of DP vdev handle
2702  * @nbuf: skb
2703  * @tx_exc_metadata: Handle that holds exception path meta data
2704  *
2705  * Entry point for Core Tx layer (DP_TX) invoked from
2706  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2707  *
2708  * Return: NULL on success,
2709  *         nbuf when it fails to send
2710  */
2711 qdf_nbuf_t
2712 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2713 		     qdf_nbuf_t nbuf,
2714 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2715 {
2716 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2717 	qdf_ether_header_t *eh = NULL;
2718 	struct dp_tx_msdu_info_s msdu_info;
2719 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2720 						     DP_MOD_ID_TX_EXCEPTION);
2721 
2722 	if (qdf_unlikely(!vdev))
2723 		goto fail;
2724 
2725 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2726 
2727 	if (!tx_exc_metadata)
2728 		goto fail;
2729 
2730 	msdu_info.tid = tx_exc_metadata->tid;
2731 	dp_tx_wds_ext(soc, vdev, tx_exc_metadata->peer_id, &msdu_info);
2732 
2733 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2734 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2735 			 QDF_MAC_ADDR_REF(nbuf->data));
2736 
2737 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2738 
2739 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2740 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2741 			"Invalid parameters in exception path");
2742 		goto fail;
2743 	}
2744 
2745 	/* Basic sanity checks for unsupported packets */
2746 
2747 	/* MESH mode */
2748 	if (qdf_unlikely(vdev->mesh_vdev)) {
2749 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2750 			"Mesh mode is not supported in exception path");
2751 		goto fail;
2752 	}
2753 
2754 	/*
2755 	 * Classify the frame and call corresponding
2756 	 * "prepare" function which extracts the segment (TSO)
2757 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2758 	 * into MSDU_INFO structure which is later used to fill
2759 	 * SW and HW descriptors.
2760 	 */
2761 	if (qdf_nbuf_is_tso(nbuf)) {
2762 		dp_verbose_debug("TSO frame %pK", vdev);
2763 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2764 				 qdf_nbuf_len(nbuf));
2765 
2766 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2767 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2768 					 qdf_nbuf_len(nbuf));
2769 			return nbuf;
2770 		}
2771 
2772 		goto send_multiple;
2773 	}
2774 
2775 	/* SG */
2776 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2777 		struct dp_tx_seg_info_s seg_info = {0};
2778 
2779 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2780 		if (!nbuf)
2781 			return NULL;
2782 
2783 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2784 
2785 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2786 				 qdf_nbuf_len(nbuf));
2787 
2788 		goto send_multiple;
2789 	}
2790 
2791 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
2792 		return NULL;
2793 
2794 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2795 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2796 				 qdf_nbuf_len(nbuf));
2797 
2798 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2799 					       tx_exc_metadata->ppdu_cookie);
2800 	}
2801 
2802 	/*
2803 	 * Get HW Queue to use for this frame.
2804 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2805 	 * dedicated for data and 1 for command.
2806 	 * "queue_id" maps to one hardware ring.
2807 	 *  With each ring, we also associate a unique Tx descriptor pool
2808 	 *  to minimize lock contention for these resources.
2809 	 */
2810 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2811 
2812 	/*
2813 	 * Check exception descriptors
2814 	 */
2815 	if (dp_tx_exception_limit_check(vdev))
2816 		goto fail;
2817 
2818 	/*  Single linear frame */
2819 	/*
2820 	 * If nbuf is a simple linear frame, use send_single function to
2821 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2822 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2823 	 */
2824 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2825 			tx_exc_metadata->peer_id, tx_exc_metadata);
2826 
2827 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2828 	return nbuf;
2829 
2830 send_multiple:
2831 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2832 
2833 fail:
2834 	if (vdev)
2835 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2836 	dp_verbose_debug("pkt send failed");
2837 	return nbuf;
2838 }
2839 
2840 /**
2841  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2842  *      in exception path in special case to avoid regular exception path check.
2843  * @soc: DP soc handle
2844  * @vdev_id: id of DP vdev handle
2845  * @nbuf: skb
2846  * @tx_exc_metadata: Handle that holds exception path meta data
2847  *
2848  * Entry point for Core Tx layer (DP_TX) invoked from
2849  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2850  *
2851  * Return: NULL on success,
2852  *         nbuf when it fails to send
2853  */
2854 qdf_nbuf_t
2855 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2856 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
2857 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2858 {
2859 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2860 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2861 						     DP_MOD_ID_TX_EXCEPTION);
2862 
2863 	if (qdf_unlikely(!vdev))
2864 		goto fail;
2865 
2866 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2867 			== QDF_STATUS_E_FAILURE)) {
2868 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2869 		goto fail;
2870 	}
2871 
2872 	/* Drop the reference as it will again be taken in dp_tx_send_exception() */
2873 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2874 
2875 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2876 
2877 fail:
2878 	if (vdev)
2879 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2880 	dp_verbose_debug("pkt send failed");
2881 	return nbuf;
2882 }
2883 
2884 /**
2885  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2886  * @soc: DP soc handle
2887  * @vdev_id: DP vdev handle
2888  * @nbuf: skb
2889  *
2890  * Entry point for Core Tx layer (DP_TX) invoked from
2891  * hard_start_xmit in OSIF/HDD
2892  *
2893  * Return: NULL on success,
2894  *         nbuf when it fails to send
2895  */
2896 #ifdef MESH_MODE_SUPPORT
2897 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2898 			   qdf_nbuf_t nbuf)
2899 {
2900 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2901 	struct meta_hdr_s *mhdr;
2902 	qdf_nbuf_t nbuf_mesh = NULL;
2903 	qdf_nbuf_t nbuf_clone = NULL;
2904 	struct dp_vdev *vdev;
2905 	uint8_t no_enc_frame = 0;
2906 
2907 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2908 	if (!nbuf_mesh) {
2909 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2910 				"qdf_nbuf_unshare failed");
2911 		return nbuf;
2912 	}
2913 
2914 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
2915 	if (!vdev) {
2916 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2917 				"vdev is NULL for vdev_id %d", vdev_id);
2918 		return nbuf;
2919 	}
2920 
2921 	nbuf = nbuf_mesh;
2922 
2923 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2924 
2925 	if ((vdev->sec_type != cdp_sec_type_none) &&
2926 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2927 		no_enc_frame = 1;
2928 
2929 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2930 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2931 
2932 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2933 		       !no_enc_frame) {
2934 		nbuf_clone = qdf_nbuf_clone(nbuf);
2935 		if (!nbuf_clone) {
2936 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2937 				"qdf_nbuf_clone failed");
2938 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2939 			return nbuf;
2940 		}
2941 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2942 	}
2943 
2944 	if (nbuf_clone) {
2945 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
2946 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2947 		} else {
2948 			qdf_nbuf_free(nbuf_clone);
2949 		}
2950 	}
2951 
2952 	if (no_enc_frame)
2953 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2954 	else
2955 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2956 
2957 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
2958 	if ((!nbuf) && no_enc_frame) {
2959 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2960 	}
2961 
2962 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2963 	return nbuf;
2964 }
2965 
2966 #else
2967 
2968 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2969 			   qdf_nbuf_t nbuf)
2970 {
2971 	return dp_tx_send(soc, vdev_id, nbuf);
2972 }
2973 
2974 #endif
2975 
2976 /**
2977  * dp_tx_nawds_handler() - NAWDS handler
2978  *
2979  * @soc: DP soc handle
2980  * @vdev: DP vdev handle
2981  * @msdu_info: msdu_info required to create HTT metadata
2982  * @nbuf: skb
2983  *
2984  * This API transfers the multicast frames with the peer id
2985  * This API transmits multicast frames individually to each
2986  * NAWDS-enabled peer, using that peer's peer_id.
2987  *
2988  */
2989 
2990 static inline
2991 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2992 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2993 {
2994 	struct dp_peer *peer = NULL;
2995 	qdf_nbuf_t nbuf_clone = NULL;
2996 	uint16_t peer_id = DP_INVALID_PEER;
2997 	uint16_t sa_peer_id = DP_INVALID_PEER;
2998 	struct dp_ast_entry *ast_entry = NULL;
2999 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3000 
3001 	qdf_spin_lock_bh(&soc->ast_lock);
3002 	ast_entry = dp_peer_ast_hash_find_by_pdevid
3003 				(soc,
3004 				 (uint8_t *)(eh->ether_shost),
3005 				 vdev->pdev->pdev_id);
3006 
3007 	if (ast_entry)
3008 		sa_peer_id = ast_entry->peer_id;
3009 	qdf_spin_unlock_bh(&soc->ast_lock);
3010 
3011 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3012 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3013 		if (!peer->bss_peer && peer->nawds_enabled) {
3014 			peer_id = peer->peer_id;
3015 			/* Multicast packets need to be
3016 			 * dropped in case of intra-BSS forwarding
3017 			 */
3018 			if (sa_peer_id == peer->peer_id) {
3019 				QDF_TRACE(QDF_MODULE_ID_DP,
3020 					  QDF_TRACE_LEVEL_DEBUG,
3021 					  " %s: multicast packet",  __func__);
3022 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
3023 				continue;
3024 			}
3025 			nbuf_clone = qdf_nbuf_clone(nbuf);
3026 
3027 			if (!nbuf_clone) {
3028 				QDF_TRACE(QDF_MODULE_ID_DP,
3029 					  QDF_TRACE_LEVEL_ERROR,
3030 					  FL("nbuf clone failed"));
3031 				break;
3032 			}
3033 
3034 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3035 							    msdu_info, peer_id,
3036 							    NULL);
3037 
3038 			if (nbuf_clone) {
3039 				QDF_TRACE(QDF_MODULE_ID_DP,
3040 					  QDF_TRACE_LEVEL_DEBUG,
3041 					  FL("pkt send failed"));
3042 				qdf_nbuf_free(nbuf_clone);
3043 			} else {
3044 				if (peer_id != DP_INVALID_PEER)
3045 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
3046 							 1, qdf_nbuf_len(nbuf));
3047 			}
3048 		}
3049 	}
3050 
3051 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3052 }
3053 
3054 /**
3055  * dp_tx_send() - Transmit a frame on a given VAP
3056  * @soc: DP soc handle
3057  * @vdev_id: id of DP vdev handle
3058  * @nbuf: skb
3059  *
3060  * Entry point for Core Tx layer (DP_TX) invoked from
3061  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
3062  * cases
3063  *
3064  * Return: NULL on success,
3065  *         nbuf when it fails to send
3066  */
3067 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3068 		      qdf_nbuf_t nbuf)
3069 {
3070 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3071 	uint16_t peer_id = HTT_INVALID_PEER;
3072 	/*
3073 	 * A memzero here would cause additional function call overhead,
3074 	 * so rely on static initialization to clear the stack variable.
3075 	 */
3076 	struct dp_tx_msdu_info_s msdu_info = {0};
3077 	struct dp_vdev *vdev = NULL;
3078 
3079 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3080 		return nbuf;
3081 
3082 	/*
3083 	 * dp_vdev_get_ref_by_id() does an atomic operation; avoid using
3084 	 * it in the per-packet path.
3085 	 *
3086 	 * As in this path vdev memory is already protected with netdev
3087 	 * tx lock
3088 	 */
3089 	vdev = soc->vdev_id_map[vdev_id];
3090 	if (qdf_unlikely(!vdev))
3091 		return nbuf;
3092 
3093 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3094 			 QDF_MAC_ADDR_REF(nbuf->data));
3095 
3096 	/*
3097 	 * Set Default Host TID value to invalid TID
3098 	 * (TID override disabled)
3099 	 */
3100 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3101 	dp_tx_wds_ext(soc, vdev, peer_id, &msdu_info);
3102 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3103 
3104 	if (qdf_unlikely(vdev->mesh_vdev)) {
3105 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3106 								&msdu_info);
3107 		if (!nbuf_mesh) {
3108 			dp_verbose_debug("Extracting mesh metadata failed");
3109 			return nbuf;
3110 		}
3111 		nbuf = nbuf_mesh;
3112 	}
3113 
3114 	/*
3115 	 * Get HW Queue to use for this frame.
3116 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3117 	 * dedicated for data and 1 for command.
3118 	 * "queue_id" maps to one hardware ring.
3119 	 *  With each ring, we also associate a unique Tx descriptor pool
3120 	 *  to minimize lock contention for these resources.
3121 	 */
3122 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3123 
3124 	/*
3125 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3126 	 *  Table 1 - Default DSCP-TID mapping table
3127 	 *  Table 2 - 1 DSCP-TID override table
3128 	 *
3129 	 * If we need a different DSCP-TID mapping for this vap,
3130 	 * call tid_classify to extract DSCP/ToS from frame and
3131 	 * map to a TID and store in msdu_info. This is later used
3132 	 * to fill in TCL Input descriptor (per-packet TID override).
3133 	 */
3134 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3135 
3136 	/*
3137 	 * Classify the frame and call corresponding
3138 	 * "prepare" function which extracts the segment (TSO)
3139 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3140 	 * into MSDU_INFO structure which is later used to fill
3141 	 * SW and HW descriptors.
3142 	 */
3143 	if (qdf_nbuf_is_tso(nbuf)) {
3144 		dp_verbose_debug("TSO frame %pK", vdev);
3145 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3146 				 qdf_nbuf_len(nbuf));
3147 
3148 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3149 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3150 					 qdf_nbuf_len(nbuf));
3151 			return nbuf;
3152 		}
3153 
3154 		goto send_multiple;
3155 	}
3156 
3157 	/* SG */
3158 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3159 		struct dp_tx_seg_info_s seg_info = {0};
3160 
3161 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3162 		if (!nbuf)
3163 			return NULL;
3164 
3165 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3166 
3167 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3168 				qdf_nbuf_len(nbuf));
3169 
3170 		goto send_multiple;
3171 	}
3172 
3173 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3174 		return NULL;
3175 
3176 	/* RAW */
3177 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3178 		struct dp_tx_seg_info_s seg_info = {0};
3179 
3180 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3181 		if (!nbuf)
3182 			return NULL;
3183 
3184 		dp_verbose_debug("Raw frame %pK", vdev);
3185 
3186 		goto send_multiple;
3187 
3188 	}
3189 
3190 	if (qdf_unlikely(vdev->nawds_enabled)) {
3191 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3192 					  qdf_nbuf_data(nbuf);
3193 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
3194 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
3195 
3196 		peer_id = DP_INVALID_PEER;
3197 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3198 				 1, qdf_nbuf_len(nbuf));
3199 	}
3200 
3201 	/*  Single linear frame */
3202 	/*
3203 	 * If nbuf is a simple linear frame, use send_single function to
3204 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3205 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3206 	 */
3207 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3208 
3209 	return nbuf;
3210 
3211 send_multiple:
3212 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3213 
3214 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3215 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3216 
3217 	return nbuf;
3218 }
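
/*
 * Caller-side sketch (hypothetical): dp_tx_send() hands the nbuf back
 * only when it could not be queued, so the OSIF/HDD caller is expected
 * to free (or requeue) whatever is returned.
 *
 *   nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *   if (nbuf)
 *           qdf_nbuf_free(nbuf);
 */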
3219 
3220 /**
3221  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3222  *      case to avoid check in the per-packet path.
3223  * @soc: DP soc handle
3224  * @vdev_id: id of DP vdev handle
3225  * @nbuf: skb
3226  *
3227  * Entry point for Core Tx layer (DP_TX) invoked from
3228  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3229  * with special condition to avoid per pkt check in dp_tx_send
3230  *
3231  * Return: NULL on success,
3232  *         nbuf when it fails to send
3233  */
3234 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3235 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3236 {
3237 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3238 	struct dp_vdev *vdev = NULL;
3239 
3240 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3241 		return nbuf;
3242 
3243 	/*
3244 	 * dp_vdev_get_ref_by_id() does an atomic operation; avoid using
3245 	 * it in the per-packet path.
3246 	 *
3247 	 * As in this path vdev memory is already protected with netdev
3248 	 * tx lock
3249 	 */
3250 	vdev = soc->vdev_id_map[vdev_id];
3251 	if (qdf_unlikely(!vdev))
3252 		return nbuf;
3253 
3254 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3255 			== QDF_STATUS_E_FAILURE)) {
3256 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3257 		return nbuf;
3258 	}
3259 
3260 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3261 }
3262 
3263 /**
3264  * dp_tx_reinject_handler() - Tx Reinject Handler
3265  * @soc: datapath soc handle
3266  * @vdev: datapath vdev handle
3267  * @tx_desc: software descriptor head pointer
3268  * @status : Tx completion status from HTT descriptor
3269  *
3270  * This function reinjects frames back to Target.
3271  * Todo - Host queue needs to be added
3272  *
3273  * Return: none
3274  */
3275 static
3276 void dp_tx_reinject_handler(struct dp_soc *soc,
3277 			    struct dp_vdev *vdev,
3278 			    struct dp_tx_desc_s *tx_desc,
3279 			    uint8_t *status)
3280 {
3281 	struct dp_peer *peer = NULL;
3282 	uint32_t peer_id = HTT_INVALID_PEER;
3283 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3284 	qdf_nbuf_t nbuf_copy = NULL;
3285 	struct dp_tx_msdu_info_s msdu_info;
3286 #ifdef WDS_VENDOR_EXTENSION
3287 	int is_mcast = 0, is_ucast = 0;
3288 	int num_peers_3addr = 0;
3289 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3290 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3291 #endif
3292 
3293 	qdf_assert(vdev);
3294 
3295 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3296 
3297 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3298 
3299 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3300 			"%s Tx reinject path", __func__);
3301 
3302 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3303 			qdf_nbuf_len(tx_desc->nbuf));
3304 
3305 #ifdef WDS_VENDOR_EXTENSION
3306 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3307 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3308 	} else {
3309 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3310 	}
3311 	is_ucast = !is_mcast;
3312 
3313 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3314 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3315 		if (peer->bss_peer)
3316 			continue;
3317 
3318 		/* Detect wds peers that use 3-addr framing for mcast.
3319 		 * If there are any, the bss_peer is used to send the
3320 		 * mcast frame in 3-addr format. All WDS-enabled
3321 		 * peers that use 4-addr framing for mcast frames will
3322 		 * be duplicated and sent as 4-addr frames below.
3323 		 */
3324 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
3325 			num_peers_3addr = 1;
3326 			break;
3327 		}
3328 	}
3329 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3330 #endif
3331 
3332 	if (qdf_unlikely(vdev->mesh_vdev)) {
3333 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3334 	} else {
3335 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3336 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3337 			if ((peer->peer_id != HTT_INVALID_PEER) &&
3338 #ifdef WDS_VENDOR_EXTENSION
3339 			/*
3340 			 * . if 3-addr STA, then send on BSS Peer
3341 			 * . if Peer WDS enabled and accept 4-addr mcast,
3342 			 * send mcast on that peer only
3343 			 * . if Peer WDS enabled and accept 4-addr ucast,
3344 			 * send ucast on that peer only
3345 			 */
3346 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
3347 			 (peer->wds_enabled &&
3348 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
3349 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
3350 #else
3351 			((peer->bss_peer &&
3352 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
3353 #endif
3354 				peer_id = DP_INVALID_PEER;
3355 
3356 				nbuf_copy = qdf_nbuf_copy(nbuf);
3357 
3358 				if (!nbuf_copy) {
3359 					QDF_TRACE(QDF_MODULE_ID_DP,
3360 						QDF_TRACE_LEVEL_DEBUG,
3361 						FL("nbuf copy failed"));
3362 					break;
3363 				}
3364 
3365 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3366 						nbuf_copy,
3367 						&msdu_info,
3368 						peer_id,
3369 						NULL);
3370 
3371 				if (nbuf_copy) {
3372 					QDF_TRACE(QDF_MODULE_ID_DP,
3373 						QDF_TRACE_LEVEL_DEBUG,
3374 						FL("pkt send failed"));
3375 					qdf_nbuf_free(nbuf_copy);
3376 				}
3377 			}
3378 		}
3379 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3380 	}
3381 
3382 	qdf_nbuf_free(nbuf);
3383 
3384 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3385 }
3386 
3387 /**
3388  * dp_tx_inspect_handler() - Tx Inspect Handler
3389  * @soc: datapath soc handle
3390  * @vdev: datapath vdev handle
3391  * @tx_desc: software descriptor head pointer
3392  * @status : Tx completion status from HTT descriptor
3393  *
3394  * Handles Tx frames sent back to Host for inspection
3395  * (ProxyARP)
3396  *
3397  * Return: none
3398  */
3399 static void dp_tx_inspect_handler(struct dp_soc *soc,
3400 				  struct dp_vdev *vdev,
3401 				  struct dp_tx_desc_s *tx_desc,
3402 				  uint8_t *status)
3403 {
3404 
3405 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3406 			"%s Tx inspect path",
3407 			__func__);
3408 
3409 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3410 			 qdf_nbuf_len(tx_desc->nbuf));
3411 
3412 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3413 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3414 }
3415 
3416 #ifdef FEATURE_PERPKT_INFO
3417 /**
3418  * dp_get_completion_indication_for_stack() - send completion to stack
3419  * @soc : dp_soc handle
3420  * @pdev: dp_pdev handle
3421  * @peer: dp peer handle
3422  * @ts: transmit completion status structure
3423  * @netbuf: Buffer pointer for free
3424  *
3425  * This function is used to indicate whether the buffer needs to be
3426  * sent to the stack for freeing or not.
3427  */
3428 QDF_STATUS
3429 dp_get_completion_indication_for_stack(struct dp_soc *soc,
3430 				       struct dp_pdev *pdev,
3431 				       struct dp_peer *peer,
3432 				       struct hal_tx_completion_status *ts,
3433 				       qdf_nbuf_t netbuf,
3434 				       uint64_t time_latency)
3435 {
3436 	struct tx_capture_hdr *ppdu_hdr;
3437 	uint16_t peer_id = ts->peer_id;
3438 	uint32_t ppdu_id = ts->ppdu_id;
3439 	uint8_t first_msdu = ts->first_msdu;
3440 	uint8_t last_msdu = ts->last_msdu;
3441 	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
3442 
3443 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
3444 			 !pdev->latency_capture_enable))
3445 		return QDF_STATUS_E_NOSUPPORT;
3446 
3447 	if (!peer) {
3448 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3449 				FL("Peer Invalid"));
3450 		return QDF_STATUS_E_INVAL;
3451 	}
3452 
3453 	if (pdev->mcopy_mode) {
3454 		/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
3455 		 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
3456 		 * for each MPDU
3457 		 */
3458 		if (pdev->mcopy_mode == M_COPY) {
3459 			if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
3460 			    (pdev->m_copy_id.tx_peer_id == peer_id)) {
3461 				return QDF_STATUS_E_INVAL;
3462 			}
3463 		}
3464 
3465 		if (!first_msdu)
3466 			return QDF_STATUS_E_INVAL;
3467 
3468 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
3469 		pdev->m_copy_id.tx_peer_id = peer_id;
3470 	}
3471 
3472 	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
3473 		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
3474 		if (!netbuf) {
3475 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3476 				  FL("No headroom"));
3477 			return QDF_STATUS_E_NOMEM;
3478 		}
3479 	}
3480 
3481 	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
3482 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3483 				FL("No headroom"));
3484 		return QDF_STATUS_E_NOMEM;
3485 	}
3486 
3487 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
3488 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
3489 		     QDF_MAC_ADDR_SIZE);
3490 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
3491 		     QDF_MAC_ADDR_SIZE);
3492 	ppdu_hdr->ppdu_id = ppdu_id;
3493 	ppdu_hdr->peer_id = peer_id;
3494 	ppdu_hdr->first_msdu = first_msdu;
3495 	ppdu_hdr->last_msdu = last_msdu;
3496 	if (qdf_unlikely(pdev->latency_capture_enable)) {
3497 		ppdu_hdr->tsf = ts->tsf;
3498 		ppdu_hdr->time_latency = time_latency;
3499 	}
3500 
3501 	return QDF_STATUS_SUCCESS;
3502 }
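
/*
 * Consumer-side sketch (hypothetical; process_tx_capture() is a made-up
 * subscriber routine): the WDI_EVENT_TX_DATA handler can recover the
 * metadata pushed above from the front of the netbuf before passing the
 * payload on.
 *
 *   struct tx_capture_hdr *hdr =
 *           (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
 *
 *   process_tx_capture(hdr->peer_id, hdr->ppdu_id,
 *                      hdr->first_msdu, hdr->last_msdu);
 *   qdf_nbuf_pull_head(netbuf, sizeof(struct tx_capture_hdr));
 */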
3503 
3504 
3505 /**
3506  * dp_send_completion_to_stack() - send completion to stack
3507  * @soc :  dp_soc handle
3508  * @pdev:  dp_pdev handle
3509  * @peer_id: peer_id of the peer for which completion came
3510  * @ppdu_id: ppdu_id
3511  * @netbuf: Buffer pointer for free
3512  *
3513  * This function is used to send the completion to the stack
3514  * to free the buffer.
3515  */
3516 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
3517 					uint16_t peer_id, uint32_t ppdu_id,
3518 					qdf_nbuf_t netbuf)
3519 {
3520 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
3521 				netbuf, peer_id,
3522 				WDI_NO_VAL, pdev->pdev_id);
3523 }
3524 #else
3525 static QDF_STATUS
3526 dp_get_completion_indication_for_stack(struct dp_soc *soc,
3527 				       struct dp_pdev *pdev,
3528 				       struct dp_peer *peer,
3529 				       struct hal_tx_completion_status *ts,
3530 				       qdf_nbuf_t netbuf,
3531 				       uint64_t time_latency)
3532 {
3533 	return QDF_STATUS_E_NOSUPPORT;
3534 }
3535 
3536 static void
3537 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
3538 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
3539 {
3540 }
3541 #endif
3542 
3543 #ifdef MESH_MODE_SUPPORT
3544 /**
3545  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3546  *                                         in mesh meta header
3547  * @tx_desc: software descriptor head pointer
3548  * @ts: pointer to tx completion stats
3549  * Return: none
3550  */
3551 static
3552 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3553 		struct hal_tx_completion_status *ts)
3554 {
3555 	struct meta_hdr_s *mhdr;
3556 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3557 
3558 	if (!tx_desc->msdu_ext_desc) {
3559 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3560 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3561 				"netbuf %pK offset %d",
3562 				netbuf, tx_desc->pkt_offset);
3563 			return;
3564 		}
3565 	}
3566 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
3567 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3568 			"netbuf %pK offset %lu", netbuf,
3569 			sizeof(struct meta_hdr_s));
3570 		return;
3571 	}
3572 
3573 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
3574 	mhdr->rssi = ts->ack_frame_rssi;
3575 	mhdr->band = tx_desc->pdev->operating_channel.band;
3576 	mhdr->channel = tx_desc->pdev->operating_channel.num;
3577 }
3578 
3579 #else
3580 static
3581 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3582 		struct hal_tx_completion_status *ts)
3583 {
3584 }
3585 
3586 #endif
3587 
3588 #ifdef QCA_PEER_EXT_STATS
3589 /*
3590  * dp_tx_compute_tid_delay() - Compute per TID delay
3591  * @stats: Per TID delay stats
3592  * @tx_desc: Software Tx descriptor
3593  *
3594  * Compute the software enqueue and hw enqueue delays and
3595  * update the respective histograms
3596  *
3597  * Return: void
3598  */
3599 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3600 				    struct dp_tx_desc_s *tx_desc)
3601 {
3602 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3603 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3604 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3605 
3606 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3607 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3608 	timestamp_hw_enqueue = tx_desc->timestamp;
3609 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3610 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3611 					 timestamp_hw_enqueue);
3612 
3613 	/*
3614 	 * Update the Tx software enqueue delay and the HW enqueue-to-completion delay.
3615 	 */
3616 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3617 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3618 }
3619 
3620 /*
3621  * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3622  * @peer: DP peer context
3623  * @tx_desc: Tx software descriptor
3624  * @tid: Transmission ID
3625  * @ring_id: Tx completion ring number (CPU context ID)
3626  *
3627  * Update the peer extended stats. These are the enhanced per-MSDU
3628  * delay stats maintained in addition to the regular peer stats.
3629  *
3630  * Return: void
3631  */
3632 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3633 					struct dp_tx_desc_s *tx_desc,
3634 					uint8_t tid, uint8_t ring_id)
3635 {
3636 	struct dp_pdev *pdev = peer->vdev->pdev;
3637 	struct dp_soc *soc = NULL;
3638 	struct cdp_peer_ext_stats *pext_stats = NULL;
3639 
3640 	soc = pdev->soc;
3641 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3642 		return;
3643 
3644 	pext_stats = peer->pext_stats;
3645 
3646 	qdf_assert(pext_stats);
3647 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3648 
3649 	/*
3650 	 * For packets without a valid data TID, use the last TID bucket
3651 	 */
3652 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3653 		tid = CDP_MAX_DATA_TIDS - 1;
3654 
3655 	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3656 				tx_desc);
3657 }
3658 #else
3659 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3660 					       struct dp_tx_desc_s *tx_desc,
3661 					       uint8_t tid, uint8_t ring_id)
3662 {
3663 }
3664 #endif
3665 
3666 /**
3667  * dp_tx_compute_delay() - Compute sw enqueue, hw transmit and
3668  *			    interframe delays and update delay stats
3669  *
3670  * @vdev: vdev handle
3671  * @tx_desc: tx descriptor
3672  * @tid: tid value
3673  * @ring_id: TCL or WBM ring number for transmit path
3674  * Return: none
3675  */
3676 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3677 				struct dp_tx_desc_s *tx_desc,
3678 				uint8_t tid, uint8_t ring_id)
3679 {
3680 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3681 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3682 
3683 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3684 		return;
3685 
3686 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3687 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3688 	timestamp_hw_enqueue = tx_desc->timestamp;
3689 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3690 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3691 					 timestamp_hw_enqueue);
3692 	interframe_delay = (uint32_t)(timestamp_ingress -
3693 				      vdev->prev_tx_enq_tstamp);
3694 
3695 	/*
3696 	 * Delay in software enqueue
3697 	 */
3698 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3699 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3700 	/*
3701 	 * Delay between packet enqueued to HW and Tx completion
3702 	 */
3703 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3704 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3705 
3706 	/*
3707 	 * Update interframe delay stats calculated at hardstart receive point.
3708 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3709 	 * the interframe delay will not be calculated correctly for the 1st frame.
3710 	 * On the other hand, this avoids an extra per-packet check of
3711 	 * !vdev->prev_tx_enq_tstamp.
3712 	 */
3713 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3714 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3715 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3716 }
3717 
3718 #ifdef DISABLE_DP_STATS
3719 static
3720 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3721 {
3722 }
3723 #else
3724 static
3725 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3726 {
3727 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3728 
3729 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3730 	if (subtype != QDF_PROTO_INVALID)
3731 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3732 }
3733 #endif
3734 
3735 /**
3736  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3737  *				per wbm ring
3738  *
3739  * @tx_desc: software descriptor head pointer
3740  * @ts: Tx completion status
3741  * @peer: peer handle
3742  * @ring_id: ring number
3743  *
3744  * Return: None
3745  */
3746 static inline void
3747 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3748 			struct hal_tx_completion_status *ts,
3749 			struct dp_peer *peer, uint8_t ring_id)
3750 {
3751 	struct dp_pdev *pdev = peer->vdev->pdev;
3752 	struct dp_soc *soc = NULL;
3753 	uint8_t mcs, pkt_type;
3754 	uint8_t tid = ts->tid;
3755 	uint32_t length;
3756 	struct cdp_tid_tx_stats *tid_stats;
3757 
3758 	if (!pdev)
3759 		return;
3760 
3761 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3762 		tid = CDP_MAX_DATA_TIDS - 1;
3763 
3764 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3765 	soc = pdev->soc;
3766 
3767 	mcs = ts->mcs;
3768 	pkt_type = ts->pkt_type;
3769 
3770 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3771 		dp_err("Release source is not from TQM");
3772 		return;
3773 	}
3774 
3775 	length = qdf_nbuf_len(tx_desc->nbuf);
3776 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3777 
3778 	if (qdf_unlikely(pdev->delay_stats_flag))
3779 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
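
	/* Account the TQM release reason into the matching drop counter */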
3780 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3781 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3782 
3783 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3784 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3785 
3786 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3787 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3788 
3789 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3790 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3791 
3792 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3793 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3794 
3795 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3796 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3797 
3798 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3799 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3800 
3801 	/*
3802 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3803 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3804 	 * are no completions for failed cases. Hence updating tx_failed from
3805 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3806 	 * then this has to be removed
3807 	 */
3808 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3809 				peer->stats.tx.dropped.fw_rem_notx +
3810 				peer->stats.tx.dropped.fw_rem_tx +
3811 				peer->stats.tx.dropped.age_out +
3812 				peer->stats.tx.dropped.fw_reason1 +
3813 				peer->stats.tx.dropped.fw_reason2 +
3814 				peer->stats.tx.dropped.fw_reason3;
3815 
3816 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3817 		tid_stats->tqm_status_cnt[ts->status]++;
3818 	}
3819 
3820 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3821 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3822 		return;
3823 	}
3824 
3825 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3826 
3827 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3828 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3829 
3830 	/*
3831 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3832 	 * Return from here if HTT PPDU events are enabled.
3833 	 */
3834 	if (!(soc->process_tx_status))
3835 		return;
3836 
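	/*
	 * Bucket MCS counts per preamble type; MCS values beyond the
	 * per-preamble maximum land in the last (MAX_MCS - 1) bucket.
	 */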
3837 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3838 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3839 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3840 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3841 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3842 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3843 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3844 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3845 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3846 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3847 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3848 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3849 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3850 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3851 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3852 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3853 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3854 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3855 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3856 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3857 
3858 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3859 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3860 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3861 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3862 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3863 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3864 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3865 
3866 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3867 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3868 			     &peer->stats, ts->peer_id,
3869 			     UPDATE_PEER_STATS, pdev->pdev_id);
3870 #endif
3871 }
3872 
3873 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3874 /**
3875  * dp_tx_flow_pool_lock() - take flow pool lock
3876  * @soc: core txrx main context
3877  * @tx_desc: tx desc
3878  *
3879  * Return: None
3880  */
3881 static inline
3882 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3883 			  struct dp_tx_desc_s *tx_desc)
3884 {
3885 	struct dp_tx_desc_pool_s *pool;
3886 	uint8_t desc_pool_id;
3887 
3888 	desc_pool_id = tx_desc->pool_id;
3889 	pool = &soc->tx_desc[desc_pool_id];
3890 
3891 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3892 }
3893 
3894 /**
3895  * dp_tx_flow_pool_unlock() - release flow pool lock
3896  * @soc: core txrx main context
3897  * @tx_desc: tx desc
3898  *
3899  * Return: None
3900  */
3901 static inline
3902 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3903 			    struct dp_tx_desc_s *tx_desc)
3904 {
3905 	struct dp_tx_desc_pool_s *pool;
3906 	uint8_t desc_pool_id;
3907 
3908 	desc_pool_id = tx_desc->pool_id;
3909 	pool = &soc->tx_desc[desc_pool_id];
3910 
3911 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3912 }
3913 #else
3914 static inline
3915 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3916 {
3917 }
3918 
3919 static inline
3920 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3921 {
3922 }
3923 #endif
3924 
3925 /**
3926  * dp_tx_notify_completion() - Notify tx completion for this desc
3927  * @soc: core txrx main context
3928  * @vdev: datapath vdev handle
3929  * @tx_desc: tx desc
3930  * @netbuf:  buffer
3931  * @status: tx status
3932  *
3933  * Return: none
3934  */
3935 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3936 					   struct dp_vdev *vdev,
3937 					   struct dp_tx_desc_s *tx_desc,
3938 					   qdf_nbuf_t netbuf,
3939 					   uint8_t status)
3940 {
3941 	void *osif_dev;
3942 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3943 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
3944 
3945 	qdf_assert(tx_desc);
3946 
3947 	dp_tx_flow_pool_lock(soc, tx_desc);
3948 
3949 	if (!vdev ||
3950 	    !vdev->osif_vdev) {
3951 		dp_tx_flow_pool_unlock(soc, tx_desc);
3952 		return;
3953 	}
3954 
3955 	osif_dev = vdev->osif_vdev;
3956 	tx_compl_cbk = vdev->tx_comp;
3957 	dp_tx_flow_pool_unlock(soc, tx_desc);
3958 
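	/* Download success is always reported; the OK bit is set only on TQM ack */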
3959 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3960 		flag |= BIT(QDF_TX_RX_STATUS_OK);
3961 
3962 	if (tx_compl_cbk)
3963 		tx_compl_cbk(netbuf, osif_dev, flag);
3964 }
3965 
3966 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3967  * @pdev: pdev handle
 * @peer: peer handle
3968  * @tid: tid value
3969  * @txdesc_ts: timestamp from txdesc
3970  * @ppdu_id: ppdu id
3971  *
3972  * Return: none
3973  */
3974 #ifdef FEATURE_PERPKT_INFO
3975 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3976 					       struct dp_peer *peer,
3977 					       uint8_t tid,
3978 					       uint64_t txdesc_ts,
3979 					       uint32_t ppdu_id)
3980 {
3981 	uint64_t delta_ms;
3982 	struct cdp_tx_sojourn_stats *sojourn_stats;
3983 
3984 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3985 		return;
3986 
3987 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3988 			 tid >= CDP_DATA_TID_MAX))
3989 		return;
3990 
3991 	if (qdf_unlikely(!pdev->sojourn_buf))
3992 		return;
3993 
3994 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3995 		qdf_nbuf_data(pdev->sojourn_buf);
3996 
3997 	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
3998 
3999 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
4000 				txdesc_ts;
4001 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
4002 			    delta_ms);
4003 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4004 	sojourn_stats->num_msdus[tid] = 1;
4005 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4006 		peer->avg_sojourn_msdu[tid].internal;
4007 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4008 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4009 			     WDI_NO_VAL, pdev->pdev_id);
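	/* Clear the per-TID snapshot once it has been handed to the WDI consumer */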
4010 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4011 	sojourn_stats->num_msdus[tid] = 0;
4012 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4013 }
4014 #else
4015 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4016 					       struct dp_peer *peer,
4017 					       uint8_t tid,
4018 					       uint64_t txdesc_ts,
4019 					       uint32_t ppdu_id)
4020 {
4021 }
4022 #endif
4023 
4024 /**
4025  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4026  * @soc: DP Soc handle
4027  * @desc: software Tx descriptor
4028  * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: DP peer handle
4029  *
4030  * Return: none
4031  */
4032 static inline void
4033 dp_tx_comp_process_desc(struct dp_soc *soc,
4034 			struct dp_tx_desc_s *desc,
4035 			struct hal_tx_completion_status *ts,
4036 			struct dp_peer *peer)
4037 {
4038 	uint64_t time_latency = 0;
4039 	/*
4040 	 * m_copy/tx_capture modes are not supported for
4041 	 * scatter gather packets
4042 	 */
4043 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4044 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4045 				desc->timestamp);
4046 	}
4047 	if (!(desc->msdu_ext_desc)) {
4048 		if (QDF_STATUS_SUCCESS ==
4049 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
4050 			return;
4051 		}
4052 
4053 		if (QDF_STATUS_SUCCESS ==
4054 		    dp_get_completion_indication_for_stack(soc,
4055 							   desc->pdev,
4056 							   peer, ts,
4057 							   desc->nbuf,
4058 							   time_latency)) {
4059 			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
4060 						     QDF_DMA_TO_DEVICE,
4061 						     desc->nbuf->len);
4062 			dp_send_completion_to_stack(soc,
4063 						    desc->pdev,
4064 						    ts->peer_id,
4065 						    ts->ppdu_id,
4066 						    desc->nbuf);
4067 			return;
4068 		}
4069 	}
4070 
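	/* nbuf was not consumed by the completion-to-stack path; free it here */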
4071 	dp_tx_comp_free_buf(soc, desc);
4072 }
4073 
4074 #ifdef DISABLE_DP_STATS
4075 /**
4076  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4077  * @soc: core txrx main context
 * @vdev: datapath vdev handle
4078  * @tx_desc: tx desc
4079  * @status: tx status
4080  *
4081  * Return: none
4082  */
4083 static inline
4084 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4085 				     struct dp_vdev *vdev,
4086 				     struct dp_tx_desc_s *tx_desc,
4087 				     uint8_t status)
4088 {
4089 }
4090 #else
4091 static inline
4092 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4093 				     struct dp_vdev *vdev,
4094 				     struct dp_tx_desc_s *tx_desc,
4095 				     uint8_t status)
4096 {
4097 	void *osif_dev;
4098 	ol_txrx_stats_rx_fp stats_cbk;
4099 	uint8_t pkt_type;
4100 
4101 	qdf_assert(tx_desc);
4102 
4103 	if (!vdev ||
4104 	    !vdev->osif_vdev ||
4105 	    !vdev->stats_cb)
4106 		return;
4107 
4108 	osif_dev = vdev->osif_vdev;
4109 	stats_cbk = vdev->stats_cb;
4110 
4111 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4112 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4113 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4114 			  &pkt_type);
4115 }
4116 #endif
4117 
4118 /**
4119  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4120  * @soc: DP soc handle
4121  * @tx_desc: software descriptor head pointer
4122  * @ts: Tx completion status
4123  * @peer: peer handle
4124  * @ring_id: ring number
4125  *
4126  * Return: none
4127  */
4128 static inline
4129 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4130 				  struct dp_tx_desc_s *tx_desc,
4131 				  struct hal_tx_completion_status *ts,
4132 				  struct dp_peer *peer, uint8_t ring_id)
4133 {
4134 	uint32_t length;
4135 	qdf_ether_header_t *eh;
4136 	struct dp_vdev *vdev = NULL;
4137 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4138 	enum qdf_dp_tx_rx_status dp_status;
4139 
4140 	if (!nbuf) {
4141 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4142 		goto out;
4143 	}
4144 
4145 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4146 	length = qdf_nbuf_len(nbuf);
4147 
4148 	dp_status = dp_tx_hw_to_qdf(ts->status);
4149 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4150 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4151 				 QDF_TRACE_DEFAULT_PDEV_ID,
4152 				 qdf_nbuf_data_addr(nbuf),
4153 				 sizeof(qdf_nbuf_data(nbuf)),
4154 				 tx_desc->id, ts->status, dp_status));
4155 
4156 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4157 				"-------------------- \n"
4158 				"Tx Completion Stats: \n"
4159 				"-------------------- \n"
4160 				"ack_frame_rssi = %d \n"
4161 				"first_msdu = %d \n"
4162 				"last_msdu = %d \n"
4163 				"msdu_part_of_amsdu = %d \n"
4164 				"rate_stats valid = %d \n"
4165 				"bw = %d \n"
4166 				"pkt_type = %d \n"
4167 				"stbc = %d \n"
4168 				"ldpc = %d \n"
4169 				"sgi = %d \n"
4170 				"mcs = %d \n"
4171 				"ofdma = %d \n"
4172 				"tones_in_ru = %d \n"
4173 				"tsf = %d \n"
4174 				"ppdu_id = %d \n"
4175 				"transmit_cnt = %d \n"
4176 				"tid = %d \n"
4177 				"peer_id = %d\n",
4178 				ts->ack_frame_rssi, ts->first_msdu,
4179 				ts->last_msdu, ts->msdu_part_of_amsdu,
4180 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4181 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4182 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4183 				ts->transmit_cnt, ts->tid, ts->peer_id);
4184 
4185 	/* Update SoC level stats */
4186 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4187 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4188 
4189 	if (!peer) {
4190 		dp_info_rl("peer is null or deletion in progress");
4191 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4192 		goto out;
4193 	}
4194 	vdev = peer->vdev;
4195 
4196 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4197 
4198 	/* Update per-packet stats for mesh mode */
4199 	if (qdf_unlikely(vdev->mesh_vdev) &&
4200 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4201 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4202 
4203 	/* Update peer level stats */
4204 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
4205 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4206 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
4207 
4208 			if ((peer->vdev->tx_encap_type ==
4209 				htt_cmn_pkt_type_ethernet) &&
4210 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4211 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
4212 			}
4213 		}
4214 	} else {
4215 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
4216 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4217 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
4218 			if (qdf_unlikely(peer->in_twt)) {
4219 				DP_STATS_INC_PKT(peer,
4220 						 tx.tx_success_twt,
4221 						 1, length);
4222 			}
4223 		}
4224 	}
4225 
4226 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
4227 	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
4228 
4229 #ifdef QCA_SUPPORT_RDK_STATS
4230 	if (soc->rdkstats_enabled)
4231 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
4232 					    tx_desc->timestamp,
4233 					    ts->ppdu_id);
4234 #endif
4235 
4236 out:
4237 	return;
4238 }
4239 /**
4240  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4241  * @soc: core txrx main context
4242  * @comp_head: software descriptor head pointer
4243  * @ring_id: ring number
4244  *
4245  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4246  * and release the software descriptors after processing is complete
4247  *
4248  * Return: none
4249  */
4250 static void
4251 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4252 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4253 {
4254 	struct dp_tx_desc_s *desc;
4255 	struct dp_tx_desc_s *next;
4256 	struct hal_tx_completion_status ts;
4257 	struct dp_peer *peer = NULL;
4258 	uint16_t peer_id = DP_INVALID_PEER;
4259 	qdf_nbuf_t netbuf;
4260 
4261 	desc = comp_head;
4262 
4263 	while (desc) {
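		/*
		 * Reuse the cached peer reference while consecutive
		 * descriptors carry the same peer id, instead of taking a
		 * reference per descriptor.
		 */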
4264 		if (peer_id != desc->peer_id) {
4265 			if (peer)
4266 				dp_peer_unref_delete(peer,
4267 						     DP_MOD_ID_TX_COMP);
4268 			peer_id = desc->peer_id;
4269 			peer = dp_peer_get_ref_by_id(soc, peer_id,
4270 						     DP_MOD_ID_TX_COMP);
4271 		}
4272 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4273 			struct dp_pdev *pdev = desc->pdev;
4274 
4275 			if (qdf_likely(peer)) {
4276 				/*
4277 				 * Increment peer statistics
4278 				 * Minimal statistics update done here
4279 				 */
4280 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
4281 						 desc->length);
4282 
4283 				if (desc->tx_status !=
4284 						HAL_TX_TQM_RR_FRAME_ACKED)
4285 					DP_STATS_INC(peer, tx.tx_failed, 1);
4286 			}
4287 
4288 			qdf_assert(pdev);
4289 			dp_tx_outstanding_dec(pdev);
4290 
4291 			/*
4292 			 * Calling a QDF wrapper here creates a significant
4293 			 * performance impact, so the wrapper call is avoided here.
4294 			 */
4295 			next = desc->next;
4296 			qdf_mem_unmap_nbytes_single(soc->osdev,
4297 						    desc->dma_addr,
4298 						    QDF_DMA_TO_DEVICE,
4299 						    desc->length);
4300 			qdf_nbuf_free(desc->nbuf);
4301 			dp_tx_desc_free(soc, desc, desc->pool_id);
4302 			desc = next;
4303 			continue;
4304 		}
4305 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4306 
4307 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
4308 
4309 		netbuf = desc->nbuf;
4310 		/* check tx complete notification */
4311 		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
4312 			dp_tx_notify_completion(soc, peer->vdev, desc,
4313 						netbuf, ts.status);
4314 
4315 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
4316 
4317 		next = desc->next;
4318 
4319 		dp_tx_desc_release(desc, desc->pool_id);
4320 		desc = next;
4321 	}
4322 	if (peer)
4323 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
4324 }
4325 
4326 /**
4327  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
4328  * @soc: Handle to DP soc structure
4329  * @tx_desc: software descriptor head pointer
4330  * @status: Tx completion status from HTT descriptor
4331  * @ring_id: ring number
4332  *
4333  * This function will process HTT Tx indication messages from Target
4334  *
4335  * Return: none
4336  */
4337 static
4338 void dp_tx_process_htt_completion(struct dp_soc *soc,
4339 				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
4340 				  uint8_t ring_id)
4341 {
4342 	uint8_t tx_status;
4343 	struct dp_pdev *pdev;
4344 	struct dp_vdev *vdev;
4345 	struct hal_tx_completion_status ts = {0};
4346 	uint32_t *htt_desc = (uint32_t *)status;
4347 	struct dp_peer *peer;
4348 	struct cdp_tid_tx_stats *tid_stats = NULL;
4349 	struct htt_soc *htt_handle;
4350 	uint8_t vdev_id;
4351 
4352 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
4353 	htt_handle = (struct htt_soc *)soc->htt_handle;
4354 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
4355 
4356 	/*
4357 	 * There can be scenario where WBM consuming descriptor enqueued
4358 	 * from TQM2WBM first and TQM completion can happen before MEC
4359 	 * notification comes from FW2WBM. Avoid access any field of tx
4360 	 * descriptor in case of MEC notify.
4361 	 */
4362 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
4363 		/*
4364 		 * Get vdev id from HTT status word in case of MEC
4365 		 * notification
4366 		 */
4367 		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
4368 		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4369 			return;
4370 
4371 		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4372 				DP_MOD_ID_HTT_COMP);
4373 		if (!vdev)
4374 			return;
4375 		dp_tx_mec_handler(vdev, status);
4376 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4377 		return;
4378 	}
4379 
4380 	/*
4381 	 * If the descriptor is already freed in vdev_detach,
4382 	 * continue to next descriptor
4383 	 */
4384 	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
4385 		QDF_TRACE(QDF_MODULE_ID_DP,
4386 				QDF_TRACE_LEVEL_INFO,
4387 				"Descriptor freed in vdev_detach %d",
4388 				tx_desc->id);
4389 		return;
4390 	}
4391 
4392 	pdev = tx_desc->pdev;
4393 
4394 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4395 		QDF_TRACE(QDF_MODULE_ID_DP,
4396 				QDF_TRACE_LEVEL_INFO,
4397 				"pdev in down state %d",
4398 				tx_desc->id);
4399 		dp_tx_comp_free_buf(soc, tx_desc);
4400 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4401 		return;
4402 	}
4403 
4404 	qdf_assert(tx_desc->pdev);
4405 
4406 	vdev_id = tx_desc->vdev_id;
4407 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4408 			DP_MOD_ID_HTT_COMP);
4409 
4410 	if (!vdev)
4411 		return;
4412 
4413 	switch (tx_status) {
4414 	case HTT_TX_FW2WBM_TX_STATUS_OK:
4415 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
4416 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
4417 	{
4418 		uint8_t tid;
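
		/* Peer id and TID are carried only when the VALID bit is set */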
4419 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
4420 			ts.peer_id =
4421 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
4422 						htt_desc[2]);
4423 			ts.tid =
4424 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
4425 						htt_desc[2]);
4426 		} else {
4427 			ts.peer_id = HTT_INVALID_PEER;
4428 			ts.tid = HTT_INVALID_TID;
4429 		}
4430 		ts.ppdu_id =
4431 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
4432 					htt_desc[1]);
4433 		ts.ack_frame_rssi =
4434 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
4435 					htt_desc[1]);
4436 
4437 		ts.tsf = htt_desc[3];
4438 		ts.first_msdu = 1;
4439 		ts.last_msdu = 1;
4440 		tid = ts.tid;
4441 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4442 			tid = CDP_MAX_DATA_TIDS - 1;
4443 
4444 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4445 
4446 		if (qdf_unlikely(pdev->delay_stats_flag))
4447 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
4448 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
4449 			tid_stats->htt_status_cnt[tx_status]++;
4450 		}
4451 
4452 		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
4453 					     DP_MOD_ID_HTT_COMP);
4454 
4455 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
4456 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
4457 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4458 
4459 		if (qdf_likely(peer))
4460 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
4461 
4462 		break;
4463 	}
4464 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
4465 	{
4466 		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
4467 		break;
4468 	}
4469 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
4470 	{
4471 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
4472 		break;
4473 	}
4474 	default:
4475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4476 			  "%s Invalid HTT tx_status %d\n",
4477 			  __func__, tx_status);
4478 		break;
4479 	}
4480 
4481 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4482 }
4483 
4484 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4485 static inline
4486 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
4487 {
4488 	bool limit_hit = false;
4489 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
4490 
4491 	limit_hit =
4492 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
4493 
4494 	if (limit_hit)
4495 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4496 
4497 	return limit_hit;
4498 }
4499 
4500 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4501 {
4502 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4503 }
4504 #else
4505 static inline
4506 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
4507 {
4508 	return false;
4509 }
4510 
4511 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4512 {
4513 	return false;
4514 }
4515 #endif
4516 
4517 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
4518 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
4519 			    uint32_t quota)
4520 {
4521 	void *tx_comp_hal_desc;
4522 	uint8_t buffer_src;
4523 	uint8_t pool_id;
4524 	uint32_t tx_desc_id;
4525 	struct dp_tx_desc_s *tx_desc = NULL;
4526 	struct dp_tx_desc_s *head_desc = NULL;
4527 	struct dp_tx_desc_s *tail_desc = NULL;
4528 	uint32_t num_processed = 0;
4529 	uint32_t count;
4530 	uint32_t num_avail_for_reap = 0;
4531 	bool force_break = false;
4532 
4533 	DP_HIST_INIT();
4534 
4535 more_data:
4536 	/* Re-initialize local variables to be re-used */
4537 	head_desc = NULL;
4538 	tail_desc = NULL;
4539 	count = 0;
4540 
4541 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
4542 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
4543 		return 0;
4544 	}
4545 
4546 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
4547 
4548 	if (num_avail_for_reap >= quota)
4549 		num_avail_for_reap = quota;
4550 
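	/* Invalidate cached ring descriptors for the entries about to be reaped */
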
4551 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
4552 
4553 	/* Find head descriptor from completion ring */
4554 	while (qdf_likely(num_avail_for_reap)) {
4555 
4556 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
4557 		if (qdf_unlikely(!tx_comp_hal_desc))
4558 			break;
4559 
4560 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
4561 
4562 		/* If this buffer was not released by TQM or FW, then it is not
4563 		 * a Tx completion indication; log the error and skip it */
4564 		if (qdf_unlikely(buffer_src !=
4565 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
4566 				 (qdf_unlikely(buffer_src !=
4567 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
4568 			uint8_t wbm_internal_error;
4569 
4570 			dp_err_rl(
4571 				"Tx comp release_src != TQM | FW but from %d",
4572 				buffer_src);
4573 			hal_dump_comp_desc(tx_comp_hal_desc);
4574 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
4575 
4576 			/* When WBM sees NULL buffer_addr_info in any of
4577 			 * ingress rings it sends an error indication,
4578 			 * with wbm_internal_error=1, to a specific ring.
4579 			 * The WBM2SW ring used to indicate these errors is
4580 			 * fixed in HW, and that ring is being used as Tx
4581 			 * completion ring. These errors are not related to
4582 			 * Tx completions, and should just be ignored
4583 			 */
4584 			wbm_internal_error = hal_get_wbm_internal_error(
4585 							soc->hal_soc,
4586 							tx_comp_hal_desc);
4587 
4588 			if (wbm_internal_error) {
4589 				dp_err_rl("Tx comp wbm_internal_error!!");
4590 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
4591 
4592 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
4593 								buffer_src)
4594 					dp_handle_wbm_internal_error(
4595 						soc,
4596 						tx_comp_hal_desc,
4597 						hal_tx_comp_get_buffer_type(
4598 							tx_comp_hal_desc));
4599 
4600 			} else {
4601 				dp_err_rl("Tx comp wbm_internal_error false");
4602 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
4603 			}
4604 			continue;
4605 		}
4606 
4607 		/* Get descriptor id */
4608 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
4609 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
4610 			DP_TX_DESC_ID_POOL_OS;
4611 
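		/*
		 * The SW descriptor id packs pool id, page id and page offset;
		 * the pool id was extracted above, and the page id/offset are
		 * unpacked below to locate the descriptor.
		 */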
4612 		/* Find Tx descriptor */
4613 		tx_desc = dp_tx_desc_find(soc, pool_id,
4614 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
4615 				DP_TX_DESC_ID_PAGE_OS,
4616 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
4617 				DP_TX_DESC_ID_OFFSET_OS);
4618 
4619 		/*
4620 		 * If the release source is FW, process the HTT status
4621 		 */
4622 		if (qdf_unlikely(buffer_src ==
4623 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
4624 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
4625 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
4626 					htt_tx_status);
4627 			dp_tx_process_htt_completion(soc, tx_desc,
4628 					htt_tx_status, ring_id);
4629 		} else {
4630 			tx_desc->peer_id =
4631 				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
4632 			tx_desc->tx_status =
4633 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4634 			/*
4635 			 * If fast completion mode is enabled, the extended
4636 			 * metadata from the descriptor is not copied
4637 			 */
4638 			if (qdf_likely(tx_desc->flags &
4639 						DP_TX_DESC_FLAG_SIMPLE))
4640 				goto add_to_pool;
4641 
4642 			/*
4643 			 * If the descriptor is already freed in vdev_detach,
4644 			 * continue to next descriptor
4645 			 */
4646 			if (qdf_unlikely
4647 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
4648 				 !tx_desc->flags)) {
4649 				QDF_TRACE(QDF_MODULE_ID_DP,
4650 					  QDF_TRACE_LEVEL_INFO,
4651 					  "Descriptor freed in vdev_detach %d",
4652 					  tx_desc_id);
4653 				continue;
4654 			}
4655 
4656 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4657 				QDF_TRACE(QDF_MODULE_ID_DP,
4658 					  QDF_TRACE_LEVEL_INFO,
4659 					  "pdev in down state %d",
4660 					  tx_desc_id);
4661 
4662 				dp_tx_comp_free_buf(soc, tx_desc);
4663 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4664 				goto next_desc;
4665 			}
4666 
4667 			/* Pool id is not matching. Error */
4668 			if (tx_desc->pool_id != pool_id) {
4669 				QDF_TRACE(QDF_MODULE_ID_DP,
4670 					QDF_TRACE_LEVEL_FATAL,
4671 					"Tx Comp pool id %d not matched %d",
4672 					pool_id, tx_desc->pool_id);
4673 
4674 				qdf_assert_always(0);
4675 			}
4676 
4677 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4678 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
4679 				QDF_TRACE(QDF_MODULE_ID_DP,
4680 					  QDF_TRACE_LEVEL_FATAL,
4681 					  "Txdesc invalid, flgs = %x,id = %d",
4682 					  tx_desc->flags, tx_desc_id);
4683 				qdf_assert_always(0);
4684 			}
4685 
4686 			/* Collect hw completion contents */
4687 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4688 					      &tx_desc->comp, 1);
4689 add_to_pool:
4690 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4691 
4692 			/* First ring descriptor on the cycle */
4693 			if (!head_desc) {
4694 				head_desc = tx_desc;
4695 				tail_desc = tx_desc;
4696 			}
4697 
4698 			tail_desc->next = tx_desc;
4699 			tx_desc->next = NULL;
4700 			tail_desc = tx_desc;
4701 		}
4702 next_desc:
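		/*
		 * num_processed is bumped only when the low bits of count
		 * (masked by DP_TX_NAPI_BUDGET_DIV_MASK) are zero, so the
		 * reap count is scaled down for quota accounting without a
		 * per-descriptor division.
		 */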
4703 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4704 
4705 		/*
4706 		 * Stop processing once the number of reaped descriptors
4707 		 * reaches the per-loop packet limit
4708 		 */
4709 
4710 		count++;
4711 
4712 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
4713 			break;
4714 	}
4715 
4716 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4717 
4718 	/* Process the reaped descriptors */
4719 	if (head_desc)
4720 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4721 
4722 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4723 
4724 		if (num_processed >= quota)
4725 			force_break = true;
4726 
4727 		if (!force_break &&
4728 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
4729 						  hal_ring_hdl)) {
4730 			DP_STATS_INC(soc, tx.hp_oos2, 1);
4731 			if (!hif_exec_should_yield(soc->hif_handle,
4732 						   int_ctx->dp_intr_id))
4733 				goto more_data;
4734 		}
4735 	}
4736 	DP_TX_HIST_STATS_PER_PDEV();
4737 
4738 	return num_processed;
4739 }
4740 
4741 #ifdef FEATURE_WLAN_TDLS
4742 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4743 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4744 {
4745 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4746 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4747 						     DP_MOD_ID_TDLS);
4748 
4749 	if (!vdev) {
4750 		dp_err("vdev handle for id %d is NULL", vdev_id);
4751 		return NULL;
4752 	}
4753 
4754 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4755 		vdev->is_tdls_frame = true;
4756 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
4757 
4758 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4759 }
4760 #endif
4761 
4762 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4763 {
4764 	struct wlan_cfg_dp_soc_ctxt *cfg;
4765 
4766 	struct dp_soc *soc;
4767 
4768 	soc = vdev->pdev->soc;
4769 	if (!soc)
4770 		return;
4771 
4772 	cfg = soc->wlan_cfg_ctx;
4773 	if (!cfg)
4774 		return;
4775 
4776 	if (vdev->opmode == wlan_op_mode_ndi)
4777 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4778 	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
4779 		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
4780 		 (vdev->subtype == wlan_op_subtype_p2p_go))
4781 		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
4782 	else
4783 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4784 }
4785 
4786 /**
4787  * dp_tx_vdev_attach() - attach vdev to dp tx
4788  * @vdev: virtual device instance
4789  *
4790  * Return: QDF_STATUS_SUCCESS: success
4791  *         QDF_STATUS_E_RESOURCES: Error return
4792  */
4793 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4794 {
4795 	int pdev_id;
4796 	/*
4797 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4798 	 */
4799 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4800 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
4801 
4802 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4803 			vdev->vdev_id);
4804 
4805 	pdev_id =
4806 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4807 						       vdev->pdev->pdev_id);
4808 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4809 
4810 	/*
4811 	 * Set HTT Extension Valid bit to 0 by default
4812 	 */
4813 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4814 
4815 	dp_tx_vdev_update_search_flags(vdev);
4816 
4817 	dp_tx_vdev_update_feature_flags(vdev);
4818 
4819 	return QDF_STATUS_SUCCESS;
4820 }
4821 
4822 #ifndef FEATURE_WDS
4823 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4824 {
4825 	return false;
4826 }
4827 #endif
4828 
4829 /**
4830  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4831  * @vdev: virtual device instance
4832  *
4833  * Return: void
4834  *
4835  */
4836 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4837 {
4838 	struct dp_soc *soc = vdev->pdev->soc;
4839 
4840 	/*
4841 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
4842 	 * for TDLS link
4843 	 *
4844 	 * Enable AddrY (SA based search) only for non-WDS STA and
4845 	 * ProxySTA VAP (in HKv1) modes.
4846 	 *
4847 	 * In all other VAP modes, only DA based search should be
4848 	 * enabled
4849 	 */
4850 	if (vdev->opmode == wlan_op_mode_sta &&
4851 	    vdev->tdls_link_connected)
4852 		vdev->hal_desc_addr_search_flags =
4853 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4854 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4855 		 !dp_tx_da_search_override(vdev))
4856 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4857 	else
4858 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4859 
4860 	/* Set search type only when peer map v2 messaging is enabled
4861 	 * as we will have the search index (AST hash) only when v2 is
4862 	 * enabled
4863 	 */
4864 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4865 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4866 	else
4867 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4868 }
4869 
4870 static inline bool
4871 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4872 			  struct dp_vdev *vdev,
4873 			  struct dp_tx_desc_s *tx_desc)
4874 {
4875 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4876 		return false;
4877 
4878 	/*
4879 	 * if vdev is given, then only check whether desc
4880 	 * vdev match. if vdev is NULL, then check whether
4881 	 * desc pdev match.
4882 	 */
4883 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
4884 		(tx_desc->pdev == pdev);
4885 }
4886 
4887 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4888 /**
4889  * dp_tx_desc_flush() - release resources associated
4890  *                      to TX Desc
4891  *
4892  * @pdev: Handle to DP pdev structure
4893  * @vdev: virtual device instance
4894  * NULL: no specific vdev is required; check all allocated TX descs
4895  * on this pdev.
4896  * Non-NULL: only check the allocated TX descs associated with this vdev.
4897  *
4898  * @force_free:
4899  * true: flush the TX descs.
4900  * false: only reset the vdev in each allocated TX desc
4901  * associated with the current vdev.
4902  *
4903  * This function will go through the TX desc pool to flush
4904  * the outstanding TX data or reset Vdev to NULL in associated TX
4905  * Desc.
4906  */
4907 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4908 		      bool force_free)
4909 {
4910 	uint8_t i;
4911 	uint32_t j;
4912 	uint32_t num_desc, page_id, offset;
4913 	uint16_t num_desc_per_page;
4914 	struct dp_soc *soc = pdev->soc;
4915 	struct dp_tx_desc_s *tx_desc = NULL;
4916 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4917 
4918 	if (!vdev && !force_free) {
4919 		dp_err("Reset TX desc vdev, Vdev param is required!");
4920 		return;
4921 	}
4922 
4923 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4924 		tx_desc_pool = &soc->tx_desc[i];
4925 		if (!(tx_desc_pool->pool_size) ||
4926 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4927 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4928 			continue;
4929 
4930 		/*
4931 		 * Take the flow pool lock in case the pool is freed while
4932 		 * all tx_descs are being recycled during TX completion handling.
4933 		 * This is not necessary for a force flush because:
4934 		 * a. a double lock would occur if dp_tx_desc_release is
4935 		 *    also trying to acquire it.
4936 		 * b. dp interrupts are disabled before the force TX desc
4937 		 *    flush in dp_pdev_deinit().
4938 		 */
4939 		if (!force_free)
4940 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4941 		num_desc = tx_desc_pool->pool_size;
4942 		num_desc_per_page =
4943 			tx_desc_pool->desc_pages.num_element_per_page;
4944 		for (j = 0; j < num_desc; j++) {
4945 			page_id = j / num_desc_per_page;
4946 			offset = j % num_desc_per_page;
4947 
4948 			if (qdf_unlikely(!(tx_desc_pool->
4949 					 desc_pages.cacheable_pages)))
4950 				break;
4951 
4952 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4953 
4954 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4955 				/*
4956 				 * Free TX desc if force free is
4957 				 * required, otherwise only reset vdev
4958 				 * in this TX desc.
4959 				 */
4960 				if (force_free) {
4961 					dp_tx_comp_free_buf(soc, tx_desc);
4962 					dp_tx_desc_release(tx_desc, i);
4963 				} else {
4964 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
4965 				}
4966 			}
4967 		}
4968 		if (!force_free)
4969 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
4970 	}
4971 }
4972 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4973 /**
4974  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
4975  *
4976  * @soc: Handle to DP soc structure
4977  * @tx_desc: pointer of one TX desc
4978  * @desc_pool_id: TX Desc pool id
4979  */
4980 static inline void
4981 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
4982 		      uint8_t desc_pool_id)
4983 {
4984 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
4985 
4986 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
4987 
4988 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
4989 }
4990 
4991 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4992 		      bool force_free)
4993 {
4994 	uint8_t i, num_pool;
4995 	uint32_t j;
4996 	uint32_t num_desc, page_id, offset;
4997 	uint16_t num_desc_per_page;
4998 	struct dp_soc *soc = pdev->soc;
4999 	struct dp_tx_desc_s *tx_desc = NULL;
5000 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5001 
5002 	if (!vdev && !force_free) {
5003 		dp_err("Reset TX desc vdev, Vdev param is required!");
5004 		return;
5005 	}
5006 
5007 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5008 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5009 
5010 	for (i = 0; i < num_pool; i++) {
5011 		tx_desc_pool = &soc->tx_desc[i];
5012 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5013 			continue;
5014 
5015 		num_desc_per_page =
5016 			tx_desc_pool->desc_pages.num_element_per_page;
5017 		for (j = 0; j < num_desc; j++) {
5018 			page_id = j / num_desc_per_page;
5019 			offset = j % num_desc_per_page;
5020 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5021 
5022 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5023 				if (force_free) {
5024 					dp_tx_comp_free_buf(soc, tx_desc);
5025 					dp_tx_desc_release(tx_desc, i);
5026 				} else {
5027 					dp_tx_desc_reset_vdev(soc, tx_desc,
5028 							      i);
5029 				}
5030 			}
5031 		}
5032 	}
5033 }
5034 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5035 
5036 /**
5037  * dp_tx_vdev_detach() - detach vdev from dp tx
5038  * @vdev: virtual device instance
5039  *
5040  * Return: QDF_STATUS_SUCCESS: success
5041  *         QDF_STATUS_E_RESOURCES: Error return
5042  */
5043 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5044 {
5045 	struct dp_pdev *pdev = vdev->pdev;
5046 
5047 	/* Reset TX desc associated to this Vdev as NULL */
5048 	dp_tx_desc_flush(pdev, vdev, false);
5049 	dp_tx_vdev_multipass_deinit(vdev);
5050 
5051 	return QDF_STATUS_SUCCESS;
5052 }
5053 
5054 /**
5055  * dp_tx_pdev_init() - initialize pdev for dp tx
5056  * @pdev: physical device instance
5057  *
5058  * Return: QDF_STATUS_SUCCESS: success
5059  *         QDF_STATUS_E_RESOURCES: Error return
5060  */
5061 QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
5062 {
5063 	struct dp_soc *soc = pdev->soc;
5064 
5065 	/* Initialize Flow control counters */
5066 	qdf_atomic_init(&pdev->num_tx_outstanding);
5067 	pdev->tx_descs_max = 0;
5068 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
5069 		/* Initialize descriptors in TCL Ring */
5070 		hal_tx_init_data_ring(soc->hal_soc,
5071 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
5072 	}
5073 
5074 	return QDF_STATUS_SUCCESS;
5075 }
5076 
5077 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5078 /* Pools will be allocated dynamically */
5079 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5080 					   int num_desc)
5081 {
5082 	uint8_t i;
5083 
5084 	for (i = 0; i < num_pool; i++) {
5085 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5086 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5087 	}
5088 
5089 	return QDF_STATUS_SUCCESS;
5090 }
5091 
5092 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5093 					  int num_desc)
5094 {
5095 	return QDF_STATUS_SUCCESS;
5096 }
5097 
5098 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5099 {
5100 }
5101 
5102 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5103 {
5104 	uint8_t i;
5105 
5106 	for (i = 0; i < num_pool; i++)
5107 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5108 }
5109 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5110 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5111 					   int num_desc)
5112 {
5113 	uint8_t i, count;
5114 
5115 	/* Allocate software Tx descriptor pools */
5116 	for (i = 0; i < num_pool; i++) {
5117 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5118 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5119 				  FL("Tx Desc Pool alloc %d failed %pK"),
5120 				  i, soc);
5121 			goto fail;
5122 		}
5123 	}
5124 	return QDF_STATUS_SUCCESS;
5125 
5126 fail:
5127 	for (count = 0; count < i; count++)
5128 		dp_tx_desc_pool_free(soc, count);
5129 
5130 	return QDF_STATUS_E_NOMEM;
5131 }
5132 
5133 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5134 					  int num_desc)
5135 {
5136 	uint8_t i;
5137 	for (i = 0; i < num_pool; i++) {
5138 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5139 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5140 				  FL("Tx Desc Pool init %d failed %pK"),
5141 				  i, soc);
5142 			return QDF_STATUS_E_NOMEM;
5143 		}
5144 	}
5145 	return QDF_STATUS_SUCCESS;
5146 }
5147 
5148 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5149 {
5150 	uint8_t i;
5151 
5152 	for (i = 0; i < num_pool; i++)
5153 		dp_tx_desc_pool_deinit(soc, i);
5154 }
5155 
5156 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5157 {
5158 	uint8_t i;
5159 
5160 	for (i = 0; i < num_pool; i++)
5161 		dp_tx_desc_pool_free(soc, i);
5162 }
5163 
5164 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5165 
5166 /**
5167  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5168  * @soc: core txrx main context
5169  * @num_pool: number of pools
5170  *
5171  */
5172 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5173 {
5174 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5175 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5176 }
5177 
5178 /**
5179  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5180  * @soc: core txrx main context
5181  * @num_pool: number of pools
5182  *
5183  */
5184 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5185 {
5186 	dp_tx_tso_desc_pool_free(soc, num_pool);
5187 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5188 }
5189 
5190 /**
5191  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5192  * @soc: core txrx main context
5193  *
5194  * This function frees all tx related descriptors as below
5195  * 1. Regular TX descriptors (static pools)
5196  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5197  * 3. TSO descriptors
5198  *
5199  */
5200 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5201 {
5202 	uint8_t num_pool;
5203 
5204 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5205 
5206 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5207 	dp_tx_ext_desc_pool_free(soc, num_pool);
5208 	dp_tx_delete_static_pools(soc, num_pool);
5209 }
5210 
5211 /**
5212  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5213  * @soc: core txrx main context
5214  *
5215  * This function de-initializes all tx related descriptors as below
5216  * 1. Regular TX descriptors (static pools)
5217  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5218  * 3. TSO descriptors
5219  *
5220  */
5221 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5222 {
5223 	uint8_t num_pool;
5224 
5225 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5226 
5227 	dp_tx_flow_control_deinit(soc);
5228 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5229 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5230 	dp_tx_deinit_static_pools(soc, num_pool);
5231 }
5232 
5233 /**
5234  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO common descriptor pools
5235  * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors per pool
5236  *
5237  * Reserve TSO descriptor buffers
5238  *
5239  * Return: QDF_STATUS_E_FAILURE on failure or
5240  * QDF_STATUS_SUCCESS on success
5241  */
5242 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5243 					 uint8_t num_pool,
5244 					 uint16_t num_desc)
5245 {
5246 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5247 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5248 		return QDF_STATUS_E_FAILURE;
5249 	}
5250 
5251 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5252 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5253 		       num_pool, soc);
5254 		return QDF_STATUS_E_FAILURE;
5255 	}
5256 	return QDF_STATUS_SUCCESS;
5257 }
5258 
5259 /**
5260  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5261  * @soc: DP soc handle
5262  * @num_pool: Number of pools
5263  * @num_desc: Number of descriptors
5264  *
5265  * Initialize TSO descriptor pools
5266  *
5267  * Return: QDF_STATUS_E_FAILURE on failure or
5268  * QDF_STATUS_SUCCESS on success
5269  */
5270 
5271 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5272 					uint8_t num_pool,
5273 					uint16_t num_desc)
5274 {
5275 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5276 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5277 		return QDF_STATUS_E_FAILURE;
5278 	}
5279 
5280 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5281 		dp_err("TSO Num of seg Pool init %d failed %pK",
5282 		       num_pool, soc);
5283 		return QDF_STATUS_E_FAILURE;
5284 	}
5285 	return QDF_STATUS_SUCCESS;
5286 }
5287 
5288 /**
5289  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5290  * @soc: core txrx main context
5291  *
5292  * This function allocates memory for following descriptor pools
5293  * 1. regular sw tx descriptor pools (static pools)
5294  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5295  * 3. TSO descriptor pools
5296  *
5297  * Return: QDF_STATUS_SUCCESS: success
5298  *         QDF_STATUS_E_RESOURCES: Error return
5299  */
5300 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5301 {
5302 	uint8_t num_pool;
5303 	uint32_t num_desc;
5304 	uint32_t num_ext_desc;
5305 
5306 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5307 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5308 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5309 
5310 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5311 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5312 		  __func__, num_pool, num_desc);
5313 
5314 	if ((num_pool > MAX_TXDESC_POOLS) ||
5315 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5316 		goto fail1;
5317 
5318 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5319 		goto fail1;
5320 
5321 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5322 		goto fail2;
5323 
5324 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5325 		return QDF_STATUS_SUCCESS;
5326 
5327 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5328 		goto fail3;
5329 
5330 	return QDF_STATUS_SUCCESS;
5331 
5332 fail3:
5333 	dp_tx_ext_desc_pool_free(soc, num_pool);
5334 fail2:
5335 	dp_tx_delete_static_pools(soc, num_pool);
5336 fail1:
5337 	return QDF_STATUS_E_RESOURCES;
5338 }
5339 
5340 /**
5341  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5342  * @soc: core txrx main context
5343  *
5344  * This function initializes the following TX descriptor pools
5345  * 1. regular sw tx descriptor pools (static pools)
5346  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5347  * 3. TSO descriptor pools
5348  *
5349  * Return: QDF_STATUS_SUCCESS: success
5350  *	   QDF_STATUS_E_RESOURCES: Error return
5351  */
5352 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5353 {
5354 	uint8_t num_pool;
5355 	uint32_t num_desc;
5356 	uint32_t num_ext_desc;
5357 
5358 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5359 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5360 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5361 
5362 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5363 		goto fail1;
5364 
5365 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5366 		goto fail2;
5367 
5368 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5369 		return QDF_STATUS_SUCCESS;
5370 
5371 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5372 		goto fail3;
5373 
5374 	dp_tx_flow_control_init(soc);
5375 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5376 	return QDF_STATUS_SUCCESS;
5377 
5378 fail3:
5379 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5380 fail2:
5381 	dp_tx_deinit_static_pools(soc, num_pool);
5382 fail1:
5383 	return QDF_STATUS_E_RESOURCES;
5384 }
5385 
5386 /**
5387  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5388  * @txrx_soc: dp soc handle
5389  *
5390  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5391  *			QDF_STATUS_E_FAILURE
5392  */
5393 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5394 {
5395 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5396 	uint8_t num_pool;
5397 	uint32_t num_desc;
5398 	uint32_t num_ext_desc;
5399 
5400 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5401 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5402 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5403 
5404 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5405 		return QDF_STATUS_E_FAILURE;
5406 
5407 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5408 		return QDF_STATUS_E_FAILURE;
5409 
5410 	return QDF_STATUS_SUCCESS;
5411 }
5412 
5413 /**
5414  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5415  * @txrx_soc: dp soc handle
5416  *
5417  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5418  */
5419 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5420 {
5421 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5422 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5423 
5424 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5425 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5426 
5427 	return QDF_STATUS_SUCCESS;
5428 }
5429 
5430