xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #ifdef ATH_SUPPORT_IQUE
41 #include "dp_txrx_me.h"
42 #endif
43 #include "dp_hist.h"
44 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
45 #include <dp_swlm.h>
46 #endif
47 
48 /* Flag to skip CCE classify when mesh or tid override enabled */
49 #define DP_TX_SKIP_CCE_CLASSIFY \
50 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
51 
52 /* TODO Add support in TSO */
53 #define DP_DESC_NUM_FRAG(x) 0
54 
55 /* disable TQM_BYPASS */
56 #define TQM_BYPASS_WAR 0
57 
58 /* invalid peer id for reinject*/
59 #define DP_INVALID_PEER 0XFFFE
60 
61 /*mapping between hal encrypt type and cdp_sec_type*/
62 #define MAX_CDP_SEC_TYPE 12
63 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
64 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
65 					HAL_TX_ENCRYPT_TYPE_WEP_128,
66 					HAL_TX_ENCRYPT_TYPE_WEP_104,
67 					HAL_TX_ENCRYPT_TYPE_WEP_40,
68 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
69 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
70 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
71 					HAL_TX_ENCRYPT_TYPE_WAPI,
72 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
73 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
74 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
75 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
76 
77 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
78 /**
79  * dp_update_tx_desc_stats - Update the increase or decrease in
80  * outstanding tx desc count
81  * values on pdev and soc
82  * @vdev: DP pdev handle
83  *
84  * Return: void
85  */
86 static inline void
87 dp_update_tx_desc_stats(struct dp_pdev *pdev)
88 {
89 	int32_t tx_descs_cnt =
90 		qdf_atomic_read(&pdev->num_tx_outstanding);
91 	if (pdev->tx_descs_max < tx_descs_cnt)
92 		pdev->tx_descs_max = tx_descs_cnt;
93 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
94 				   pdev->tx_descs_max);
95 }
96 
97 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
98 
99 static inline void
100 dp_update_tx_desc_stats(struct dp_pdev *pdev)
101 {
102 }
103 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
104 
105 #ifdef QCA_TX_LIMIT_CHECK
106 /**
107  * dp_tx_limit_check - Check if allocated tx descriptors reached
108  * soc max limit and pdev max limit
109  * @vdev: DP vdev handle
110  *
111  * Return: true if allocated tx descriptors reached max configured value, else
112  * false
113  */
114 static inline bool
115 dp_tx_limit_check(struct dp_vdev *vdev)
116 {
117 	struct dp_pdev *pdev = vdev->pdev;
118 	struct dp_soc *soc = pdev->soc;
119 
120 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
121 			soc->num_tx_allowed) {
122 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
123 			  "%s: queued packets are more than max tx, drop the frame",
124 			  __func__);
125 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
126 		return true;
127 	}
128 
129 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
130 			pdev->num_tx_allowed) {
131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
132 			  "%s: queued packets are more than max tx, drop the frame",
133 			  __func__);
134 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
135 		return true;
136 	}
137 	return false;
138 }
139 
140 /**
141  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
142  * reached soc max limit
143  * @vdev: DP vdev handle
144  *
145  * Return: true if allocated tx descriptors reached max configured value, else
146  * false
147  */
148 static inline bool
149 dp_tx_exception_limit_check(struct dp_vdev *vdev)
150 {
151 	struct dp_pdev *pdev = vdev->pdev;
152 	struct dp_soc *soc = pdev->soc;
153 
154 	if (qdf_atomic_read(&soc->num_tx_exception) >=
155 			soc->num_msdu_exception_desc) {
156 		dp_info("exc packets are more than max drop the exc pkt");
157 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
158 		return true;
159 	}
160 
161 	return false;
162 }
163 
164 /**
165  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
166  * @vdev: DP pdev handle
167  *
168  * Return: void
169  */
170 static inline void
171 dp_tx_outstanding_inc(struct dp_pdev *pdev)
172 {
173 	struct dp_soc *soc = pdev->soc;
174 
175 	qdf_atomic_inc(&pdev->num_tx_outstanding);
176 	qdf_atomic_inc(&soc->num_tx_outstanding);
177 	dp_update_tx_desc_stats(pdev);
178 }
179 
180 /**
181  * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc
182  * @vdev: DP pdev handle
183  *
184  * Return: void
185  */
186 static inline void
187 dp_tx_outstanding_dec(struct dp_pdev *pdev)
188 {
189 	struct dp_soc *soc = pdev->soc;
190 
191 	qdf_atomic_dec(&pdev->num_tx_outstanding);
192 	qdf_atomic_dec(&soc->num_tx_outstanding);
193 	dp_update_tx_desc_stats(pdev);
194 }
195 
196 #else //QCA_TX_LIMIT_CHECK
197 static inline bool
198 dp_tx_limit_check(struct dp_vdev *vdev)
199 {
200 	return false;
201 }
202 
203 static inline bool
204 dp_tx_exception_limit_check(struct dp_vdev *vdev)
205 {
206 	return false;
207 }
208 
209 static inline void
210 dp_tx_outstanding_inc(struct dp_pdev *pdev)
211 {
212 	qdf_atomic_inc(&pdev->num_tx_outstanding);
213 	dp_update_tx_desc_stats(pdev);
214 }
215 
216 static inline void
217 dp_tx_outstanding_dec(struct dp_pdev *pdev)
218 {
219 	qdf_atomic_dec(&pdev->num_tx_outstanding);
220 	dp_update_tx_desc_stats(pdev);
221 }
222 #endif //QCA_TX_LIMIT_CHECK
223 
224 #if defined(FEATURE_TSO)
225 /**
226  * dp_tx_tso_unmap_segment() - Unmap TSO segment
227  *
228  * @soc - core txrx main context
229  * @seg_desc - tso segment descriptor
230  * @num_seg_desc - tso number segment descriptor
231  */
232 static void dp_tx_tso_unmap_segment(
233 		struct dp_soc *soc,
234 		struct qdf_tso_seg_elem_t *seg_desc,
235 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
236 {
237 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
238 	if (qdf_unlikely(!seg_desc)) {
239 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
240 			 __func__, __LINE__);
241 		qdf_assert(0);
242 	} else if (qdf_unlikely(!num_seg_desc)) {
243 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
244 			 __func__, __LINE__);
245 		qdf_assert(0);
246 	} else {
247 		bool is_last_seg;
248 		/* no tso segment left to do dma unmap */
249 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
250 			return;
251 
252 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
253 					true : false;
254 		qdf_nbuf_unmap_tso_segment(soc->osdev,
255 					   seg_desc, is_last_seg);
256 		num_seg_desc->num_seg.tso_cmn_num_seg--;
257 	}
258 }
259 
260 /**
261  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
262  *                            back to the freelist
263  *
264  * @soc - soc device handle
265  * @tx_desc - Tx software descriptor
266  */
267 static void dp_tx_tso_desc_release(struct dp_soc *soc,
268 				   struct dp_tx_desc_s *tx_desc)
269 {
270 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
271 	if (qdf_unlikely(!tx_desc->tso_desc)) {
272 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
273 			  "%s %d TSO desc is NULL!",
274 			  __func__, __LINE__);
275 		qdf_assert(0);
276 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
278 			  "%s %d TSO num desc is NULL!",
279 			  __func__, __LINE__);
280 		qdf_assert(0);
281 	} else {
282 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
283 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
284 
285 		/* Add the tso num segment into the free list */
286 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
287 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
288 					    tx_desc->tso_num_desc);
289 			tx_desc->tso_num_desc = NULL;
290 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
291 		}
292 
293 		/* Add the tso segment into the free list*/
294 		dp_tx_tso_desc_free(soc,
295 				    tx_desc->pool_id, tx_desc->tso_desc);
296 		tx_desc->tso_desc = NULL;
297 	}
298 }
299 #else
300 static void dp_tx_tso_unmap_segment(
301 		struct dp_soc *soc,
302 		struct qdf_tso_seg_elem_t *seg_desc,
303 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
304 
305 {
306 }
307 
308 static void dp_tx_tso_desc_release(struct dp_soc *soc,
309 				   struct dp_tx_desc_s *tx_desc)
310 {
311 }
312 #endif
313 /**
314  * dp_tx_desc_release() - Release Tx Descriptor
315  * @tx_desc : Tx Descriptor
316  * @desc_pool_id: Descriptor Pool ID
317  *
318  * Deallocate all resources attached to Tx descriptor and free the Tx
319  * descriptor.
320  *
321  * Return:
322  */
323 static void
324 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
325 {
326 	struct dp_pdev *pdev = tx_desc->pdev;
327 	struct dp_soc *soc;
328 	uint8_t comp_status = 0;
329 
330 	qdf_assert(pdev);
331 
332 	soc = pdev->soc;
333 
334 	dp_tx_outstanding_dec(pdev);
335 
336 	if (tx_desc->frm_type == dp_tx_frm_tso)
337 		dp_tx_tso_desc_release(soc, tx_desc);
338 
339 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
340 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
341 
342 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
343 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
344 
345 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
346 		qdf_atomic_dec(&soc->num_tx_exception);
347 
348 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
349 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
350 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
351 							     soc->hal_soc);
352 	else
353 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
354 
355 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
356 		"Tx Completion Release desc %d status %d outstanding %d",
357 		tx_desc->id, comp_status,
358 		qdf_atomic_read(&pdev->num_tx_outstanding));
359 
360 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
361 	return;
362 }
363 
364 /**
365  * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
366  * @vdev: DP vdev Handle
367  * @nbuf: skb
368  * @msdu_info: msdu_info required to create HTT metadata
369  *
370  * Prepares and fills HTT metadata in the frame pre-header for special frames
371  * that should be transmitted using varying transmit parameters.
372  * There are 2 VDEV modes that currently needs this special metadata -
373  *  1) Mesh Mode
374  *  2) DSRC Mode
375  *
376  * Return: HTT metadata size
377  *
378  */
379 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
380 					  struct dp_tx_msdu_info_s *msdu_info)
381 {
382 	uint32_t *meta_data = msdu_info->meta_data;
383 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
384 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
385 
386 	uint8_t htt_desc_size;
387 
388 	/* Size rounded of multiple of 8 bytes */
389 	uint8_t htt_desc_size_aligned;
390 
391 	uint8_t *hdr = NULL;
392 
393 	/*
394 	 * Metadata - HTT MSDU Extension header
395 	 */
396 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
397 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
398 
399 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
400 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
401 							   meta_data[0])) {
402 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
403 				 htt_desc_size_aligned)) {
404 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
405 							 htt_desc_size_aligned);
406 			if (!nbuf) {
407 				/*
408 				 * qdf_nbuf_realloc_headroom won't do skb_clone
409 				 * as skb_realloc_headroom does. so, no free is
410 				 * needed here.
411 				 */
412 				DP_STATS_INC(vdev,
413 					     tx_i.dropped.headroom_insufficient,
414 					     1);
415 				qdf_print(" %s[%d] skb_realloc_headroom failed",
416 					  __func__, __LINE__);
417 				return 0;
418 			}
419 		}
420 		/* Fill and add HTT metaheader */
421 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
422 		if (!hdr) {
423 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
424 					"Error in filling HTT metadata");
425 
426 			return 0;
427 		}
428 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
429 
430 	} else if (vdev->opmode == wlan_op_mode_ocb) {
431 		/* Todo - Add support for DSRC */
432 	}
433 
434 	return htt_desc_size_aligned;
435 }
436 
437 /**
438  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
439  * @tso_seg: TSO segment to process
440  * @ext_desc: Pointer to MSDU extension descriptor
441  *
442  * Return: void
443  */
444 #if defined(FEATURE_TSO)
445 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
446 		void *ext_desc)
447 {
448 	uint8_t num_frag;
449 	uint32_t tso_flags;
450 
451 	/*
452 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
453 	 * tcp_flag_mask
454 	 *
455 	 * Checksum enable flags are set in TCL descriptor and not in Extension
456 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
457 	 */
458 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
459 
460 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
461 
462 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
463 		tso_seg->tso_flags.ip_len);
464 
465 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
466 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
467 
468 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
469 		uint32_t lo = 0;
470 		uint32_t hi = 0;
471 
472 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
473 				  (tso_seg->tso_frags[num_frag].length));
474 
475 		qdf_dmaaddr_to_32s(
476 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
477 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
478 			tso_seg->tso_frags[num_frag].length);
479 	}
480 
481 	return;
482 }
483 #else
484 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
485 		void *ext_desc)
486 {
487 	return;
488 }
489 #endif
490 
491 #if defined(FEATURE_TSO)
492 /**
493  * dp_tx_free_tso_seg_list() - Loop through the tso segments
494  *                             allocated and free them
495  *
496  * @soc: soc handle
497  * @free_seg: list of tso segments
498  * @msdu_info: msdu descriptor
499  *
500  * Return - void
501  */
502 static void dp_tx_free_tso_seg_list(
503 		struct dp_soc *soc,
504 		struct qdf_tso_seg_elem_t *free_seg,
505 		struct dp_tx_msdu_info_s *msdu_info)
506 {
507 	struct qdf_tso_seg_elem_t *next_seg;
508 
509 	while (free_seg) {
510 		next_seg = free_seg->next;
511 		dp_tx_tso_desc_free(soc,
512 				    msdu_info->tx_queue.desc_pool_id,
513 				    free_seg);
514 		free_seg = next_seg;
515 	}
516 }
517 
518 /**
519  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
520  *                                 allocated and free them
521  *
522  * @soc:  soc handle
523  * @free_num_seg: list of tso number segments
524  * @msdu_info: msdu descriptor
525  * Return - void
526  */
527 static void dp_tx_free_tso_num_seg_list(
528 		struct dp_soc *soc,
529 		struct qdf_tso_num_seg_elem_t *free_num_seg,
530 		struct dp_tx_msdu_info_s *msdu_info)
531 {
532 	struct qdf_tso_num_seg_elem_t *next_num_seg;
533 
534 	while (free_num_seg) {
535 		next_num_seg = free_num_seg->next;
536 		dp_tso_num_seg_free(soc,
537 				    msdu_info->tx_queue.desc_pool_id,
538 				    free_num_seg);
539 		free_num_seg = next_num_seg;
540 	}
541 }
542 
543 /**
544  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
545  *                              do dma unmap for each segment
546  *
547  * @soc: soc handle
548  * @free_seg: list of tso segments
549  * @num_seg_desc: tso number segment descriptor
550  *
551  * Return - void
552  */
553 static void dp_tx_unmap_tso_seg_list(
554 		struct dp_soc *soc,
555 		struct qdf_tso_seg_elem_t *free_seg,
556 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
557 {
558 	struct qdf_tso_seg_elem_t *next_seg;
559 
560 	if (qdf_unlikely(!num_seg_desc)) {
561 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
562 		return;
563 	}
564 
565 	while (free_seg) {
566 		next_seg = free_seg->next;
567 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
568 		free_seg = next_seg;
569 	}
570 }
571 
572 #ifdef FEATURE_TSO_STATS
573 /**
574  * dp_tso_get_stats_idx: Retrieve the tso packet id
575  * @pdev - pdev handle
576  *
577  * Return: id
578  */
579 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
580 {
581 	uint32_t stats_idx;
582 
583 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
584 						% CDP_MAX_TSO_PACKETS);
585 	return stats_idx;
586 }
587 #else
588 static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
589 {
590 	return 0;
591 }
592 #endif /* FEATURE_TSO_STATS */
593 
594 /**
595  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
596  *				     free the tso segments descriptor and
597  *				     tso num segments descriptor
598  *
599  * @soc:  soc handle
600  * @msdu_info: msdu descriptor
601  * @tso_seg_unmap: flag to show if dma unmap is necessary
602  *
603  * Return - void
604  */
605 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
606 					  struct dp_tx_msdu_info_s *msdu_info,
607 					  bool tso_seg_unmap)
608 {
609 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
610 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
611 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
612 					tso_info->tso_num_seg_list;
613 
614 	/* do dma unmap for each segment */
615 	if (tso_seg_unmap)
616 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
617 
618 	/* free all tso number segment descriptor though looks only have 1 */
619 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
620 
621 	/* free all tso segment descriptor */
622 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
623 }
624 
625 /**
626  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
627  * @vdev: virtual device handle
628  * @msdu: network buffer
629  * @msdu_info: meta data associated with the msdu
630  *
631  * Return: QDF_STATUS_SUCCESS success
632  */
633 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
634 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
635 {
636 	struct qdf_tso_seg_elem_t *tso_seg;
637 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
638 	struct dp_soc *soc = vdev->pdev->soc;
639 	struct dp_pdev *pdev = vdev->pdev;
640 	struct qdf_tso_info_t *tso_info;
641 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
642 	tso_info = &msdu_info->u.tso_info;
643 	tso_info->curr_seg = NULL;
644 	tso_info->tso_seg_list = NULL;
645 	tso_info->num_segs = num_seg;
646 	msdu_info->frm_type = dp_tx_frm_tso;
647 	tso_info->tso_num_seg_list = NULL;
648 
649 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
650 
651 	while (num_seg) {
652 		tso_seg = dp_tx_tso_desc_alloc(
653 				soc, msdu_info->tx_queue.desc_pool_id);
654 		if (tso_seg) {
655 			tso_seg->next = tso_info->tso_seg_list;
656 			tso_info->tso_seg_list = tso_seg;
657 			num_seg--;
658 		} else {
659 			dp_err_rl("Failed to alloc tso seg desc");
660 			DP_STATS_INC_PKT(vdev->pdev,
661 					 tso_stats.tso_no_mem_dropped, 1,
662 					 qdf_nbuf_len(msdu));
663 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
664 
665 			return QDF_STATUS_E_NOMEM;
666 		}
667 	}
668 
669 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
670 
671 	tso_num_seg = dp_tso_num_seg_alloc(soc,
672 			msdu_info->tx_queue.desc_pool_id);
673 
674 	if (tso_num_seg) {
675 		tso_num_seg->next = tso_info->tso_num_seg_list;
676 		tso_info->tso_num_seg_list = tso_num_seg;
677 	} else {
678 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
679 			 __func__);
680 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
681 
682 		return QDF_STATUS_E_NOMEM;
683 	}
684 
685 	msdu_info->num_seg =
686 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
687 
688 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
689 			msdu_info->num_seg);
690 
691 	if (!(msdu_info->num_seg)) {
692 		/*
693 		 * Free allocated TSO seg desc and number seg desc,
694 		 * do unmap for segments if dma map has done.
695 		 */
696 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
697 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
698 
699 		return QDF_STATUS_E_INVAL;
700 	}
701 
702 	tso_info->curr_seg = tso_info->tso_seg_list;
703 
704 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
705 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
706 			     msdu, msdu_info->num_seg);
707 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
708 				    tso_info->msdu_stats_idx);
709 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
710 	return QDF_STATUS_SUCCESS;
711 }
712 #else
713 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
714 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
715 {
716 	return QDF_STATUS_E_NOMEM;
717 }
718 #endif
719 
720 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
721 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
722 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
723 
724 /**
725  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
726  * @vdev: DP Vdev handle
727  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
728  * @desc_pool_id: Descriptor Pool ID
729  *
730  * Return:
731  */
732 static
733 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
734 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
735 {
736 	uint8_t i;
737 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
738 	struct dp_tx_seg_info_s *seg_info;
739 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
740 	struct dp_soc *soc = vdev->pdev->soc;
741 
742 	/* Allocate an extension descriptor */
743 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
744 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
745 
746 	if (!msdu_ext_desc) {
747 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
748 		return NULL;
749 	}
750 
751 	if (msdu_info->exception_fw &&
752 			qdf_unlikely(vdev->mesh_vdev)) {
753 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
754 				&msdu_info->meta_data[0],
755 				sizeof(struct htt_tx_msdu_desc_ext2_t));
756 		qdf_atomic_inc(&soc->num_tx_exception);
757 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
758 	}
759 
760 	switch (msdu_info->frm_type) {
761 	case dp_tx_frm_sg:
762 	case dp_tx_frm_me:
763 	case dp_tx_frm_raw:
764 		seg_info = msdu_info->u.sg_info.curr_seg;
765 		/* Update the buffer pointers in MSDU Extension Descriptor */
766 		for (i = 0; i < seg_info->frag_cnt; i++) {
767 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
768 				seg_info->frags[i].paddr_lo,
769 				seg_info->frags[i].paddr_hi,
770 				seg_info->frags[i].len);
771 		}
772 
773 		break;
774 
775 	case dp_tx_frm_tso:
776 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
777 				&cached_ext_desc[0]);
778 		break;
779 
780 
781 	default:
782 		break;
783 	}
784 
785 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
786 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
787 
788 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
789 			msdu_ext_desc->vaddr);
790 
791 	return msdu_ext_desc;
792 }
793 
794 /**
795  * dp_tx_trace_pkt() - Trace TX packet at DP layer
796  *
797  * @skb: skb to be traced
798  * @msdu_id: msdu_id of the packet
799  * @vdev_id: vdev_id of the packet
800  *
801  * Return: None
802  */
803 #ifdef DP_DISABLE_TX_PKT_TRACE
804 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
805 			    uint8_t vdev_id)
806 {
807 }
808 #else
809 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
810 			    uint8_t vdev_id)
811 {
812 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
813 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
814 	DPTRACE(qdf_dp_trace_ptr(skb,
815 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
816 				 QDF_TRACE_DEFAULT_PDEV_ID,
817 				 qdf_nbuf_data_addr(skb),
818 				 sizeof(qdf_nbuf_data(skb)),
819 				 msdu_id, vdev_id, 0));
820 
821 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
822 
823 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
824 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
825 				      msdu_id, QDF_TX));
826 }
827 #endif
828 
829 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
830 /**
831  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
832  *				      exception by the upper layer (OS_IF)
833  * @soc: DP soc handle
834  * @nbuf: packet to be transmitted
835  *
836  * Returns: 1 if the packet is marked as exception,
837  *	    0, if the packet is not marked as exception.
838  */
839 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
840 						 qdf_nbuf_t nbuf)
841 {
842 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
843 }
844 #else
845 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
846 						 qdf_nbuf_t nbuf)
847 {
848 	return 0;
849 }
850 #endif
851 
852 /**
853  * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
854  * @vdev: DP vdev handle
855  * @nbuf: skb
856  * @desc_pool_id: Descriptor pool ID
857  * @meta_data: Metadata to the fw
858  * @tx_exc_metadata: Handle that holds exception path metadata
859  * Allocate and prepare Tx descriptor with msdu information.
860  *
861  * Return: Pointer to Tx Descriptor on success,
862  *         NULL on failure
863  */
864 static
865 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
866 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
867 		struct dp_tx_msdu_info_s *msdu_info,
868 		struct cdp_tx_exception_metadata *tx_exc_metadata)
869 {
870 	uint8_t align_pad;
871 	uint8_t is_exception = 0;
872 	uint8_t htt_hdr_size;
873 	struct dp_tx_desc_s *tx_desc;
874 	struct dp_pdev *pdev = vdev->pdev;
875 	struct dp_soc *soc = pdev->soc;
876 
877 	if (dp_tx_limit_check(vdev))
878 		return NULL;
879 
880 	/* Allocate software Tx descriptor */
881 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
882 	if (qdf_unlikely(!tx_desc)) {
883 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
884 		return NULL;
885 	}
886 
887 	dp_tx_outstanding_inc(pdev);
888 
889 	/* Initialize the SW tx descriptor */
890 	tx_desc->nbuf = nbuf;
891 	tx_desc->frm_type = dp_tx_frm_std;
892 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
893 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
894 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
895 	tx_desc->vdev_id = vdev->vdev_id;
896 	tx_desc->pdev = pdev;
897 	tx_desc->msdu_ext_desc = NULL;
898 	tx_desc->pkt_offset = 0;
899 	tx_desc->length = qdf_nbuf_headlen(nbuf);
900 
901 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
902 
903 	if (qdf_unlikely(vdev->multipass_en)) {
904 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
905 			goto failure;
906 	}
907 
908 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
909 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
910 		is_exception = 1;
911 	/*
912 	 * For special modes (vdev_type == ocb or mesh), data frames should be
913 	 * transmitted using varying transmit parameters (tx spec) which include
914 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
915 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
916 	 * These frames are sent as exception packets to firmware.
917 	 *
918 	 * HW requirement is that metadata should always point to a
919 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
920 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
921 	 *  to get 8-byte aligned start address along with align_pad added
922 	 *
923 	 *  |-----------------------------|
924 	 *  |                             |
925 	 *  |-----------------------------| <-----Buffer Pointer Address given
926 	 *  |                             |  ^    in HW descriptor (aligned)
927 	 *  |       HTT Metadata          |  |
928 	 *  |                             |  |
929 	 *  |                             |  | Packet Offset given in descriptor
930 	 *  |                             |  |
931 	 *  |-----------------------------|  |
932 	 *  |       Alignment Pad         |  v
933 	 *  |-----------------------------| <----- Actual buffer start address
934 	 *  |        SKB Data             |           (Unaligned)
935 	 *  |                             |
936 	 *  |                             |
937 	 *  |                             |
938 	 *  |                             |
939 	 *  |                             |
940 	 *  |-----------------------------|
941 	 */
942 	if (qdf_unlikely((msdu_info->exception_fw)) ||
943 				(vdev->opmode == wlan_op_mode_ocb) ||
944 				(tx_exc_metadata &&
945 				tx_exc_metadata->is_tx_sniffer)) {
946 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
947 
948 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
949 			DP_STATS_INC(vdev,
950 				     tx_i.dropped.headroom_insufficient, 1);
951 			goto failure;
952 		}
953 
954 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
955 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
956 					"qdf_nbuf_push_head failed");
957 			goto failure;
958 		}
959 
960 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
961 				msdu_info);
962 		if (htt_hdr_size == 0)
963 			goto failure;
964 
965 		tx_desc->length = qdf_nbuf_headlen(nbuf);
966 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
967 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
968 		is_exception = 1;
969 		tx_desc->length -= tx_desc->pkt_offset;
970 	}
971 
972 #if !TQM_BYPASS_WAR
973 	if (is_exception || tx_exc_metadata)
974 #endif
975 	{
976 		/* Temporary WAR due to TQM VP issues */
977 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
978 		qdf_atomic_inc(&soc->num_tx_exception);
979 	}
980 
981 	return tx_desc;
982 
983 failure:
984 	dp_tx_desc_release(tx_desc, desc_pool_id);
985 	return NULL;
986 }
987 
988 /**
989  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
990  * @vdev: DP vdev handle
991  * @nbuf: skb
992  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
993  * @desc_pool_id : Descriptor Pool ID
994  *
995  * Allocate and prepare Tx descriptor with msdu and fragment descritor
996  * information. For frames wth fragments, allocate and prepare
997  * an MSDU extension descriptor
998  *
999  * Return: Pointer to Tx Descriptor on success,
1000  *         NULL on failure
1001  */
1002 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1003 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1004 		uint8_t desc_pool_id)
1005 {
1006 	struct dp_tx_desc_s *tx_desc;
1007 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1008 	struct dp_pdev *pdev = vdev->pdev;
1009 	struct dp_soc *soc = pdev->soc;
1010 
1011 	if (dp_tx_limit_check(vdev))
1012 		return NULL;
1013 
1014 	/* Allocate software Tx descriptor */
1015 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1016 	if (!tx_desc) {
1017 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1018 		return NULL;
1019 	}
1020 
1021 	dp_tx_outstanding_inc(pdev);
1022 
1023 	/* Initialize the SW tx descriptor */
1024 	tx_desc->nbuf = nbuf;
1025 	tx_desc->frm_type = msdu_info->frm_type;
1026 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1027 	tx_desc->vdev_id = vdev->vdev_id;
1028 	tx_desc->pdev = pdev;
1029 	tx_desc->pkt_offset = 0;
1030 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1031 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1032 
1033 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1034 
1035 	/* Handle scattered frames - TSO/SG/ME */
1036 	/* Allocate and prepare an extension descriptor for scattered frames */
1037 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1038 	if (!msdu_ext_desc) {
1039 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1040 				"%s Tx Extension Descriptor Alloc Fail",
1041 				__func__);
1042 		goto failure;
1043 	}
1044 
1045 #if TQM_BYPASS_WAR
1046 	/* Temporary WAR due to TQM VP issues */
1047 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1048 	qdf_atomic_inc(&soc->num_tx_exception);
1049 #endif
1050 	if (qdf_unlikely(msdu_info->exception_fw))
1051 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1052 
1053 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1054 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1055 
1056 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1057 
1058 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1059 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1060 	else
1061 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1062 
1063 	return tx_desc;
1064 failure:
1065 	dp_tx_desc_release(tx_desc, desc_pool_id);
1066 	return NULL;
1067 }
1068 
1069 /**
1070  * dp_tx_prepare_raw() - Prepare RAW packet TX
1071  * @vdev: DP vdev handle
1072  * @nbuf: buffer pointer
1073  * @seg_info: Pointer to Segment info Descriptor to be prepared
1074  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1075  *     descriptor
1076  *
1077  * Return:
1078  */
1079 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1080 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1081 {
1082 	qdf_nbuf_t curr_nbuf = NULL;
1083 	uint16_t total_len = 0;
1084 	qdf_dma_addr_t paddr;
1085 	int32_t i;
1086 	int32_t mapped_buf_num = 0;
1087 
1088 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1089 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1090 
1091 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1092 
1093 	/* Continue only if frames are of DATA type */
1094 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1095 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1096 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1097 			  "Pkt. recd is of not data type");
1098 		goto error;
1099 	}
1100 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1101 	if (vdev->raw_mode_war &&
1102 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1103 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1104 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1105 
1106 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1107 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1108 		/*
1109 		 * Number of nbuf's must not exceed the size of the frags
1110 		 * array in seg_info.
1111 		 */
1112 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1113 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1114 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1115 			goto error;
1116 		}
1117 		if (QDF_STATUS_SUCCESS !=
1118 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1119 						   curr_nbuf,
1120 						   QDF_DMA_TO_DEVICE,
1121 						   curr_nbuf->len)) {
1122 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1123 				"%s dma map error ", __func__);
1124 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1125 			goto error;
1126 		}
1127 		/* Update the count of mapped nbuf's */
1128 		mapped_buf_num++;
1129 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1130 		seg_info->frags[i].paddr_lo = paddr;
1131 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1132 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1133 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1134 		total_len += qdf_nbuf_len(curr_nbuf);
1135 	}
1136 
1137 	seg_info->frag_cnt = i;
1138 	seg_info->total_len = total_len;
1139 	seg_info->next = NULL;
1140 
1141 	sg_info->curr_seg = seg_info;
1142 
1143 	msdu_info->frm_type = dp_tx_frm_raw;
1144 	msdu_info->num_seg = 1;
1145 
1146 	return nbuf;
1147 
1148 error:
1149 	i = 0;
1150 	while (nbuf) {
1151 		curr_nbuf = nbuf;
1152 		if (i < mapped_buf_num) {
1153 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1154 						     QDF_DMA_TO_DEVICE,
1155 						     curr_nbuf->len);
1156 			i++;
1157 		}
1158 		nbuf = qdf_nbuf_next(nbuf);
1159 		qdf_nbuf_free(curr_nbuf);
1160 	}
1161 	return NULL;
1162 
1163 }
1164 
1165 /**
1166  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1167  * @soc: DP soc handle
1168  * @nbuf: Buffer pointer
1169  *
1170  * unmap the chain of nbufs that belong to this RAW frame.
1171  *
1172  * Return: None
1173  */
1174 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1175 				    qdf_nbuf_t nbuf)
1176 {
1177 	qdf_nbuf_t cur_nbuf = nbuf;
1178 
1179 	do {
1180 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1181 					     QDF_DMA_TO_DEVICE,
1182 					     cur_nbuf->len);
1183 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1184 	} while (cur_nbuf);
1185 }
1186 
1187 #ifdef VDEV_PEER_PROTOCOL_COUNT
1188 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1189 { \
1190 	qdf_nbuf_t nbuf_local; \
1191 	struct dp_vdev *vdev_local = vdev_hdl; \
1192 	do { \
1193 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1194 			break; \
1195 		nbuf_local = nbuf; \
1196 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1197 			 htt_cmn_pkt_type_raw)) \
1198 			break; \
1199 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1200 			break; \
1201 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1202 			break; \
1203 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1204 						       (nbuf_local), \
1205 						       NULL, 1, 0); \
1206 	} while (0); \
1207 }
1208 #else
1209 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1210 #endif
1211 
1212 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1213 /**
1214  * dp_tx_update_stats() - Update soc level tx stats
1215  * @soc: DP soc handle
1216  * @nbuf: packet being transmitted
1217  *
1218  * Returns: none
1219  */
1220 static inline void dp_tx_update_stats(struct dp_soc *soc,
1221 				      qdf_nbuf_t nbuf)
1222 {
1223 	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
1224 }
1225 
1226 /**
1227  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1228  * @soc: Datapath soc handle
1229  * @tx_desc: tx packet descriptor
1230  * @tid: TID for pkt transmission
1231  *
1232  * Returns: 1, if coalescing is to be done
1233  *	    0, if coalescing is not to be done
1234  */
1235 static inline int
1236 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1237 			 struct dp_tx_desc_s *tx_desc,
1238 			 uint8_t tid)
1239 {
1240 	struct dp_swlm *swlm = &soc->swlm;
1241 	union swlm_data swlm_query_data;
1242 	struct dp_swlm_tcl_data tcl_data;
1243 	QDF_STATUS status;
1244 	int ret;
1245 
1246 	if (qdf_unlikely(!swlm->is_enabled))
1247 		return 0;
1248 
1249 	tcl_data.nbuf = tx_desc->nbuf;
1250 	tcl_data.tid = tid;
1251 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1252 	swlm_query_data.tcl_data = &tcl_data;
1253 
1254 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1255 	if (QDF_IS_STATUS_ERROR(status)) {
1256 		dp_swlm_tcl_reset_session_data(soc);
1257 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1258 		return 0;
1259 	}
1260 
1261 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1262 	if (ret) {
1263 		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
1264 	} else {
1265 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1266 	}
1267 
1268 	return ret;
1269 }
1270 
1271 /**
1272  * dp_tx_ring_access_end() - HAL ring access end for data transmission
1273  * @soc: Datapath soc handle
1274  * @hal_ring_hdl: HAL ring handle
1275  * @coalesce: Coalesce the current write or not
1276  *
1277  * Returns: none
1278  */
1279 static inline void
1280 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1281 		      int coalesce)
1282 {
1283 	if (coalesce)
1284 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1285 	else
1286 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1287 }
1288 
1289 #else
1290 static inline void dp_tx_update_stats(struct dp_soc *soc,
1291 				      qdf_nbuf_t nbuf)
1292 {
1293 }
1294 
1295 static inline int
1296 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1297 			 struct dp_tx_desc_s *tx_desc,
1298 			 uint8_t tid)
1299 {
1300 	return 0;
1301 }
1302 
1303 static inline void
1304 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1305 		      int coalesce)
1306 {
1307 	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1308 }
1309 
1310 #endif
1311 /**
1312  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1313  * @soc: DP Soc Handle
1314  * @vdev: DP vdev handle
1315  * @tx_desc: Tx Descriptor Handle
1316  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1317  * @fw_metadata: Metadata to send to Target Firmware along with frame
1318  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1319  * @tx_exc_metadata: Handle that holds exception path meta data
1320  *
1321  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1322  *  from software Tx descriptor
1323  *
1324  * Return: QDF_STATUS_SUCCESS: success
1325  *         QDF_STATUS_E_RESOURCES: Error return
1326  */
1327 static QDF_STATUS
1328 dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1329 		 struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
1330 		 struct cdp_tx_exception_metadata *tx_exc_metadata,
1331 		 struct dp_tx_msdu_info_s *msdu_info)
1332 {
1333 	void *hal_tx_desc;
1334 	uint32_t *hal_tx_desc_cached;
1335 	int coalesce = 0;
1336 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1337 	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
1338 	uint8_t tid = msdu_info->tid;
1339 
1340 	/*
1341 	 * Setting it initialization statically here to avoid
1342 	 * a memset call jump with qdf_mem_set call
1343 	 */
1344 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1345 
1346 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1347 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1348 			tx_exc_metadata->sec_type : vdev->sec_type);
1349 
1350 	/* Return Buffer Manager ID */
1351 	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
1352 
1353 	hal_ring_handle_t hal_ring_hdl = NULL;
1354 
1355 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1356 
1357 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1358 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1359 		return QDF_STATUS_E_RESOURCES;
1360 	}
1361 
1362 	hal_tx_desc_cached = (void *) cached_desc;
1363 
1364 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
1365 				 tx_desc->dma_addr, bm_id, tx_desc->id,
1366 				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1367 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1368 				vdev->lmac_id);
1369 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1370 				    vdev->search_type);
1371 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1372 				     vdev->bss_ast_idx);
1373 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1374 					  vdev->dscp_tid_map_id);
1375 
1376 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1377 			sec_type_map[sec_type]);
1378 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1379 				      (vdev->bss_ast_hash & 0xF));
1380 
1381 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1382 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1383 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1384 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1385 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1386 					  vdev->hal_desc_addr_search_flags);
1387 
1388 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1389 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1390 
1391 	/* verify checksum offload configuration*/
1392 	if (vdev->csum_enabled &&
1393 	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1394 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1395 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1396 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1397 	}
1398 
1399 	if (tid != HTT_TX_EXT_TID_INVALID)
1400 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1401 
1402 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1403 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1404 
1405 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1406 	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
1407 			 soc->wlan_cfg_ctx)))
1408 		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
1409 
1410 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1411 			 tx_desc->length,
1412 			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
1413 			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
1414 			 tx_desc->id);
1415 
1416 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1417 
1418 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1419 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1420 			  "%s %d : HAL RING Access Failed -- %pK",
1421 			 __func__, __LINE__, hal_ring_hdl);
1422 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1423 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1424 		return status;
1425 	}
1426 
1427 	/* Sync cached descriptor with HW */
1428 
1429 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1430 	if (qdf_unlikely(!hal_tx_desc)) {
1431 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1432 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1433 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1434 		goto ring_access_fail;
1435 	}
1436 
1437 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1438 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1439 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1440 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
1441 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1442 	dp_tx_update_stats(soc, tx_desc->nbuf);
1443 	status = QDF_STATUS_SUCCESS;
1444 
1445 ring_access_fail:
1446 	if (hif_pm_runtime_get(soc->hif_handle,
1447 			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
1448 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1449 		hif_pm_runtime_put(soc->hif_handle,
1450 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1451 	} else {
1452 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1453 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1454 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1455 	}
1456 
1457 	return status;
1458 }
1459 
1460 
1461 /**
1462  * dp_cce_classify() - Classify the frame based on CCE rules
1463  * @vdev: DP vdev handle
1464  * @nbuf: skb
1465  *
1466  * Classify frames based on CCE rules
1467  * Return: bool( true if classified,
1468  *               else false)
1469  */
1470 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1471 {
1472 	qdf_ether_header_t *eh = NULL;
1473 	uint16_t   ether_type;
1474 	qdf_llc_t *llcHdr;
1475 	qdf_nbuf_t nbuf_clone = NULL;
1476 	qdf_dot3_qosframe_t *qos_wh = NULL;
1477 
1478 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1479 	/*
1480 	 * In case of mesh packets or hlos tid override enabled,
1481 	 * don't do any classification
1482 	 */
1483 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1484 					& DP_TX_SKIP_CCE_CLASSIFY))
1485 			return false;
1486 	}
1487 
1488 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1489 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1490 		ether_type = eh->ether_type;
1491 		llcHdr = (qdf_llc_t *)(nbuf->data +
1492 					sizeof(qdf_ether_header_t));
1493 	} else {
1494 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1495 		/* For encrypted packets don't do any classification */
1496 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1497 			return false;
1498 
1499 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1500 			if (qdf_unlikely(
1501 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1502 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1503 
1504 				ether_type = *(uint16_t *)(nbuf->data
1505 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1506 						+ sizeof(qdf_llc_t)
1507 						- sizeof(ether_type));
1508 				llcHdr = (qdf_llc_t *)(nbuf->data +
1509 						QDF_IEEE80211_4ADDR_HDR_LEN);
1510 			} else {
1511 				ether_type = *(uint16_t *)(nbuf->data
1512 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1513 						+ sizeof(qdf_llc_t)
1514 						- sizeof(ether_type));
1515 				llcHdr = (qdf_llc_t *)(nbuf->data +
1516 					QDF_IEEE80211_3ADDR_HDR_LEN);
1517 			}
1518 
1519 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1520 				&& (ether_type ==
1521 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1522 
1523 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1524 				return true;
1525 			}
1526 		}
1527 
1528 		return false;
1529 	}
1530 
1531 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1532 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1533 				sizeof(*llcHdr));
1534 		nbuf_clone = qdf_nbuf_clone(nbuf);
1535 		if (qdf_unlikely(nbuf_clone)) {
1536 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1537 
1538 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1539 				qdf_nbuf_pull_head(nbuf_clone,
1540 						sizeof(qdf_net_vlanhdr_t));
1541 			}
1542 		}
1543 	} else {
1544 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1545 			nbuf_clone = qdf_nbuf_clone(nbuf);
1546 			if (qdf_unlikely(nbuf_clone)) {
1547 				qdf_nbuf_pull_head(nbuf_clone,
1548 					sizeof(qdf_net_vlanhdr_t));
1549 			}
1550 		}
1551 	}
1552 
1553 	if (qdf_unlikely(nbuf_clone))
1554 		nbuf = nbuf_clone;
1555 
1556 
1557 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1558 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1559 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1560 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1561 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1562 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1563 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1564 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1565 		if (qdf_unlikely(nbuf_clone))
1566 			qdf_nbuf_free(nbuf_clone);
1567 		return true;
1568 	}
1569 
1570 	if (qdf_unlikely(nbuf_clone))
1571 		qdf_nbuf_free(nbuf_clone);
1572 
1573 	return false;
1574 }
1575 
1576 /**
1577  * dp_tx_get_tid() - Obtain TID to be used for this frame
1578  * @vdev: DP vdev handle
1579  * @nbuf: skb
1580  *
1581  * Extract the DSCP or PCP information from frame and map into TID value.
1582  *
1583  * Return: void
1584  */
1585 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1586 			  struct dp_tx_msdu_info_s *msdu_info)
1587 {
1588 	uint8_t tos = 0, dscp_tid_override = 0;
1589 	uint8_t *hdr_ptr, *L3datap;
1590 	uint8_t is_mcast = 0;
1591 	qdf_ether_header_t *eh = NULL;
1592 	qdf_ethervlan_header_t *evh = NULL;
1593 	uint16_t   ether_type;
1594 	qdf_llc_t *llcHdr;
1595 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1596 
1597 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1598 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1599 		eh = (qdf_ether_header_t *)nbuf->data;
1600 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1601 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1602 	} else {
1603 		qdf_dot3_qosframe_t *qos_wh =
1604 			(qdf_dot3_qosframe_t *) nbuf->data;
1605 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1606 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1607 		return;
1608 	}
1609 
1610 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1611 	ether_type = eh->ether_type;
1612 
1613 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1614 	/*
1615 	 * Check if packet is dot3 or eth2 type.
1616 	 */
1617 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1618 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1619 				sizeof(*llcHdr));
1620 
1621 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1622 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1623 				sizeof(*llcHdr);
1624 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1625 					+ sizeof(*llcHdr) +
1626 					sizeof(qdf_net_vlanhdr_t));
1627 		} else {
1628 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1629 				sizeof(*llcHdr);
1630 		}
1631 	} else {
1632 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1633 			evh = (qdf_ethervlan_header_t *) eh;
1634 			ether_type = evh->ether_type;
1635 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1636 		}
1637 	}
1638 
1639 	/*
1640 	 * Find priority from IP TOS DSCP field
1641 	 */
1642 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1643 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1644 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1645 			/* Only for unicast frames */
1646 			if (!is_mcast) {
1647 				/* send it on VO queue */
1648 				msdu_info->tid = DP_VO_TID;
1649 			}
1650 		} else {
1651 			/*
1652 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1653 			 * from TOS byte.
1654 			 */
1655 			tos = ip->ip_tos;
1656 			dscp_tid_override = 1;
1657 
1658 		}
1659 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1660 		/* TODO
1661 		 * use flowlabel
1662 		 *igmpmld cases to be handled in phase 2
1663 		 */
1664 		unsigned long ver_pri_flowlabel;
1665 		unsigned long pri;
1666 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1667 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1668 			DP_IPV6_PRIORITY_SHIFT;
1669 		tos = pri;
1670 		dscp_tid_override = 1;
1671 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1672 		msdu_info->tid = DP_VO_TID;
1673 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1674 		/* Only for unicast frames */
1675 		if (!is_mcast) {
1676 			/* send ucast arp on VO queue */
1677 			msdu_info->tid = DP_VO_TID;
1678 		}
1679 	}
1680 
1681 	/*
1682 	 * Assign all MCAST packets to BE
1683 	 */
1684 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1685 		if (is_mcast) {
1686 			tos = 0;
1687 			dscp_tid_override = 1;
1688 		}
1689 	}
1690 
1691 	if (dscp_tid_override == 1) {
1692 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1693 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1694 	}
1695 
1696 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1697 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1698 
1699 	return;
1700 }
1701 
1702 /**
1703  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1704  * @vdev: DP vdev handle
1705  * @nbuf: skb
1706  *
1707  * Software based TID classification is required when more than 2 DSCP-TID
1708  * mapping tables are needed.
1709  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1710  *
1711  * Return: void
1712  */
1713 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1714 				      struct dp_tx_msdu_info_s *msdu_info)
1715 {
1716 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1717 
1718 	/*
1719 	 * skip_sw_tid_classification flag will set in below cases-
1720 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1721 	 * 2. hlos_tid_override enabled for vdev
1722 	 * 3. mesh mode enabled for vdev
1723 	 */
1724 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1725 		/* Update tid in msdu_info from skb priority */
1726 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1727 			    & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1728 			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1729 			return;
1730 		}
1731 		return;
1732 	}
1733 
1734 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1735 }
1736 
1737 #ifdef FEATURE_WLAN_TDLS
1738 /**
1739  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1740  * @soc: datapath SOC
1741  * @vdev: datapath vdev
1742  * @tx_desc: TX descriptor
1743  *
1744  * Return: None
1745  */
1746 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1747 				    struct dp_vdev *vdev,
1748 				    struct dp_tx_desc_s *tx_desc)
1749 {
1750 	if (vdev) {
1751 		if (vdev->is_tdls_frame) {
1752 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1753 			vdev->is_tdls_frame = false;
1754 		}
1755 	}
1756 }
1757 
1758 /**
1759  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1760  * @soc: dp_soc handle
1761  * @tx_desc: TX descriptor
1762  * @vdev: datapath vdev handle
1763  *
1764  * Return: None
1765  */
1766 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1767 					 struct dp_tx_desc_s *tx_desc)
1768 {
1769 	struct hal_tx_completion_status ts = {0};
1770 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1771 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1772 						     DP_MOD_ID_TDLS);
1773 
1774 	if (qdf_unlikely(!vdev)) {
1775 		dp_err_rl("vdev is null!");
1776 		goto error;
1777 	}
1778 
1779 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1780 	if (vdev->tx_non_std_data_callback.func) {
1781 		qdf_nbuf_set_next(nbuf, NULL);
1782 		vdev->tx_non_std_data_callback.func(
1783 				vdev->tx_non_std_data_callback.ctxt,
1784 				nbuf, ts.status);
1785 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1786 		return;
1787 	} else {
1788 		dp_err_rl("callback func is null");
1789 	}
1790 
1791 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1792 error:
1793 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1794 	qdf_nbuf_free(nbuf);
1795 }
1796 
1797 /**
1798  * dp_tx_msdu_single_map() - do nbuf map
1799  * @vdev: DP vdev handle
1800  * @tx_desc: DP TX descriptor pointer
1801  * @nbuf: skb pointer
1802  *
1803  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1804  * operation done in other component.
1805  *
1806  * Return: QDF_STATUS
1807  */
1808 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1809 					       struct dp_tx_desc_s *tx_desc,
1810 					       qdf_nbuf_t nbuf)
1811 {
1812 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1813 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1814 						  nbuf,
1815 						  QDF_DMA_TO_DEVICE,
1816 						  nbuf->len);
1817 	else
1818 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1819 					   QDF_DMA_TO_DEVICE);
1820 }
1821 #else
1822 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1823 					   struct dp_vdev *vdev,
1824 					   struct dp_tx_desc_s *tx_desc)
1825 {
1826 }
1827 
1828 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1829 						struct dp_tx_desc_s *tx_desc)
1830 {
1831 }
1832 
1833 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1834 					       struct dp_tx_desc_s *tx_desc,
1835 					       qdf_nbuf_t nbuf)
1836 {
1837 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1838 					  nbuf,
1839 					  QDF_DMA_TO_DEVICE,
1840 					  nbuf->len);
1841 }
1842 #endif
1843 
1844 #ifdef MESH_MODE_SUPPORT
1845 /**
1846  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1847  * @soc: datapath SOC
1848  * @vdev: datapath vdev
1849  * @tx_desc: TX descriptor
1850  *
1851  * Return: None
1852  */
1853 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1854 					   struct dp_vdev *vdev,
1855 					   struct dp_tx_desc_s *tx_desc)
1856 {
1857 	if (qdf_unlikely(vdev->mesh_vdev))
1858 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1859 }
1860 
1861 /**
1862  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
1863  * @soc: dp_soc handle
1864  * @tx_desc: TX descriptor
1865  * @vdev: datapath vdev handle
1866  *
1867  * Return: None
1868  */
1869 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1870 					     struct dp_tx_desc_s *tx_desc)
1871 {
1872 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1873 	struct dp_vdev *vdev = NULL;
1874 
1875 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1876 		qdf_nbuf_free(nbuf);
1877 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1878 	} else {
1879 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1880 					     DP_MOD_ID_MESH);
1881 		if (vdev && vdev->osif_tx_free_ext)
1882 			vdev->osif_tx_free_ext((nbuf));
1883 		else
1884 			qdf_nbuf_free(nbuf);
1885 
1886 		if (vdev)
1887 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1888 	}
1889 }
1890 #else
1891 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1892 					   struct dp_vdev *vdev,
1893 					   struct dp_tx_desc_s *tx_desc)
1894 {
1895 }
1896 
1897 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1898 					     struct dp_tx_desc_s *tx_desc)
1899 {
1900 }
1901 #endif
1902 
1903 /**
1904  * dp_tx_frame_is_drop() - checks if the packet is loopback
1905  * @vdev: DP vdev handle
1906  * @nbuf: skb
1907  *
1908  * Return: 1 if frame needs to be dropped else 0
1909  */
1910 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1911 {
1912 	struct dp_pdev *pdev = NULL;
1913 	struct dp_ast_entry *src_ast_entry = NULL;
1914 	struct dp_ast_entry *dst_ast_entry = NULL;
1915 	struct dp_soc *soc = NULL;
1916 
1917 	qdf_assert(vdev);
1918 	pdev = vdev->pdev;
1919 	qdf_assert(pdev);
1920 	soc = pdev->soc;
1921 
1922 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1923 				(soc, dstmac, vdev->pdev->pdev_id);
1924 
1925 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1926 				(soc, srcmac, vdev->pdev->pdev_id);
1927 	if (dst_ast_entry && src_ast_entry) {
1928 		if (dst_ast_entry->peer_id ==
1929 				src_ast_entry->peer_id)
1930 			return 1;
1931 	}
1932 
1933 	return 0;
1934 }
1935 
1936 /**
1937  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1938  * @vdev: DP vdev handle
1939  * @nbuf: skb
1940  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1941  * @meta_data: Metadata to the fw
1942  * @tx_q: Tx queue to be used for this Tx frame
1943  * @peer_id: peer_id of the peer in case of NAWDS frames
1944  * @tx_exc_metadata: Handle that holds exception path metadata
1945  *
1946  * Return: NULL on success,
1947  *         nbuf when it fails to send
1948  */
1949 qdf_nbuf_t
1950 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1951 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1952 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1953 {
1954 	struct dp_pdev *pdev = vdev->pdev;
1955 	struct dp_soc *soc = pdev->soc;
1956 	struct dp_tx_desc_s *tx_desc;
1957 	QDF_STATUS status;
1958 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1959 	uint16_t htt_tcl_metadata = 0;
1960 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1961 	uint8_t tid = msdu_info->tid;
1962 	struct cdp_tid_tx_stats *tid_stats = NULL;
1963 
1964 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1965 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1966 			msdu_info, tx_exc_metadata);
1967 	if (!tx_desc) {
1968 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1969 			  vdev, tx_q->desc_pool_id);
1970 		drop_code = TX_DESC_ERR;
1971 		goto fail_return;
1972 	}
1973 
1974 	if (qdf_unlikely(soc->cce_disable)) {
1975 		if (dp_cce_classify(vdev, nbuf) == true) {
1976 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1977 			tid = DP_VO_TID;
1978 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1979 		}
1980 	}
1981 
1982 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
1983 
1984 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1985 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1986 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1987 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1988 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1989 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1990 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1991 				peer_id);
1992 	} else
1993 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1994 
1995 	if (msdu_info->exception_fw)
1996 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1997 
1998 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
1999 					 !pdev->enhanced_stats_en);
2000 
2001 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2002 
2003 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
2004 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
2005 		/* Handle failure */
2006 		dp_err("qdf_nbuf_map failed");
2007 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2008 		drop_code = TX_DMA_MAP_ERR;
2009 		goto release_desc;
2010 	}
2011 
2012 	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
2013 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2014 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
2015 				  tx_exc_metadata, msdu_info);
2016 
2017 	if (status != QDF_STATUS_SUCCESS) {
2018 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2019 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
2020 			  __func__, tx_desc, tx_q->ring_id);
2021 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2022 					     QDF_DMA_TO_DEVICE,
2023 					     nbuf->len);
2024 		drop_code = TX_HW_ENQUEUE;
2025 		goto release_desc;
2026 	}
2027 
2028 	return NULL;
2029 
2030 release_desc:
2031 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2032 
2033 fail_return:
2034 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2035 	tid_stats = &pdev->stats.tid_stats.
2036 		    tid_tx_stats[tx_q->ring_id][tid];
2037 	tid_stats->swdrop_cnt[drop_code]++;
2038 	return nbuf;
2039 }
2040 
2041 /**
2042  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2043  * @soc: Soc handle
2044  * @desc: software Tx descriptor to be processed
2045  *
2046  * Return: none
2047  */
2048 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2049 				       struct dp_tx_desc_s *desc)
2050 {
2051 	qdf_nbuf_t nbuf = desc->nbuf;
2052 
2053 	/* nbuf already freed in vdev detach path */
2054 	if (!nbuf)
2055 		return;
2056 
2057 	/* If it is TDLS mgmt, don't unmap or free the frame */
2058 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2059 		return dp_non_std_tx_comp_free_buff(soc, desc);
2060 
2061 	/* 0 : MSDU buffer, 1 : MLE */
2062 	if (desc->msdu_ext_desc) {
2063 		/* TSO free */
2064 		if (hal_tx_ext_desc_get_tso_enable(
2065 					desc->msdu_ext_desc->vaddr)) {
2066 			/* unmap eash TSO seg before free the nbuf */
2067 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2068 						desc->tso_num_desc);
2069 			qdf_nbuf_free(nbuf);
2070 			return;
2071 		}
2072 	}
2073 	/* If it's ME frame, dont unmap the cloned nbuf's */
2074 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2075 		goto nbuf_free;
2076 
2077 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2078 				     QDF_DMA_TO_DEVICE, nbuf->len);
2079 
2080 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2081 		return dp_mesh_tx_comp_free_buff(soc, desc);
2082 nbuf_free:
2083 	qdf_nbuf_free(nbuf);
2084 }
2085 
2086 /**
2087  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2088  * @vdev: DP vdev handle
2089  * @nbuf: skb
2090  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2091  *
2092  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2093  *
2094  * Return: NULL on success,
2095  *         nbuf when it fails to send
2096  */
2097 #if QDF_LOCK_STATS
2098 noinline
2099 #else
2100 #endif
2101 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2102 				    struct dp_tx_msdu_info_s *msdu_info)
2103 {
2104 	uint32_t i;
2105 	struct dp_pdev *pdev = vdev->pdev;
2106 	struct dp_soc *soc = pdev->soc;
2107 	struct dp_tx_desc_s *tx_desc;
2108 	bool is_cce_classified = false;
2109 	QDF_STATUS status;
2110 	uint16_t htt_tcl_metadata = 0;
2111 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2112 	struct cdp_tid_tx_stats *tid_stats = NULL;
2113 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2114 
2115 	if (qdf_unlikely(soc->cce_disable)) {
2116 		is_cce_classified = dp_cce_classify(vdev, nbuf);
2117 		if (is_cce_classified) {
2118 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
2119 			msdu_info->tid = DP_VO_TID;
2120 		}
2121 	}
2122 
2123 	if (msdu_info->frm_type == dp_tx_frm_me)
2124 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2125 
2126 	i = 0;
2127 	/* Print statement to track i and num_seg */
2128 	/*
2129 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2130 	 * descriptors using information in msdu_info
2131 	 */
2132 	while (i < msdu_info->num_seg) {
2133 		/*
2134 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2135 		 * descriptor
2136 		 */
2137 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2138 				tx_q->desc_pool_id);
2139 
2140 		if (!tx_desc) {
2141 			if (msdu_info->frm_type == dp_tx_frm_me) {
2142 				prep_desc_fail++;
2143 				dp_tx_me_free_buf(pdev,
2144 					(void *)(msdu_info->u.sg_info
2145 						.curr_seg->frags[0].vaddr));
2146 				if (prep_desc_fail == msdu_info->num_seg) {
2147 					/*
2148 					 * Unmap is needed only if descriptor
2149 					 * preparation failed for all segments.
2150 					 */
2151 					qdf_nbuf_unmap(soc->osdev,
2152 						       msdu_info->u.sg_info.
2153 						       curr_seg->nbuf,
2154 						       QDF_DMA_TO_DEVICE);
2155 				}
2156 				/*
2157 				 * Free the nbuf for the current segment
2158 				 * and make it point to the next in the list.
2159 				 * For me, there are as many segments as there
2160 				 * are no of clients.
2161 				 */
2162 				qdf_nbuf_free(msdu_info->u.sg_info
2163 					      .curr_seg->nbuf);
2164 				if (msdu_info->u.sg_info.curr_seg->next) {
2165 					msdu_info->u.sg_info.curr_seg =
2166 						msdu_info->u.sg_info
2167 						.curr_seg->next;
2168 					nbuf = msdu_info->u.sg_info
2169 					       .curr_seg->nbuf;
2170 				}
2171 				i++;
2172 				continue;
2173 			}
2174 
2175 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2176 				dp_tx_tso_unmap_segment(soc,
2177 							msdu_info->u.tso_info.
2178 							curr_seg,
2179 							msdu_info->u.tso_info.
2180 							tso_num_seg_list);
2181 
2182 				if (msdu_info->u.tso_info.curr_seg->next) {
2183 					msdu_info->u.tso_info.curr_seg =
2184 					msdu_info->u.tso_info.curr_seg->next;
2185 					i++;
2186 					continue;
2187 				}
2188 			}
2189 
2190 			goto done;
2191 		}
2192 
2193 		if (msdu_info->frm_type == dp_tx_frm_me) {
2194 			tx_desc->me_buffer =
2195 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
2196 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2197 		}
2198 
2199 		if (is_cce_classified)
2200 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2201 
2202 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2203 		if (msdu_info->exception_fw) {
2204 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2205 		}
2206 
2207 		/*
2208 		 * For frames with multiple segments (TSO, ME), jump to next
2209 		 * segment.
2210 		 */
2211 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2212 			if (msdu_info->u.tso_info.curr_seg->next) {
2213 				msdu_info->u.tso_info.curr_seg =
2214 					msdu_info->u.tso_info.curr_seg->next;
2215 
2216 				/*
2217 				 * If this is a jumbo nbuf, then increment the
2218 				 * number of nbuf users for each additional
2219 				 * segment of the msdu. This will ensure that
2220 				 * the skb is freed only after receiving tx
2221 				 * completion for all segments of an nbuf
2222 				 */
2223 				qdf_nbuf_inc_users(nbuf);
2224 
2225 				/* Check with MCL if this is needed */
2226 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2227 				 */
2228 			}
2229 		}
2230 
2231 		/*
2232 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2233 		 */
2234 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
2235 					  NULL, msdu_info);
2236 
2237 		if (status != QDF_STATUS_SUCCESS) {
2238 			dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2239 				tx_desc, tx_q->ring_id);
2240 
2241 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2242 			tid_stats = &pdev->stats.tid_stats.
2243 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2244 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2245 
2246 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2247 			if (msdu_info->frm_type == dp_tx_frm_me) {
2248 				hw_enq_fail++;
2249 				if (hw_enq_fail == msdu_info->num_seg) {
2250 					/*
2251 					 * Unmap is needed only if enqueue
2252 					 * failed for all segments.
2253 					 */
2254 					qdf_nbuf_unmap(soc->osdev,
2255 						       msdu_info->u.sg_info.
2256 						       curr_seg->nbuf,
2257 						       QDF_DMA_TO_DEVICE);
2258 				}
2259 				/*
2260 				 * Free the nbuf for the current segment
2261 				 * and make it point to the next in the list.
2262 				 * For me, there are as many segments as there
2263 				 * are no of clients.
2264 				 */
2265 				qdf_nbuf_free(msdu_info->u.sg_info
2266 					      .curr_seg->nbuf);
2267 				if (msdu_info->u.sg_info.curr_seg->next) {
2268 					msdu_info->u.sg_info.curr_seg =
2269 						msdu_info->u.sg_info
2270 						.curr_seg->next;
2271 					nbuf = msdu_info->u.sg_info
2272 					       .curr_seg->nbuf;
2273 				}
2274 				i++;
2275 				continue;
2276 			}
2277 
2278 			/*
2279 			 * For TSO frames, the nbuf users increment done for
2280 			 * the current segment has to be reverted, since the
2281 			 * hw enqueue for this segment failed
2282 			 */
2283 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2284 			    msdu_info->u.tso_info.curr_seg) {
2285 				/*
2286 				 * unmap and free current,
2287 				 * retransmit remaining segments
2288 				 */
2289 				dp_tx_comp_free_buf(soc, tx_desc);
2290 				i++;
2291 				continue;
2292 			}
2293 
2294 			goto done;
2295 		}
2296 
2297 		/*
2298 		 * TODO
2299 		 * if tso_info structure can be modified to have curr_seg
2300 		 * as first element, following 2 blocks of code (for TSO and SG)
2301 		 * can be combined into 1
2302 		 */
2303 
2304 		/*
2305 		 * For Multicast-Unicast converted packets,
2306 		 * each converted frame (for a client) is represented as
2307 		 * 1 segment
2308 		 */
2309 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2310 				(msdu_info->frm_type == dp_tx_frm_me)) {
2311 			if (msdu_info->u.sg_info.curr_seg->next) {
2312 				msdu_info->u.sg_info.curr_seg =
2313 					msdu_info->u.sg_info.curr_seg->next;
2314 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2315 			}
2316 		}
2317 		i++;
2318 	}
2319 
2320 	nbuf = NULL;
2321 
2322 done:
2323 	return nbuf;
2324 }
2325 
2326 /**
2327  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2328  *                     for SG frames
2329  * @vdev: DP vdev handle
2330  * @nbuf: skb
2331  * @seg_info: Pointer to Segment info Descriptor to be prepared
2332  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2333  *
2334  * Return: NULL on success,
2335  *         nbuf when it fails to send
2336  */
2337 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2338 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2339 {
2340 	uint32_t cur_frag, nr_frags, i;
2341 	qdf_dma_addr_t paddr;
2342 	struct dp_tx_sg_info_s *sg_info;
2343 
2344 	sg_info = &msdu_info->u.sg_info;
2345 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2346 
2347 	if (QDF_STATUS_SUCCESS !=
2348 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2349 					   QDF_DMA_TO_DEVICE,
2350 					   qdf_nbuf_headlen(nbuf))) {
2351 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2352 				"dma map error");
2353 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2354 
2355 		qdf_nbuf_free(nbuf);
2356 		return NULL;
2357 	}
2358 
2359 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2360 	seg_info->frags[0].paddr_lo = paddr;
2361 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2362 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2363 	seg_info->frags[0].vaddr = (void *) nbuf;
2364 
2365 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2366 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
2367 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
2368 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2369 					"frag dma map error");
2370 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2371 			goto map_err;
2372 		}
2373 
2374 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2375 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2376 		seg_info->frags[cur_frag + 1].paddr_hi =
2377 			((uint64_t) paddr) >> 32;
2378 		seg_info->frags[cur_frag + 1].len =
2379 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2380 	}
2381 
2382 	seg_info->frag_cnt = (cur_frag + 1);
2383 	seg_info->total_len = qdf_nbuf_len(nbuf);
2384 	seg_info->next = NULL;
2385 
2386 	sg_info->curr_seg = seg_info;
2387 
2388 	msdu_info->frm_type = dp_tx_frm_sg;
2389 	msdu_info->num_seg = 1;
2390 
2391 	return nbuf;
2392 map_err:
2393 	/* restore paddr into nbuf before calling unmap */
2394 	qdf_nbuf_mapped_paddr_set(nbuf,
2395 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2396 				  ((uint64_t)
2397 				  seg_info->frags[0].paddr_hi) << 32));
2398 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2399 				     QDF_DMA_TO_DEVICE,
2400 				     seg_info->frags[0].len);
2401 	for (i = 1; i <= cur_frag; i++) {
2402 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2403 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2404 				   seg_info->frags[i].paddr_hi) << 32),
2405 				   seg_info->frags[i].len,
2406 				   QDF_DMA_TO_DEVICE);
2407 	}
2408 	qdf_nbuf_free(nbuf);
2409 	return NULL;
2410 }
2411 
2412 /**
2413  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2414  * @vdev: DP vdev handle
2415  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2416  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2417  *
2418  * Return: NULL on failure,
2419  *         nbuf when extracted successfully
2420  */
2421 static
2422 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2423 				    struct dp_tx_msdu_info_s *msdu_info,
2424 				    uint16_t ppdu_cookie)
2425 {
2426 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2427 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2428 
2429 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2430 
2431 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2432 				(msdu_info->meta_data[5], 1);
2433 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2434 				(msdu_info->meta_data[5], 1);
2435 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2436 				(msdu_info->meta_data[6], ppdu_cookie);
2437 
2438 	msdu_info->exception_fw = 1;
2439 	msdu_info->is_tx_sniffer = 1;
2440 }
2441 
2442 #ifdef MESH_MODE_SUPPORT
2443 
2444 /**
2445  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2446 				and prepare msdu_info for mesh frames.
2447  * @vdev: DP vdev handle
2448  * @nbuf: skb
2449  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2450  *
2451  * Return: NULL on failure,
2452  *         nbuf when extracted successfully
2453  */
2454 static
2455 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2456 				struct dp_tx_msdu_info_s *msdu_info)
2457 {
2458 	struct meta_hdr_s *mhdr;
2459 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2460 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2461 
2462 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2463 
2464 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2465 		msdu_info->exception_fw = 0;
2466 		goto remove_meta_hdr;
2467 	}
2468 
2469 	msdu_info->exception_fw = 1;
2470 
2471 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2472 
2473 	meta_data->host_tx_desc_pool = 1;
2474 	meta_data->update_peer_cache = 1;
2475 	meta_data->learning_frame = 1;
2476 
2477 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2478 		meta_data->power = mhdr->power;
2479 
2480 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2481 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2482 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2483 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2484 
2485 		meta_data->dyn_bw = 1;
2486 
2487 		meta_data->valid_pwr = 1;
2488 		meta_data->valid_mcs_mask = 1;
2489 		meta_data->valid_nss_mask = 1;
2490 		meta_data->valid_preamble_type  = 1;
2491 		meta_data->valid_retries = 1;
2492 		meta_data->valid_bw_info = 1;
2493 	}
2494 
2495 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2496 		meta_data->encrypt_type = 0;
2497 		meta_data->valid_encrypt_type = 1;
2498 		meta_data->learning_frame = 0;
2499 	}
2500 
2501 	meta_data->valid_key_flags = 1;
2502 	meta_data->key_flags = (mhdr->keyix & 0x3);
2503 
2504 remove_meta_hdr:
2505 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2506 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2507 				"qdf_nbuf_pull_head failed");
2508 		qdf_nbuf_free(nbuf);
2509 		return NULL;
2510 	}
2511 
2512 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2513 
2514 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2515 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2516 			" tid %d to_fw %d",
2517 			__func__, msdu_info->meta_data[0],
2518 			msdu_info->meta_data[1],
2519 			msdu_info->meta_data[2],
2520 			msdu_info->meta_data[3],
2521 			msdu_info->meta_data[4],
2522 			msdu_info->meta_data[5],
2523 			msdu_info->tid, msdu_info->exception_fw);
2524 
2525 	return nbuf;
2526 }
2527 #else
2528 static
2529 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2530 				struct dp_tx_msdu_info_s *msdu_info)
2531 {
2532 	return nbuf;
2533 }
2534 
2535 #endif
2536 
2537 /**
2538  * dp_check_exc_metadata() - Checks if parameters are valid
2539  * @tx_exc - holds all exception path parameters
2540  *
2541  * Returns true when all the parameters are valid else false
2542  *
2543  */
2544 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2545 {
2546 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2547 			    HTT_INVALID_TID);
2548 	bool invalid_encap_type =
2549 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2550 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2551 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2552 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2553 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2554 			       tx_exc->ppdu_cookie == 0);
2555 
2556 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2557 	    invalid_cookie) {
2558 		return false;
2559 	}
2560 
2561 	return true;
2562 }
2563 
2564 #ifdef ATH_SUPPORT_IQUE
2565 /**
2566  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2567  * @vdev: vdev handle
2568  * @nbuf: skb
2569  *
2570  * Return: true on success,
2571  *         false on failure
2572  */
2573 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2574 {
2575 	qdf_ether_header_t *eh;
2576 
2577 	/* Mcast to Ucast Conversion*/
2578 	if (qdf_likely(!vdev->mcast_enhancement_en))
2579 		return true;
2580 
2581 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2582 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2583 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2584 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2585 
2586 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2587 				 qdf_nbuf_len(nbuf));
2588 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2589 				QDF_STATUS_SUCCESS) {
2590 			return false;
2591 		}
2592 
2593 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2594 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2595 					QDF_STATUS_SUCCESS) {
2596 				return false;
2597 			}
2598 		}
2599 	}
2600 
2601 	return true;
2602 }
2603 #else
2604 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2605 {
2606 	return true;
2607 }
2608 #endif
2609 
2610 /**
2611  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2612  * @nbuf: qdf_nbuf_t
2613  * @vdev: struct dp_vdev *
2614  *
2615  * Allow packet for processing only if it is for peer client which is
2616  * connected with same vap. Drop packet if client is connected to
2617  * different vap.
2618  *
2619  * Return: QDF_STATUS
2620  */
2621 static inline QDF_STATUS
2622 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2623 {
2624 	struct dp_ast_entry *dst_ast_entry = NULL;
2625 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2626 
2627 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2628 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2629 		return QDF_STATUS_SUCCESS;
2630 
2631 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2632 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2633 							eh->ether_dhost,
2634 							vdev->vdev_id);
2635 
2636 	/* If there is no ast entry, return failure */
2637 	if (qdf_unlikely(!dst_ast_entry)) {
2638 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2639 		return QDF_STATUS_E_FAILURE;
2640 	}
2641 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2642 
2643 	return QDF_STATUS_SUCCESS;
2644 }
2645 
2646 /**
2647  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2648  * @soc: DP soc handle
2649  * @vdev_id: id of DP vdev handle
2650  * @nbuf: skb
2651  * @tx_exc_metadata: Handle that holds exception path meta data
2652  *
2653  * Entry point for Core Tx layer (DP_TX) invoked from
2654  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2655  *
2656  * Return: NULL on success,
2657  *         nbuf when it fails to send
2658  */
2659 qdf_nbuf_t
2660 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2661 		     qdf_nbuf_t nbuf,
2662 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2663 {
2664 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2665 	qdf_ether_header_t *eh = NULL;
2666 	struct dp_tx_msdu_info_s msdu_info;
2667 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2668 						     DP_MOD_ID_TX_EXCEPTION);
2669 
2670 	if (qdf_unlikely(!vdev))
2671 		goto fail;
2672 
2673 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2674 
2675 	if (!tx_exc_metadata)
2676 		goto fail;
2677 
2678 	msdu_info.tid = tx_exc_metadata->tid;
2679 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2680 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2681 			 QDF_MAC_ADDR_REF(nbuf->data));
2682 
2683 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2684 
2685 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2686 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2687 			"Invalid parameters in exception path");
2688 		goto fail;
2689 	}
2690 
2691 	/* Basic sanity checks for unsupported packets */
2692 
2693 	/* MESH mode */
2694 	if (qdf_unlikely(vdev->mesh_vdev)) {
2695 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2696 			"Mesh mode is not supported in exception path");
2697 		goto fail;
2698 	}
2699 
2700 	/*
2701 	 * Classify the frame and call corresponding
2702 	 * "prepare" function which extracts the segment (TSO)
2703 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2704 	 * into MSDU_INFO structure which is later used to fill
2705 	 * SW and HW descriptors.
2706 	 */
2707 	if (qdf_nbuf_is_tso(nbuf)) {
2708 		dp_verbose_debug("TSO frame %pK", vdev);
2709 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2710 				 qdf_nbuf_len(nbuf));
2711 
2712 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2713 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2714 					 qdf_nbuf_len(nbuf));
2715 			return nbuf;
2716 		}
2717 
2718 		goto send_multiple;
2719 	}
2720 
2721 	/* SG */
2722 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2723 		struct dp_tx_seg_info_s seg_info = {0};
2724 
2725 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2726 		if (!nbuf)
2727 			return NULL;
2728 
2729 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2730 
2731 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2732 				 qdf_nbuf_len(nbuf));
2733 
2734 		goto send_multiple;
2735 	}
2736 
2737 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2738 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2739 				 qdf_nbuf_len(nbuf));
2740 
2741 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2742 					       tx_exc_metadata->ppdu_cookie);
2743 	}
2744 
2745 	/*
2746 	 * Get HW Queue to use for this frame.
2747 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
2748 	 * dedicated for data and 1 for command.
2749 	 * "queue_id" maps to one hardware ring.
2750 	 *  With each ring, we also associate a unique Tx descriptor pool
2751 	 *  to minimize lock contention for these resources.
2752 	 */
2753 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2754 
2755 	/*
2756 	 * Check exception descriptors
2757 	 */
2758 	if (dp_tx_exception_limit_check(vdev))
2759 		goto fail;
2760 
2761 	/*  Single linear frame */
2762 	/*
2763 	 * If nbuf is a simple linear frame, use send_single function to
2764 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2765 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2766 	 */
2767 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2768 			tx_exc_metadata->peer_id, tx_exc_metadata);
2769 
2770 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2771 	return nbuf;
2772 
2773 send_multiple:
2774 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2775 
2776 fail:
2777 	if (vdev)
2778 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2779 	dp_verbose_debug("pkt send failed");
2780 	return nbuf;
2781 }
2782 
2783 /**
2784  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2785  *      in exception path in special case to avoid regular exception path chk.
2786  * @soc: DP soc handle
2787  * @vdev_id: id of DP vdev handle
2788  * @nbuf: skb
2789  * @tx_exc_metadata: Handle that holds exception path meta data
2790  *
2791  * Entry point for Core Tx layer (DP_TX) invoked from
2792  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2793  *
2794  * Return: NULL on success,
2795  *         nbuf when it fails to send
2796  */
2797 qdf_nbuf_t
2798 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2799 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
2800 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2801 {
2802 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2803 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2804 						     DP_MOD_ID_TX_EXCEPTION);
2805 
2806 	if (qdf_unlikely(!vdev))
2807 		goto fail;
2808 
2809 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2810 			== QDF_STATUS_E_FAILURE)) {
2811 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2812 		goto fail;
2813 	}
2814 
2815 	/* Unref count as it will agin be taken inside dp_tx_exception */
2816 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2817 
2818 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2819 
2820 fail:
2821 	if (vdev)
2822 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2823 	dp_verbose_debug("pkt send failed");
2824 	return nbuf;
2825 }
2826 
2827 /**
2828  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2829  * @soc: DP soc handle
2830  * @vdev_id: DP vdev handle
2831  * @nbuf: skb
2832  *
2833  * Entry point for Core Tx layer (DP_TX) invoked from
2834  * hard_start_xmit in OSIF/HDD
2835  *
2836  * Return: NULL on success,
2837  *         nbuf when it fails to send
2838  */
2839 #ifdef MESH_MODE_SUPPORT
2840 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2841 			   qdf_nbuf_t nbuf)
2842 {
2843 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2844 	struct meta_hdr_s *mhdr;
2845 	qdf_nbuf_t nbuf_mesh = NULL;
2846 	qdf_nbuf_t nbuf_clone = NULL;
2847 	struct dp_vdev *vdev;
2848 	uint8_t no_enc_frame = 0;
2849 
2850 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2851 	if (!nbuf_mesh) {
2852 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2853 				"qdf_nbuf_unshare failed");
2854 		return nbuf;
2855 	}
2856 
2857 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
2858 	if (!vdev) {
2859 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2860 				"vdev is NULL for vdev_id %d", vdev_id);
2861 		return nbuf;
2862 	}
2863 
2864 	nbuf = nbuf_mesh;
2865 
2866 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2867 
2868 	if ((vdev->sec_type != cdp_sec_type_none) &&
2869 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2870 		no_enc_frame = 1;
2871 
2872 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2873 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2874 
2875 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2876 		       !no_enc_frame) {
2877 		nbuf_clone = qdf_nbuf_clone(nbuf);
2878 		if (!nbuf_clone) {
2879 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2880 				"qdf_nbuf_clone failed");
2881 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2882 			return nbuf;
2883 		}
2884 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2885 	}
2886 
2887 	if (nbuf_clone) {
2888 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
2889 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2890 		} else {
2891 			qdf_nbuf_free(nbuf_clone);
2892 		}
2893 	}
2894 
2895 	if (no_enc_frame)
2896 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2897 	else
2898 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2899 
2900 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
2901 	if ((!nbuf) && no_enc_frame) {
2902 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2903 	}
2904 
2905 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2906 	return nbuf;
2907 }
2908 
2909 #else
2910 
2911 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2912 			   qdf_nbuf_t nbuf)
2913 {
2914 	return dp_tx_send(soc, vdev_id, nbuf);
2915 }
2916 
2917 #endif
2918 
2919 /**
2920  * dp_tx_nawds_handler() - NAWDS handler
2921  *
2922  * @soc: DP soc handle
2923  * @vdev_id: id of DP vdev handle
2924  * @msdu_info: msdu_info required to create HTT metadata
2925  * @nbuf: skb
2926  *
2927  * This API transfers the multicast frames with the peer id
2928  * on NAWDS enabled peer.
2929 
2930  * Return: none
2931  */
2932 
2933 static inline
2934 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2935 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2936 {
2937 	struct dp_peer *peer = NULL;
2938 	qdf_nbuf_t nbuf_clone = NULL;
2939 	uint16_t peer_id = DP_INVALID_PEER;
2940 	uint16_t sa_peer_id = DP_INVALID_PEER;
2941 	struct dp_ast_entry *ast_entry = NULL;
2942 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2943 
2944 	qdf_spin_lock_bh(&soc->ast_lock);
2945 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2946 				(soc,
2947 				 (uint8_t *)(eh->ether_shost),
2948 				 vdev->pdev->pdev_id);
2949 
2950 	if (ast_entry)
2951 		sa_peer_id = ast_entry->peer_id;
2952 	qdf_spin_unlock_bh(&soc->ast_lock);
2953 
2954 	qdf_spin_lock_bh(&vdev->peer_list_lock);
2955 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2956 		if (!peer->bss_peer && peer->nawds_enabled) {
2957 			peer_id = peer->peer_id;
2958 			/* Multicast packets needs to be
2959 			 * dropped in case of intra bss forwarding
2960 			 */
2961 			if (sa_peer_id == peer->peer_id) {
2962 				QDF_TRACE(QDF_MODULE_ID_DP,
2963 					  QDF_TRACE_LEVEL_DEBUG,
2964 					  " %s: multicast packet",  __func__);
2965 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2966 				continue;
2967 			}
2968 			nbuf_clone = qdf_nbuf_clone(nbuf);
2969 
2970 			if (!nbuf_clone) {
2971 				QDF_TRACE(QDF_MODULE_ID_DP,
2972 					  QDF_TRACE_LEVEL_ERROR,
2973 					  FL("nbuf clone failed"));
2974 				break;
2975 			}
2976 
2977 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2978 							    msdu_info, peer_id,
2979 							    NULL);
2980 
2981 			if (nbuf_clone) {
2982 				QDF_TRACE(QDF_MODULE_ID_DP,
2983 					  QDF_TRACE_LEVEL_DEBUG,
2984 					  FL("pkt send failed"));
2985 				qdf_nbuf_free(nbuf_clone);
2986 			} else {
2987 				if (peer_id != DP_INVALID_PEER)
2988 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2989 							 1, qdf_nbuf_len(nbuf));
2990 			}
2991 		}
2992 	}
2993 
2994 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
2995 }
2996 
2997 /**
2998  * dp_tx_send() - Transmit a frame on a given VAP
2999  * @soc: DP soc handle
3000  * @vdev_id: id of DP vdev handle
3001  * @nbuf: skb
3002  *
3003  * Entry point for Core Tx layer (DP_TX) invoked from
3004  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3005  * cases
3006  *
3007  * Return: NULL on success,
3008  *         nbuf when it fails to send
3009  */
3010 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3011 		      qdf_nbuf_t nbuf)
3012 {
3013 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3014 	uint16_t peer_id = HTT_INVALID_PEER;
3015 	/*
3016 	 * doing a memzero is causing additional function call overhead
3017 	 * so doing static stack clearing
3018 	 */
3019 	struct dp_tx_msdu_info_s msdu_info = {0};
3020 	struct dp_vdev *vdev = NULL;
3021 
3022 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3023 		return nbuf;
3024 
3025 	/*
3026 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3027 	 * this in per packet path.
3028 	 *
3029 	 * As in this path vdev memory is already protected with netdev
3030 	 * tx lock
3031 	 */
3032 	vdev = soc->vdev_id_map[vdev_id];
3033 	if (qdf_unlikely(!vdev))
3034 		return nbuf;
3035 
3036 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3037 			 QDF_MAC_ADDR_REF(nbuf->data));
3038 
3039 	/*
3040 	 * Set Default Host TID value to invalid TID
3041 	 * (TID override disabled)
3042 	 */
3043 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3044 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3045 
3046 	if (qdf_unlikely(vdev->mesh_vdev)) {
3047 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3048 								&msdu_info);
3049 		if (!nbuf_mesh) {
3050 			dp_verbose_debug("Extracting mesh metadata failed");
3051 			return nbuf;
3052 		}
3053 		nbuf = nbuf_mesh;
3054 	}
3055 
3056 	/*
3057 	 * Get HW Queue to use for this frame.
3058 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3059 	 * dedicated for data and 1 for command.
3060 	 * "queue_id" maps to one hardware ring.
3061 	 *  With each ring, we also associate a unique Tx descriptor pool
3062 	 *  to minimize lock contention for these resources.
3063 	 */
3064 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3065 
3066 	/*
3067 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3068 	 *  Table 1 - Default DSCP-TID mapping table
3069 	 *  Table 2 - 1 DSCP-TID override table
3070 	 *
3071 	 * If we need a different DSCP-TID mapping for this vap,
3072 	 * call tid_classify to extract DSCP/ToS from frame and
3073 	 * map to a TID and store in msdu_info. This is later used
3074 	 * to fill in TCL Input descriptor (per-packet TID override).
3075 	 */
3076 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3077 
3078 	/*
3079 	 * Classify the frame and call corresponding
3080 	 * "prepare" function which extracts the segment (TSO)
3081 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3082 	 * into MSDU_INFO structure which is later used to fill
3083 	 * SW and HW descriptors.
3084 	 */
3085 	if (qdf_nbuf_is_tso(nbuf)) {
3086 		dp_verbose_debug("TSO frame %pK", vdev);
3087 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3088 				 qdf_nbuf_len(nbuf));
3089 
3090 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3091 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3092 					 qdf_nbuf_len(nbuf));
3093 			return nbuf;
3094 		}
3095 
3096 		goto send_multiple;
3097 	}
3098 
3099 	/* SG */
3100 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3101 		struct dp_tx_seg_info_s seg_info = {0};
3102 
3103 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3104 		if (!nbuf)
3105 			return NULL;
3106 
3107 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3108 
3109 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3110 				qdf_nbuf_len(nbuf));
3111 
3112 		goto send_multiple;
3113 	}
3114 
3115 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3116 		return NULL;
3117 
3118 	/* RAW */
3119 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3120 		struct dp_tx_seg_info_s seg_info = {0};
3121 
3122 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3123 		if (!nbuf)
3124 			return NULL;
3125 
3126 		dp_verbose_debug("Raw frame %pK", vdev);
3127 
3128 		goto send_multiple;
3129 
3130 	}
3131 
3132 	if (qdf_unlikely(vdev->nawds_enabled)) {
3133 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3134 					  qdf_nbuf_data(nbuf);
3135 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
3136 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
3137 
3138 		peer_id = DP_INVALID_PEER;
3139 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3140 				 1, qdf_nbuf_len(nbuf));
3141 	}
3142 
3143 	/*  Single linear frame */
3144 	/*
3145 	 * If nbuf is a simple linear frame, use send_single function to
3146 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3147 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3148 	 */
3149 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3150 
3151 	return nbuf;
3152 
3153 send_multiple:
3154 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3155 
3156 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3157 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3158 
3159 	return nbuf;
3160 }
3161 
3162 /**
3163  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3164  *      case to vaoid check in perpkt path.
3165  * @soc: DP soc handle
3166  * @vdev_id: id of DP vdev handle
3167  * @nbuf: skb
3168  *
3169  * Entry point for Core Tx layer (DP_TX) invoked from
3170  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3171  * with special condition to avoid per pkt check in dp_tx_send
3172  *
3173  * Return: NULL on success,
3174  *         nbuf when it fails to send
3175  */
3176 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3177 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3178 {
3179 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3180 	struct dp_vdev *vdev = NULL;
3181 
3182 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3183 		return nbuf;
3184 
3185 	/*
3186 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3187 	 * this in per packet path.
3188 	 *
3189 	 * As in this path vdev memory is already protected with netdev
3190 	 * tx lock
3191 	 */
3192 	vdev = soc->vdev_id_map[vdev_id];
3193 	if (qdf_unlikely(!vdev))
3194 		return nbuf;
3195 
3196 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3197 			== QDF_STATUS_E_FAILURE)) {
3198 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3199 		return nbuf;
3200 	}
3201 
3202 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3203 }
3204 
3205 /**
3206  * dp_tx_reinject_handler() - Tx Reinject Handler
3207  * @soc: datapath soc handle
3208  * @vdev: datapath vdev handle
3209  * @tx_desc: software descriptor head pointer
3210  * @status : Tx completion status from HTT descriptor
3211  *
3212  * This function reinjects frames back to Target.
3213  * Todo - Host queue needs to be added
3214  *
3215  * Return: none
3216  */
3217 static
3218 void dp_tx_reinject_handler(struct dp_soc *soc,
3219 			    struct dp_vdev *vdev,
3220 			    struct dp_tx_desc_s *tx_desc,
3221 			    uint8_t *status)
3222 {
3223 	struct dp_peer *peer = NULL;
3224 	uint32_t peer_id = HTT_INVALID_PEER;
3225 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3226 	qdf_nbuf_t nbuf_copy = NULL;
3227 	struct dp_tx_msdu_info_s msdu_info;
3228 #ifdef WDS_VENDOR_EXTENSION
3229 	int is_mcast = 0, is_ucast = 0;
3230 	int num_peers_3addr = 0;
3231 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3232 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3233 #endif
3234 
3235 	qdf_assert(vdev);
3236 
3237 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3238 
3239 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3240 
3241 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3242 			"%s Tx reinject path", __func__);
3243 
3244 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3245 			qdf_nbuf_len(tx_desc->nbuf));
3246 
3247 #ifdef WDS_VENDOR_EXTENSION
3248 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3249 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3250 	} else {
3251 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3252 	}
3253 	is_ucast = !is_mcast;
3254 
3255 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3256 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3257 		if (peer->bss_peer)
3258 			continue;
3259 
3260 		/* Detect wds peers that use 3-addr framing for mcast.
3261 		 * if there are any, the bss_peer is used to send the
3262 		 * the mcast frame using 3-addr format. all wds enabled
3263 		 * peers that use 4-addr framing for mcast frames will
3264 		 * be duplicated and sent as 4-addr frames below.
3265 		 */
3266 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
3267 			num_peers_3addr = 1;
3268 			break;
3269 		}
3270 	}
3271 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3272 #endif
3273 
3274 	if (qdf_unlikely(vdev->mesh_vdev)) {
3275 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3276 	} else {
3277 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3278 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3279 			if ((peer->peer_id != HTT_INVALID_PEER) &&
3280 #ifdef WDS_VENDOR_EXTENSION
3281 			/*
3282 			 * . if 3-addr STA, then send on BSS Peer
3283 			 * . if Peer WDS enabled and accept 4-addr mcast,
3284 			 * send mcast on that peer only
3285 			 * . if Peer WDS enabled and accept 4-addr ucast,
3286 			 * send ucast on that peer only
3287 			 */
3288 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
3289 			 (peer->wds_enabled &&
3290 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
3291 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
3292 #else
3293 			((peer->bss_peer &&
3294 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
3295 #endif
3296 				peer_id = DP_INVALID_PEER;
3297 
3298 				nbuf_copy = qdf_nbuf_copy(nbuf);
3299 
3300 				if (!nbuf_copy) {
3301 					QDF_TRACE(QDF_MODULE_ID_DP,
3302 						QDF_TRACE_LEVEL_DEBUG,
3303 						FL("nbuf copy failed"));
3304 					break;
3305 				}
3306 
3307 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3308 						nbuf_copy,
3309 						&msdu_info,
3310 						peer_id,
3311 						NULL);
3312 
3313 				if (nbuf_copy) {
3314 					QDF_TRACE(QDF_MODULE_ID_DP,
3315 						QDF_TRACE_LEVEL_DEBUG,
3316 						FL("pkt send failed"));
3317 					qdf_nbuf_free(nbuf_copy);
3318 				}
3319 			}
3320 		}
3321 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3322 	}
3323 
3324 	qdf_nbuf_free(nbuf);
3325 
3326 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3327 }
3328 
3329 /**
3330  * dp_tx_inspect_handler() - Tx Inspect Handler
3331  * @soc: datapath soc handle
3332  * @vdev: datapath vdev handle
3333  * @tx_desc: software descriptor head pointer
3334  * @status : Tx completion status from HTT descriptor
3335  *
3336  * Handles Tx frames sent back to Host for inspection
3337  * (ProxyARP)
3338  *
3339  * Return: none
3340  */
3341 static void dp_tx_inspect_handler(struct dp_soc *soc,
3342 				  struct dp_vdev *vdev,
3343 				  struct dp_tx_desc_s *tx_desc,
3344 				  uint8_t *status)
3345 {
3346 
3347 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3348 			"%s Tx inspect path",
3349 			__func__);
3350 
3351 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3352 			 qdf_nbuf_len(tx_desc->nbuf));
3353 
3354 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3355 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3356 }
3357 
3358 #ifdef FEATURE_PERPKT_INFO
3359 /**
3360  * dp_get_completion_indication_for_stack() - send completion to stack
3361  * @soc : dp_soc handle
3362  * @pdev: dp_pdev handle
3363  * @peer: dp peer handle
3364  * @ts: transmit completion status structure
3365  * @netbuf: Buffer pointer for free
3366  *
3367  * This function is used for indication whether buffer needs to be
3368  * sent to stack for freeing or not
3369 */
3370 QDF_STATUS
3371 dp_get_completion_indication_for_stack(struct dp_soc *soc,
3372 				       struct dp_pdev *pdev,
3373 				       struct dp_peer *peer,
3374 				       struct hal_tx_completion_status *ts,
3375 				       qdf_nbuf_t netbuf,
3376 				       uint64_t time_latency)
3377 {
3378 	struct tx_capture_hdr *ppdu_hdr;
3379 	uint16_t peer_id = ts->peer_id;
3380 	uint32_t ppdu_id = ts->ppdu_id;
3381 	uint8_t first_msdu = ts->first_msdu;
3382 	uint8_t last_msdu = ts->last_msdu;
3383 	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
3384 
3385 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
3386 			 !pdev->latency_capture_enable))
3387 		return QDF_STATUS_E_NOSUPPORT;
3388 
3389 	if (!peer) {
3390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3391 				FL("Peer Invalid"));
3392 		return QDF_STATUS_E_INVAL;
3393 	}
3394 
3395 	if (pdev->mcopy_mode) {
3396 		/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
3397 		 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
3398 		 * for each MPDU
3399 		 */
3400 		if (pdev->mcopy_mode == M_COPY) {
3401 			if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
3402 			    (pdev->m_copy_id.tx_peer_id == peer_id)) {
3403 				return QDF_STATUS_E_INVAL;
3404 			}
3405 		}
3406 
3407 		if (!first_msdu)
3408 			return QDF_STATUS_E_INVAL;
3409 
3410 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
3411 		pdev->m_copy_id.tx_peer_id = peer_id;
3412 	}
3413 
3414 	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
3415 		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
3416 		if (!netbuf) {
3417 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3418 				  FL("No headroom"));
3419 			return QDF_STATUS_E_NOMEM;
3420 		}
3421 	}
3422 
3423 	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
3424 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3425 				FL("No headroom"));
3426 		return QDF_STATUS_E_NOMEM;
3427 	}
3428 
3429 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
3430 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
3431 		     QDF_MAC_ADDR_SIZE);
3432 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
3433 		     QDF_MAC_ADDR_SIZE);
3434 	ppdu_hdr->ppdu_id = ppdu_id;
3435 	ppdu_hdr->peer_id = peer_id;
3436 	ppdu_hdr->first_msdu = first_msdu;
3437 	ppdu_hdr->last_msdu = last_msdu;
3438 	if (qdf_unlikely(pdev->latency_capture_enable)) {
3439 		ppdu_hdr->tsf = ts->tsf;
3440 		ppdu_hdr->time_latency = time_latency;
3441 	}
3442 
3443 	return QDF_STATUS_SUCCESS;
3444 }
3445 
3446 
3447 /**
3448  * dp_send_completion_to_stack() - send completion to stack
3449  * @soc :  dp_soc handle
3450  * @pdev:  dp_pdev handle
3451  * @peer_id: peer_id of the peer for which completion came
3452  * @ppdu_id: ppdu_id
3453  * @netbuf: Buffer pointer for free
3454  *
3455  * This function is used to send completion to stack
3456  * to free buffer
3457 */
3458 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
3459 					uint16_t peer_id, uint32_t ppdu_id,
3460 					qdf_nbuf_t netbuf)
3461 {
3462 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
3463 				netbuf, peer_id,
3464 				WDI_NO_VAL, pdev->pdev_id);
3465 }
3466 #else
3467 static QDF_STATUS
3468 dp_get_completion_indication_for_stack(struct dp_soc *soc,
3469 				       struct dp_pdev *pdev,
3470 				       struct dp_peer *peer,
3471 				       struct hal_tx_completion_status *ts,
3472 				       qdf_nbuf_t netbuf,
3473 				       uint64_t time_latency)
3474 {
3475 	return QDF_STATUS_E_NOSUPPORT;
3476 }
3477 
3478 static void
3479 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
3480 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
3481 {
3482 }
3483 #endif
3484 
3485 #ifdef MESH_MODE_SUPPORT
3486 /**
3487  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3488  *                                         in mesh meta header
3489  * @tx_desc: software descriptor head pointer
3490  * @ts: pointer to tx completion stats
3491  * Return: none
3492  */
3493 static
3494 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3495 		struct hal_tx_completion_status *ts)
3496 {
3497 	struct meta_hdr_s *mhdr;
3498 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3499 
3500 	if (!tx_desc->msdu_ext_desc) {
3501 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3502 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3503 				"netbuf %pK offset %d",
3504 				netbuf, tx_desc->pkt_offset);
3505 			return;
3506 		}
3507 	}
3508 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
3509 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3510 			"netbuf %pK offset %lu", netbuf,
3511 			sizeof(struct meta_hdr_s));
3512 		return;
3513 	}
3514 
3515 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
3516 	mhdr->rssi = ts->ack_frame_rssi;
3517 	mhdr->band = tx_desc->pdev->operating_channel.band;
3518 	mhdr->channel = tx_desc->pdev->operating_channel.num;
3519 }
3520 
3521 #else
3522 static
3523 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3524 		struct hal_tx_completion_status *ts)
3525 {
3526 }
3527 
3528 #endif
3529 
3530 #ifdef QCA_PEER_EXT_STATS
3531 /*
3532  * dp_tx_compute_tid_delay() - Compute per TID delay
3533  * @stats: Per TID delay stats
3534  * @tx_desc: Software Tx descriptor
3535  *
3536  * Compute the software enqueue and hw enqueue delays and
3537  * update the respective histograms
3538  *
3539  * Return: void
3540  */
3541 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3542 				    struct dp_tx_desc_s *tx_desc)
3543 {
3544 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3545 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3546 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3547 
3548 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3549 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3550 	timestamp_hw_enqueue = tx_desc->timestamp;
3551 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3552 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3553 					 timestamp_hw_enqueue);
3554 
3555 	/*
3556 	 * Update the Tx software enqueue delay and HW enque-Completion delay.
3557 	 */
3558 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3559 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3560 }
3561 
3562 /*
3563  * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3564  * @peer: DP peer context
3565  * @tx_desc: Tx software descriptor
3566  * @tid: Transmission ID
3567  * @ring_id: Rx CPU context ID/CPU_ID
3568  *
3569  * Update the peer extended stats. These are enhanced other
3570  * delay stats per msdu level.
3571  *
3572  * Return: void
3573  */
3574 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3575 					struct dp_tx_desc_s *tx_desc,
3576 					uint8_t tid, uint8_t ring_id)
3577 {
3578 	struct dp_pdev *pdev = peer->vdev->pdev;
3579 	struct dp_soc *soc = NULL;
3580 	struct cdp_peer_ext_stats *pext_stats = NULL;
3581 
3582 	soc = pdev->soc;
3583 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3584 		return;
3585 
3586 	pext_stats = peer->pext_stats;
3587 
3588 	qdf_assert(pext_stats);
3589 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
3590 
3591 	/*
3592 	 * For non-TID packets use the TID 9
3593 	 */
3594 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3595 		tid = CDP_MAX_DATA_TIDS - 1;
3596 
3597 	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3598 				tx_desc);
3599 }
3600 #else
3601 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3602 					       struct dp_tx_desc_s *tx_desc,
3603 					       uint8_t tid, uint8_t ring_id)
3604 {
3605 }
3606 #endif
3607 
3608 /**
3609  * dp_tx_compute_delay() - Compute and fill in all timestamps
3610  *				to pass in correct fields
3611  *
3612  * @vdev: pdev handle
3613  * @tx_desc: tx descriptor
3614  * @tid: tid value
3615  * @ring_id: TCL or WBM ring number for transmit path
3616  * Return: none
3617  */
3618 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3619 				struct dp_tx_desc_s *tx_desc,
3620 				uint8_t tid, uint8_t ring_id)
3621 {
3622 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3623 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3624 
3625 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3626 		return;
3627 
3628 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3629 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3630 	timestamp_hw_enqueue = tx_desc->timestamp;
3631 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3632 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3633 					 timestamp_hw_enqueue);
3634 	interframe_delay = (uint32_t)(timestamp_ingress -
3635 				      vdev->prev_tx_enq_tstamp);
3636 
3637 	/*
3638 	 * Delay in software enqueue
3639 	 */
3640 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3641 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3642 	/*
3643 	 * Delay between packet enqueued to HW and Tx completion
3644 	 */
3645 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3646 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3647 
3648 	/*
3649 	 * Update interframe delay stats calculated at hardstart receive point.
3650 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3651 	 * interframe delay will not be calculate correctly for 1st frame.
3652 	 * On the other side, this will help in avoiding extra per packet check
3653 	 * of !vdev->prev_tx_enq_tstamp.
3654 	 */
3655 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3656 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3657 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3658 }
3659 
3660 #ifdef DISABLE_DP_STATS
3661 static
3662 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3663 {
3664 }
3665 #else
3666 static
3667 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3668 {
3669 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3670 
3671 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3672 	if (subtype != QDF_PROTO_INVALID)
3673 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3674 }
3675 #endif
3676 
3677 /**
3678  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3679  *				per wbm ring
3680  *
3681  * @tx_desc: software descriptor head pointer
3682  * @ts: Tx completion status
3683  * @peer: peer handle
3684  * @ring_id: ring number
3685  *
3686  * Return: None
3687  */
3688 static inline void
3689 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3690 			struct hal_tx_completion_status *ts,
3691 			struct dp_peer *peer, uint8_t ring_id)
3692 {
3693 	struct dp_pdev *pdev = peer->vdev->pdev;
3694 	struct dp_soc *soc = NULL;
3695 	uint8_t mcs, pkt_type;
3696 	uint8_t tid = ts->tid;
3697 	uint32_t length;
3698 	struct cdp_tid_tx_stats *tid_stats;
3699 
3700 	if (!pdev)
3701 		return;
3702 
3703 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3704 		tid = CDP_MAX_DATA_TIDS - 1;
3705 
3706 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3707 	soc = pdev->soc;
3708 
3709 	mcs = ts->mcs;
3710 	pkt_type = ts->pkt_type;
3711 
3712 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3713 		dp_err("Release source is not from TQM");
3714 		return;
3715 	}
3716 
3717 	length = qdf_nbuf_len(tx_desc->nbuf);
3718 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3719 
3720 	if (qdf_unlikely(pdev->delay_stats_flag))
3721 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3722 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3723 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3724 
3725 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3726 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3727 
3728 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3729 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3730 
3731 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3732 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3733 
3734 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3735 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3736 
3737 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3738 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3739 
3740 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3741 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3742 
3743 	/*
3744 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3745 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3746 	 * are no completions for failed cases. Hence updating tx_failed from
3747 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3748 	 * then this has to be removed
3749 	 */
3750 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3751 				peer->stats.tx.dropped.fw_rem_notx +
3752 				peer->stats.tx.dropped.fw_rem_tx +
3753 				peer->stats.tx.dropped.age_out +
3754 				peer->stats.tx.dropped.fw_reason1 +
3755 				peer->stats.tx.dropped.fw_reason2 +
3756 				peer->stats.tx.dropped.fw_reason3;
3757 
3758 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3759 		tid_stats->tqm_status_cnt[ts->status]++;
3760 	}
3761 
3762 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3763 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3764 		return;
3765 	}
3766 
3767 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3768 
3769 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3770 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3771 
3772 	/*
3773 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3774 	 * Return from here if HTT PPDU events are enabled.
3775 	 */
3776 	if (!(soc->process_tx_status))
3777 		return;
3778 
3779 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3780 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3781 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3782 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3783 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3784 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3785 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3786 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3787 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3788 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3789 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3790 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3791 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3792 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3793 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3794 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3795 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3796 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3797 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3798 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3799 
3800 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3801 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3802 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3803 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3804 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3805 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3806 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3807 
3808 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3809 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3810 			     &peer->stats, ts->peer_id,
3811 			     UPDATE_PEER_STATS, pdev->pdev_id);
3812 #endif
3813 }
3814 
3815 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3816 /**
3817  * dp_tx_flow_pool_lock() - take flow pool lock
3818  * @soc: core txrx main context
3819  * @tx_desc: tx desc
3820  *
3821  * Return: None
3822  */
3823 static inline
3824 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3825 			  struct dp_tx_desc_s *tx_desc)
3826 {
3827 	struct dp_tx_desc_pool_s *pool;
3828 	uint8_t desc_pool_id;
3829 
3830 	desc_pool_id = tx_desc->pool_id;
3831 	pool = &soc->tx_desc[desc_pool_id];
3832 
3833 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3834 }
3835 
3836 /**
3837  * dp_tx_flow_pool_unlock() - release flow pool lock
3838  * @soc: core txrx main context
3839  * @tx_desc: tx desc
3840  *
3841  * Return: None
3842  */
3843 static inline
3844 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3845 			    struct dp_tx_desc_s *tx_desc)
3846 {
3847 	struct dp_tx_desc_pool_s *pool;
3848 	uint8_t desc_pool_id;
3849 
3850 	desc_pool_id = tx_desc->pool_id;
3851 	pool = &soc->tx_desc[desc_pool_id];
3852 
3853 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3854 }
3855 #else
3856 static inline
3857 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3858 {
3859 }
3860 
3861 static inline
3862 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3863 {
3864 }
3865 #endif
3866 
3867 /**
3868  * dp_tx_notify_completion() - Notify tx completion for this desc
3869  * @soc: core txrx main context
3870  * @vdev: datapath vdev handle
3871  * @tx_desc: tx desc
3872  * @netbuf:  buffer
3873  * @status: tx status
3874  *
3875  * Return: none
3876  */
3877 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3878 					   struct dp_vdev *vdev,
3879 					   struct dp_tx_desc_s *tx_desc,
3880 					   qdf_nbuf_t netbuf,
3881 					   uint8_t status)
3882 {
3883 	void *osif_dev;
3884 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3885 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
3886 
3887 	qdf_assert(tx_desc);
3888 
3889 	dp_tx_flow_pool_lock(soc, tx_desc);
3890 
3891 	if (!vdev ||
3892 	    !vdev->osif_vdev) {
3893 		dp_tx_flow_pool_unlock(soc, tx_desc);
3894 		return;
3895 	}
3896 
3897 	osif_dev = vdev->osif_vdev;
3898 	tx_compl_cbk = vdev->tx_comp;
3899 	dp_tx_flow_pool_unlock(soc, tx_desc);
3900 
3901 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3902 		flag |= BIT(QDF_TX_RX_STATUS_OK);
3903 
3904 	if (tx_compl_cbk)
3905 		tx_compl_cbk(netbuf, osif_dev, flag);
3906 }
3907 
3908 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3909  * @pdev: pdev handle
3910  * @tid: tid value
3911  * @txdesc_ts: timestamp from txdesc
3912  * @ppdu_id: ppdu id
3913  *
3914  * Return: none
3915  */
3916 #ifdef FEATURE_PERPKT_INFO
3917 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3918 					       struct dp_peer *peer,
3919 					       uint8_t tid,
3920 					       uint64_t txdesc_ts,
3921 					       uint32_t ppdu_id)
3922 {
3923 	uint64_t delta_ms;
3924 	struct cdp_tx_sojourn_stats *sojourn_stats;
3925 
3926 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3927 		return;
3928 
3929 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3930 			 tid >= CDP_DATA_TID_MAX))
3931 		return;
3932 
3933 	if (qdf_unlikely(!pdev->sojourn_buf))
3934 		return;
3935 
3936 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3937 		qdf_nbuf_data(pdev->sojourn_buf);
3938 
3939 	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
3940 
3941 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3942 				txdesc_ts;
3943 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3944 			    delta_ms);
3945 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3946 	sojourn_stats->num_msdus[tid] = 1;
3947 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3948 		peer->avg_sojourn_msdu[tid].internal;
3949 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3950 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3951 			     WDI_NO_VAL, pdev->pdev_id);
3952 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3953 	sojourn_stats->num_msdus[tid] = 0;
3954 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3955 }
3956 #else
3957 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3958 					       struct dp_peer *peer,
3959 					       uint8_t tid,
3960 					       uint64_t txdesc_ts,
3961 					       uint32_t ppdu_id)
3962 {
3963 }
3964 #endif
3965 
3966 /**
3967  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3968  * @soc: DP Soc handle
3969  * @tx_desc: software Tx descriptor
3970  * @ts : Tx completion status from HAL/HTT descriptor
3971  *
3972  * Return: none
3973  */
3974 static inline void
3975 dp_tx_comp_process_desc(struct dp_soc *soc,
3976 			struct dp_tx_desc_s *desc,
3977 			struct hal_tx_completion_status *ts,
3978 			struct dp_peer *peer)
3979 {
3980 	uint64_t time_latency = 0;
3981 	/*
3982 	 * m_copy/tx_capture modes are not supported for
3983 	 * scatter gather packets
3984 	 */
3985 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3986 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
3987 				desc->timestamp);
3988 	}
3989 	if (!(desc->msdu_ext_desc)) {
3990 		if (QDF_STATUS_SUCCESS ==
3991 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3992 			return;
3993 		}
3994 
3995 		if (QDF_STATUS_SUCCESS ==
3996 		    dp_get_completion_indication_for_stack(soc,
3997 							   desc->pdev,
3998 							   peer, ts,
3999 							   desc->nbuf,
4000 							   time_latency)) {
4001 			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
4002 						     QDF_DMA_TO_DEVICE,
4003 						     desc->nbuf->len);
4004 			dp_send_completion_to_stack(soc,
4005 						    desc->pdev,
4006 						    ts->peer_id,
4007 						    ts->ppdu_id,
4008 						    desc->nbuf);
4009 			return;
4010 		}
4011 	}
4012 
4013 	dp_tx_comp_free_buf(soc, desc);
4014 }
4015 
4016 #ifdef DISABLE_DP_STATS
4017 /**
4018  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4019  * @soc: core txrx main context
4020  * @tx_desc: tx desc
4021  * @status: tx status
4022  *
4023  * Return: none
4024  */
4025 static inline
4026 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4027 				     struct dp_vdev *vdev,
4028 				     struct dp_tx_desc_s *tx_desc,
4029 				     uint8_t status)
4030 {
4031 }
4032 #else
4033 static inline
4034 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4035 				     struct dp_vdev *vdev,
4036 				     struct dp_tx_desc_s *tx_desc,
4037 				     uint8_t status)
4038 {
4039 	void *osif_dev;
4040 	ol_txrx_stats_rx_fp stats_cbk;
4041 	uint8_t pkt_type;
4042 
4043 	qdf_assert(tx_desc);
4044 
4045 	if (!vdev ||
4046 	    !vdev->osif_vdev ||
4047 	    !vdev->stats_cb)
4048 		return;
4049 
4050 	osif_dev = vdev->osif_vdev;
4051 	stats_cbk = vdev->stats_cb;
4052 
4053 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4054 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4055 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4056 			  &pkt_type);
4057 }
4058 #endif
4059 
4060 /**
4061  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4062  * @soc: DP soc handle
4063  * @tx_desc: software descriptor head pointer
4064  * @ts: Tx completion status
4065  * @peer: peer handle
4066  * @ring_id: ring number
4067  *
4068  * Return: none
4069  */
4070 static inline
4071 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4072 				  struct dp_tx_desc_s *tx_desc,
4073 				  struct hal_tx_completion_status *ts,
4074 				  struct dp_peer *peer, uint8_t ring_id)
4075 {
4076 	uint32_t length;
4077 	qdf_ether_header_t *eh;
4078 	struct dp_vdev *vdev = NULL;
4079 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4080 	enum qdf_dp_tx_rx_status dp_status;
4081 
4082 	if (!nbuf) {
4083 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4084 		goto out;
4085 	}
4086 
4087 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4088 	length = qdf_nbuf_len(nbuf);
4089 
4090 	dp_status = dp_tx_hw_to_qdf(ts->status);
4091 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4092 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4093 				 QDF_TRACE_DEFAULT_PDEV_ID,
4094 				 qdf_nbuf_data_addr(nbuf),
4095 				 sizeof(qdf_nbuf_data(nbuf)),
4096 				 tx_desc->id, ts->status, dp_status));
4097 
4098 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4099 				"-------------------- \n"
4100 				"Tx Completion Stats: \n"
4101 				"-------------------- \n"
4102 				"ack_frame_rssi = %d \n"
4103 				"first_msdu = %d \n"
4104 				"last_msdu = %d \n"
4105 				"msdu_part_of_amsdu = %d \n"
4106 				"rate_stats valid = %d \n"
4107 				"bw = %d \n"
4108 				"pkt_type = %d \n"
4109 				"stbc = %d \n"
4110 				"ldpc = %d \n"
4111 				"sgi = %d \n"
4112 				"mcs = %d \n"
4113 				"ofdma = %d \n"
4114 				"tones_in_ru = %d \n"
4115 				"tsf = %d \n"
4116 				"ppdu_id = %d \n"
4117 				"transmit_cnt = %d \n"
4118 				"tid = %d \n"
4119 				"peer_id = %d\n",
4120 				ts->ack_frame_rssi, ts->first_msdu,
4121 				ts->last_msdu, ts->msdu_part_of_amsdu,
4122 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4123 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4124 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4125 				ts->transmit_cnt, ts->tid, ts->peer_id);
4126 
4127 	/* Update SoC level stats */
4128 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4129 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4130 
4131 	if (!peer) {
4132 		dp_info_rl("peer is null or deletion in progress");
4133 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4134 		goto out;
4135 	}
4136 	vdev = peer->vdev;
4137 
4138 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4139 
4140 	/* Update per-packet stats for mesh mode */
4141 	if (qdf_unlikely(vdev->mesh_vdev) &&
4142 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4143 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4144 
4145 	/* Update peer level stats */
4146 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
4147 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4148 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
4149 
4150 			if ((peer->vdev->tx_encap_type ==
4151 				htt_cmn_pkt_type_ethernet) &&
4152 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4153 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
4154 			}
4155 		}
4156 	} else {
4157 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
4158 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4159 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
4160 			if (qdf_unlikely(peer->in_twt)) {
4161 				DP_STATS_INC_PKT(peer,
4162 						 tx.tx_success_twt,
4163 						 1, length);
4164 			}
4165 		}
4166 	}
4167 
4168 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
4169 	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
4170 
4171 #ifdef QCA_SUPPORT_RDK_STATS
4172 	if (soc->rdkstats_enabled)
4173 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
4174 					    tx_desc->timestamp,
4175 					    ts->ppdu_id);
4176 #endif
4177 
4178 out:
4179 	return;
4180 }
4181 /**
4182  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4183  * @soc: core txrx main context
4184  * @comp_head: software descriptor head pointer
4185  * @ring_id: ring number
4186  *
4187  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4188  * and release the software descriptors after processing is complete
4189  *
4190  * Return: none
4191  */
4192 static void
4193 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4194 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4195 {
4196 	struct dp_tx_desc_s *desc;
4197 	struct dp_tx_desc_s *next;
4198 	struct hal_tx_completion_status ts;
4199 	struct dp_peer *peer = NULL;
4200 	uint16_t peer_id = DP_INVALID_PEER;
4201 	qdf_nbuf_t netbuf;
4202 
4203 	desc = comp_head;
4204 
4205 	while (desc) {
4206 		if (peer_id != desc->peer_id) {
4207 			if (peer)
4208 				dp_peer_unref_delete(peer,
4209 						     DP_MOD_ID_TX_COMP);
4210 			peer_id = desc->peer_id;
4211 			peer = dp_peer_get_ref_by_id(soc, peer_id,
4212 						     DP_MOD_ID_TX_COMP);
4213 		}
4214 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4215 			struct dp_pdev *pdev = desc->pdev;
4216 
4217 			if (qdf_likely(peer)) {
4218 				/*
4219 				 * Increment peer statistics
4220 				 * Minimal statistics update done here
4221 				 */
4222 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
4223 						 desc->length);
4224 
4225 				if (desc->tx_status !=
4226 						HAL_TX_TQM_RR_FRAME_ACKED)
4227 					DP_STATS_INC(peer, tx.tx_failed, 1);
4228 			}
4229 
4230 			qdf_assert(pdev);
4231 			dp_tx_outstanding_dec(pdev);
4232 
4233 			/*
4234 			 * Calling a QDF WRAPPER here is creating signifcant
4235 			 * performance impact so avoided the wrapper call here
4236 			 */
4237 			next = desc->next;
4238 			qdf_mem_unmap_nbytes_single(soc->osdev,
4239 						    desc->dma_addr,
4240 						    QDF_DMA_TO_DEVICE,
4241 						    desc->length);
4242 			qdf_nbuf_free(desc->nbuf);
4243 			dp_tx_desc_free(soc, desc, desc->pool_id);
4244 			desc = next;
4245 			continue;
4246 		}
4247 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4248 
4249 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
4250 
4251 		netbuf = desc->nbuf;
4252 		/* check tx complete notification */
4253 		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
4254 			dp_tx_notify_completion(soc, peer->vdev, desc,
4255 						netbuf, ts.status);
4256 
4257 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
4258 
4259 		next = desc->next;
4260 
4261 		dp_tx_desc_release(desc, desc->pool_id);
4262 		desc = next;
4263 	}
4264 	if (peer)
4265 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
4266 }
4267 
4268 /**
4269  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
4270  * @soc: Handle to DP soc structure
4271  * @tx_desc: software descriptor head pointer
4272  * @status : Tx completion status from HTT descriptor
4273  * @ring_id: ring number
4274  *
4275  * This function will process HTT Tx indication messages from Target
4276  *
4277  * Return: none
4278  */
4279 static
4280 void dp_tx_process_htt_completion(struct dp_soc *soc,
4281 				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
4282 				  uint8_t ring_id)
4283 {
4284 	uint8_t tx_status;
4285 	struct dp_pdev *pdev;
4286 	struct dp_vdev *vdev;
4287 	struct hal_tx_completion_status ts = {0};
4288 	uint32_t *htt_desc = (uint32_t *)status;
4289 	struct dp_peer *peer;
4290 	struct cdp_tid_tx_stats *tid_stats = NULL;
4291 	struct htt_soc *htt_handle;
4292 	uint8_t vdev_id;
4293 
4294 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
4295 	htt_handle = (struct htt_soc *)soc->htt_handle;
4296 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
4297 
4298 	/*
4299 	 * There can be scenario where WBM consuming descriptor enqueued
4300 	 * from TQM2WBM first and TQM completion can happen before MEC
4301 	 * notification comes from FW2WBM. Avoid access any field of tx
4302 	 * descriptor in case of MEC notify.
4303 	 */
4304 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
4305 		/*
4306 		 * Get vdev id from HTT status word in case of MEC
4307 		 * notification
4308 		 */
4309 		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
4310 		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4311 			return;
4312 
4313 		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4314 				DP_MOD_ID_HTT_COMP);
4315 		if (!vdev)
4316 			return;
4317 		dp_tx_mec_handler(vdev, status);
4318 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4319 		return;
4320 	}
4321 
4322 	/*
4323 	 * If the descriptor is already freed in vdev_detach,
4324 	 * continue to next descriptor
4325 	 */
4326 	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
4327 		QDF_TRACE(QDF_MODULE_ID_DP,
4328 				QDF_TRACE_LEVEL_INFO,
4329 				"Descriptor freed in vdev_detach %d",
4330 				tx_desc->id);
4331 		return;
4332 	}
4333 
4334 	pdev = tx_desc->pdev;
4335 
4336 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4337 		QDF_TRACE(QDF_MODULE_ID_DP,
4338 				QDF_TRACE_LEVEL_INFO,
4339 				"pdev in down state %d",
4340 				tx_desc->id);
4341 		dp_tx_comp_free_buf(soc, tx_desc);
4342 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4343 		return;
4344 	}
4345 
4346 	qdf_assert(tx_desc->pdev);
4347 
4348 	vdev_id = tx_desc->vdev_id;
4349 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4350 			DP_MOD_ID_HTT_COMP);
4351 
4352 	if (!vdev)
4353 		return;
4354 
4355 	switch (tx_status) {
4356 	case HTT_TX_FW2WBM_TX_STATUS_OK:
4357 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
4358 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
4359 	{
4360 		uint8_t tid;
4361 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
4362 			ts.peer_id =
4363 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
4364 						htt_desc[2]);
4365 			ts.tid =
4366 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
4367 						htt_desc[2]);
4368 		} else {
4369 			ts.peer_id = HTT_INVALID_PEER;
4370 			ts.tid = HTT_INVALID_TID;
4371 		}
4372 		ts.ppdu_id =
4373 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
4374 					htt_desc[1]);
4375 		ts.ack_frame_rssi =
4376 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
4377 					htt_desc[1]);
4378 
4379 		ts.tsf = htt_desc[3];
4380 		ts.first_msdu = 1;
4381 		ts.last_msdu = 1;
4382 		tid = ts.tid;
4383 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4384 			tid = CDP_MAX_DATA_TIDS - 1;
4385 
4386 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4387 
4388 		if (qdf_unlikely(pdev->delay_stats_flag))
4389 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
4390 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
4391 			tid_stats->htt_status_cnt[tx_status]++;
4392 		}
4393 
4394 		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
4395 					     DP_MOD_ID_HTT_COMP);
4396 
4397 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
4398 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
4399 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4400 
4401 		if (qdf_likely(peer))
4402 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
4403 
4404 		break;
4405 	}
4406 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
4407 	{
4408 		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
4409 		break;
4410 	}
4411 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
4412 	{
4413 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
4414 		break;
4415 	}
4416 	default:
4417 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
4418 			  "%s Invalid HTT tx_status %d\n",
4419 			  __func__, tx_status);
4420 		break;
4421 	}
4422 
4423 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4424 }
4425 
4426 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4427 static inline
4428 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
4429 {
4430 	bool limit_hit = false;
4431 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
4432 
4433 	limit_hit =
4434 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
4435 
4436 	if (limit_hit)
4437 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4438 
4439 	return limit_hit;
4440 }
4441 
4442 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4443 {
4444 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4445 }
4446 #else
4447 static inline
4448 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
4449 {
4450 	return false;
4451 }
4452 
4453 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4454 {
4455 	return false;
4456 }
4457 #endif
4458 
4459 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
4460 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
4461 			    uint32_t quota)
4462 {
4463 	void *tx_comp_hal_desc;
4464 	uint8_t buffer_src;
4465 	uint8_t pool_id;
4466 	uint32_t tx_desc_id;
4467 	struct dp_tx_desc_s *tx_desc = NULL;
4468 	struct dp_tx_desc_s *head_desc = NULL;
4469 	struct dp_tx_desc_s *tail_desc = NULL;
4470 	uint32_t num_processed = 0;
4471 	uint32_t count;
4472 	uint32_t num_avail_for_reap = 0;
4473 	bool force_break = false;
4474 
4475 	DP_HIST_INIT();
4476 
4477 more_data:
4478 	/* Re-initialize local variables to be re-used */
4479 	head_desc = NULL;
4480 	tail_desc = NULL;
4481 	count = 0;
4482 
4483 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
4484 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
4485 		return 0;
4486 	}
4487 
4488 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
4489 
4490 	if (num_avail_for_reap >= quota)
4491 		num_avail_for_reap = quota;
4492 
4493 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
4494 
4495 	/* Find head descriptor from completion ring */
4496 	while (qdf_likely(num_avail_for_reap)) {
4497 
4498 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
4499 		if (qdf_unlikely(!tx_comp_hal_desc))
4500 			break;
4501 
4502 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
4503 
4504 		/* If this buffer was not released by TQM or FW, then it is not
4505 		 * Tx completion indication, assert */
4506 		if (qdf_unlikely(buffer_src !=
4507 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
4508 				 (qdf_unlikely(buffer_src !=
4509 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
4510 			uint8_t wbm_internal_error;
4511 
4512 			dp_err_rl(
4513 				"Tx comp release_src != TQM | FW but from %d",
4514 				buffer_src);
4515 			hal_dump_comp_desc(tx_comp_hal_desc);
4516 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
4517 
4518 			/* When WBM sees NULL buffer_addr_info in any of
4519 			 * ingress rings it sends an error indication,
4520 			 * with wbm_internal_error=1, to a specific ring.
4521 			 * The WBM2SW ring used to indicate these errors is
4522 			 * fixed in HW, and that ring is being used as Tx
4523 			 * completion ring. These errors are not related to
4524 			 * Tx completions, and should just be ignored
4525 			 */
4526 			wbm_internal_error = hal_get_wbm_internal_error(
4527 							soc->hal_soc,
4528 							tx_comp_hal_desc);
4529 
4530 			if (wbm_internal_error) {
4531 				dp_err_rl("Tx comp wbm_internal_error!!");
4532 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
4533 
4534 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
4535 								buffer_src)
4536 					dp_handle_wbm_internal_error(
4537 						soc,
4538 						tx_comp_hal_desc,
4539 						hal_tx_comp_get_buffer_type(
4540 							tx_comp_hal_desc));
4541 
4542 			} else {
4543 				dp_err_rl("Tx comp wbm_internal_error false");
4544 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
4545 			}
4546 			continue;
4547 		}
4548 
4549 		/* Get descriptor id */
4550 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
4551 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
4552 			DP_TX_DESC_ID_POOL_OS;
4553 
4554 		/* Find Tx descriptor */
4555 		tx_desc = dp_tx_desc_find(soc, pool_id,
4556 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
4557 				DP_TX_DESC_ID_PAGE_OS,
4558 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
4559 				DP_TX_DESC_ID_OFFSET_OS);
4560 
4561 		/*
4562 		 * If the release source is FW, process the HTT status
4563 		 */
4564 		if (qdf_unlikely(buffer_src ==
4565 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
4566 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
4567 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
4568 					htt_tx_status);
4569 			dp_tx_process_htt_completion(soc, tx_desc,
4570 					htt_tx_status, ring_id);
4571 		} else {
4572 			tx_desc->peer_id =
4573 				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
4574 			tx_desc->tx_status =
4575 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4576 			/*
4577 			 * If the fast completion mode is enabled extended
4578 			 * metadata from descriptor is not copied
4579 			 */
4580 			if (qdf_likely(tx_desc->flags &
4581 						DP_TX_DESC_FLAG_SIMPLE))
4582 				goto add_to_pool;
4583 
4584 			/*
4585 			 * If the descriptor is already freed in vdev_detach,
4586 			 * continue to next descriptor
4587 			 */
4588 			if (qdf_unlikely
4589 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
4590 				 !tx_desc->flags)) {
4591 				QDF_TRACE(QDF_MODULE_ID_DP,
4592 					  QDF_TRACE_LEVEL_INFO,
4593 					  "Descriptor freed in vdev_detach %d",
4594 					  tx_desc_id);
4595 				continue;
4596 			}
4597 
4598 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4599 				QDF_TRACE(QDF_MODULE_ID_DP,
4600 					  QDF_TRACE_LEVEL_INFO,
4601 					  "pdev in down state %d",
4602 					  tx_desc_id);
4603 
4604 				dp_tx_comp_free_buf(soc, tx_desc);
4605 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4606 				goto next_desc;
4607 			}
4608 
4609 			/* Pool id is not matching. Error */
4610 			if (tx_desc->pool_id != pool_id) {
4611 				QDF_TRACE(QDF_MODULE_ID_DP,
4612 					QDF_TRACE_LEVEL_FATAL,
4613 					"Tx Comp pool id %d not matched %d",
4614 					pool_id, tx_desc->pool_id);
4615 
4616 				qdf_assert_always(0);
4617 			}
4618 
4619 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4620 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
4621 				QDF_TRACE(QDF_MODULE_ID_DP,
4622 					  QDF_TRACE_LEVEL_FATAL,
4623 					  "Txdesc invalid, flgs = %x,id = %d",
4624 					  tx_desc->flags, tx_desc_id);
4625 				qdf_assert_always(0);
4626 			}
4627 
4628 			/* Collect hw completion contents */
4629 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4630 					      &tx_desc->comp, 1);
4631 add_to_pool:
4632 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4633 
4634 			/* First ring descriptor on the cycle */
4635 			if (!head_desc) {
4636 				head_desc = tx_desc;
4637 				tail_desc = tx_desc;
4638 			}
4639 
4640 			tail_desc->next = tx_desc;
4641 			tx_desc->next = NULL;
4642 			tail_desc = tx_desc;
4643 		}
4644 next_desc:
4645 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4646 
4647 		/*
4648 		 * Processed packet count is more than given quota
4649 		 * stop to processing
4650 		 */
4651 
4652 		count++;
4653 
4654 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
4655 			break;
4656 	}
4657 
4658 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4659 
4660 	/* Process the reaped descriptors */
4661 	if (head_desc)
4662 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4663 
4664 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4665 
4666 		if (num_processed >= quota)
4667 			force_break = true;
4668 
4669 		if (!force_break &&
4670 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
4671 						  hal_ring_hdl)) {
4672 			DP_STATS_INC(soc, tx.hp_oos2, 1);
4673 			if (!hif_exec_should_yield(soc->hif_handle,
4674 						   int_ctx->dp_intr_id))
4675 				goto more_data;
4676 		}
4677 	}
4678 	DP_TX_HIST_STATS_PER_PDEV();
4679 
4680 	return num_processed;
4681 }
4682 
4683 #ifdef FEATURE_WLAN_TDLS
4684 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4685 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4686 {
4687 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4688 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4689 						     DP_MOD_ID_TDLS);
4690 
4691 	if (!vdev) {
4692 		dp_err("vdev handle for id %d is NULL", vdev_id);
4693 		return NULL;
4694 	}
4695 
4696 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4697 		vdev->is_tdls_frame = true;
4698 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
4699 
4700 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4701 }
4702 #endif
4703 
4704 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4705 {
4706 	struct wlan_cfg_dp_soc_ctxt *cfg;
4707 
4708 	struct dp_soc *soc;
4709 
4710 	soc = vdev->pdev->soc;
4711 	if (!soc)
4712 		return;
4713 
4714 	cfg = soc->wlan_cfg_ctx;
4715 	if (!cfg)
4716 		return;
4717 
4718 	if (vdev->opmode == wlan_op_mode_ndi)
4719 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4720 	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
4721 		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
4722 		 (vdev->subtype == wlan_op_subtype_p2p_go))
4723 		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
4724 	else
4725 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4726 }
4727 
4728 /**
4729  * dp_tx_vdev_attach() - attach vdev to dp tx
4730  * @vdev: virtual device instance
4731  *
4732  * Return: QDF_STATUS_SUCCESS: success
4733  *         QDF_STATUS_E_RESOURCES: Error return
4734  */
4735 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4736 {
4737 	int pdev_id;
4738 	/*
4739 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4740 	 */
4741 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4742 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
4743 
4744 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4745 			vdev->vdev_id);
4746 
4747 	pdev_id =
4748 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4749 						       vdev->pdev->pdev_id);
4750 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4751 
4752 	/*
4753 	 * Set HTT Extension Valid bit to 0 by default
4754 	 */
4755 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4756 
4757 	dp_tx_vdev_update_search_flags(vdev);
4758 
4759 	dp_tx_vdev_update_feature_flags(vdev);
4760 
4761 	return QDF_STATUS_SUCCESS;
4762 }
4763 
4764 #ifndef FEATURE_WDS
4765 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4766 {
4767 	return false;
4768 }
4769 #endif
4770 
4771 /**
4772  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4773  * @vdev: virtual device instance
4774  *
4775  * Return: void
4776  *
4777  */
4778 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4779 {
4780 	struct dp_soc *soc = vdev->pdev->soc;
4781 
4782 	/*
4783 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
4784 	 * for TDLS link
4785 	 *
4786 	 * Enable AddrY (SA based search) only for non-WDS STA and
4787 	 * ProxySTA VAP (in HKv1) modes.
4788 	 *
4789 	 * In all other VAP modes, only DA based search should be
4790 	 * enabled
4791 	 */
4792 	if (vdev->opmode == wlan_op_mode_sta &&
4793 	    vdev->tdls_link_connected)
4794 		vdev->hal_desc_addr_search_flags =
4795 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4796 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4797 		 !dp_tx_da_search_override(vdev))
4798 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4799 	else
4800 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4801 
4802 	/* Set search type only when peer map v2 messaging is enabled
4803 	 * as we will have the search index (AST hash) only when v2 is
4804 	 * enabled
4805 	 */
4806 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4807 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4808 	else
4809 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4810 }
4811 
4812 static inline bool
4813 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4814 			  struct dp_vdev *vdev,
4815 			  struct dp_tx_desc_s *tx_desc)
4816 {
4817 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4818 		return false;
4819 
4820 	/*
4821 	 * if vdev is given, then only check whether desc
4822 	 * vdev match. if vdev is NULL, then check whether
4823 	 * desc pdev match.
4824 	 */
4825 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
4826 		(tx_desc->pdev == pdev);
4827 }
4828 
4829 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4830 /**
4831  * dp_tx_desc_flush() - release resources associated
4832  *                      to TX Desc
4833  *
4834  * @dp_pdev: Handle to DP pdev structure
4835  * @vdev: virtual device instance
4836  * NULL: no specific Vdev is required and check all allcated TX desc
4837  * on this pdev.
4838  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
4839  *
4840  * @force_free:
4841  * true: flush the TX desc.
4842  * false: only reset the Vdev in each allocated TX desc
4843  * that associated to current Vdev.
4844  *
4845  * This function will go through the TX desc pool to flush
4846  * the outstanding TX data or reset Vdev to NULL in associated TX
4847  * Desc.
4848  */
4849 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4850 		      bool force_free)
4851 {
4852 	uint8_t i;
4853 	uint32_t j;
4854 	uint32_t num_desc, page_id, offset;
4855 	uint16_t num_desc_per_page;
4856 	struct dp_soc *soc = pdev->soc;
4857 	struct dp_tx_desc_s *tx_desc = NULL;
4858 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4859 
4860 	if (!vdev && !force_free) {
4861 		dp_err("Reset TX desc vdev, Vdev param is required!");
4862 		return;
4863 	}
4864 
4865 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4866 		tx_desc_pool = &soc->tx_desc[i];
4867 		if (!(tx_desc_pool->pool_size) ||
4868 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4869 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4870 			continue;
4871 
4872 		/*
4873 		 * Add flow pool lock protection in case pool is freed
4874 		 * due to all tx_desc is recycled when handle TX completion.
4875 		 * this is not necessary when do force flush as:
4876 		 * a. double lock will happen if dp_tx_desc_release is
4877 		 *    also trying to acquire it.
4878 		 * b. dp interrupt has been disabled before do force TX desc
4879 		 *    flush in dp_pdev_deinit().
4880 		 */
4881 		if (!force_free)
4882 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4883 		num_desc = tx_desc_pool->pool_size;
4884 		num_desc_per_page =
4885 			tx_desc_pool->desc_pages.num_element_per_page;
4886 		for (j = 0; j < num_desc; j++) {
4887 			page_id = j / num_desc_per_page;
4888 			offset = j % num_desc_per_page;
4889 
4890 			if (qdf_unlikely(!(tx_desc_pool->
4891 					 desc_pages.cacheable_pages)))
4892 				break;
4893 
4894 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4895 
4896 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4897 				/*
4898 				 * Free TX desc if force free is
4899 				 * required, otherwise only reset vdev
4900 				 * in this TX desc.
4901 				 */
4902 				if (force_free) {
4903 					dp_tx_comp_free_buf(soc, tx_desc);
4904 					dp_tx_desc_release(tx_desc, i);
4905 				} else {
4906 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
4907 				}
4908 			}
4909 		}
4910 		if (!force_free)
4911 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
4912 	}
4913 }
4914 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4915 /**
4916  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
4917  *
4918  * @soc: Handle to DP soc structure
4919  * @tx_desc: pointer of one TX desc
4920  * @desc_pool_id: TX Desc pool id
4921  */
4922 static inline void
4923 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
4924 		      uint8_t desc_pool_id)
4925 {
4926 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
4927 
4928 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
4929 
4930 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
4931 }
4932 
4933 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4934 		      bool force_free)
4935 {
4936 	uint8_t i, num_pool;
4937 	uint32_t j;
4938 	uint32_t num_desc, page_id, offset;
4939 	uint16_t num_desc_per_page;
4940 	struct dp_soc *soc = pdev->soc;
4941 	struct dp_tx_desc_s *tx_desc = NULL;
4942 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4943 
4944 	if (!vdev && !force_free) {
4945 		dp_err("Reset TX desc vdev, Vdev param is required!");
4946 		return;
4947 	}
4948 
4949 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4950 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4951 
4952 	for (i = 0; i < num_pool; i++) {
4953 		tx_desc_pool = &soc->tx_desc[i];
4954 		if (!tx_desc_pool->desc_pages.cacheable_pages)
4955 			continue;
4956 
4957 		num_desc_per_page =
4958 			tx_desc_pool->desc_pages.num_element_per_page;
4959 		for (j = 0; j < num_desc; j++) {
4960 			page_id = j / num_desc_per_page;
4961 			offset = j % num_desc_per_page;
4962 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4963 
4964 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4965 				if (force_free) {
4966 					dp_tx_comp_free_buf(soc, tx_desc);
4967 					dp_tx_desc_release(tx_desc, i);
4968 				} else {
4969 					dp_tx_desc_reset_vdev(soc, tx_desc,
4970 							      i);
4971 				}
4972 			}
4973 		}
4974 	}
4975 }
4976 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4977 
4978 /**
4979  * dp_tx_vdev_detach() - detach vdev from dp tx
4980  * @vdev: virtual device instance
4981  *
4982  * Return: QDF_STATUS_SUCCESS: success
4983  *         QDF_STATUS_E_RESOURCES: Error return
4984  */
4985 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
4986 {
4987 	struct dp_pdev *pdev = vdev->pdev;
4988 
4989 	/* Reset TX desc associated to this Vdev as NULL */
4990 	dp_tx_desc_flush(pdev, vdev, false);
4991 	dp_tx_vdev_multipass_deinit(vdev);
4992 
4993 	return QDF_STATUS_SUCCESS;
4994 }
4995 
4996 /**
4997  * dp_tx_pdev_attach() - attach pdev to dp tx
4998  * @pdev: physical device instance
4999  *
5000  * Return: QDF_STATUS_SUCCESS: success
5001  *         QDF_STATUS_E_RESOURCES: Error return
5002  */
5003 QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
5004 {
5005 	struct dp_soc *soc = pdev->soc;
5006 
5007 	/* Initialize Flow control counters */
5008 	qdf_atomic_init(&pdev->num_tx_outstanding);
5009 	pdev->tx_descs_max = 0;
5010 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
5011 		/* Initialize descriptors in TCL Ring */
5012 		hal_tx_init_data_ring(soc->hal_soc,
5013 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
5014 	}
5015 
5016 	return QDF_STATUS_SUCCESS;
5017 }
5018 
5019 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5020 /* Pools will be allocated dynamically */
5021 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5022 					   int num_desc)
5023 {
5024 	uint8_t i;
5025 
5026 	for (i = 0; i < num_pool; i++) {
5027 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5028 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5029 	}
5030 
5031 	return QDF_STATUS_SUCCESS;
5032 }
5033 
5034 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5035 					  int num_desc)
5036 {
5037 	return QDF_STATUS_SUCCESS;
5038 }
5039 
5040 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5041 {
5042 }
5043 
5044 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5045 {
5046 	uint8_t i;
5047 
5048 	for (i = 0; i < num_pool; i++)
5049 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5050 }
5051 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5052 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5053 					   int num_desc)
5054 {
5055 	uint8_t i, count;
5056 
5057 	/* Allocate software Tx descriptor pools */
5058 	for (i = 0; i < num_pool; i++) {
5059 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5060 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5061 				  FL("Tx Desc Pool alloc %d failed %pK"),
5062 				  i, soc);
5063 			goto fail;
5064 		}
5065 	}
5066 	return QDF_STATUS_SUCCESS;
5067 
5068 fail:
5069 	for (count = 0; count < i; count++)
5070 		dp_tx_desc_pool_free(soc, count);
5071 
5072 	return QDF_STATUS_E_NOMEM;
5073 }
5074 
5075 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5076 					  int num_desc)
5077 {
5078 	uint8_t i;
5079 	for (i = 0; i < num_pool; i++) {
5080 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5081 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5082 				  FL("Tx Desc Pool init %d failed %pK"),
5083 				  i, soc);
5084 			return QDF_STATUS_E_NOMEM;
5085 		}
5086 	}
5087 	return QDF_STATUS_SUCCESS;
5088 }
5089 
5090 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5091 {
5092 	uint8_t i;
5093 
5094 	for (i = 0; i < num_pool; i++)
5095 		dp_tx_desc_pool_deinit(soc, i);
5096 }
5097 
5098 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5099 {
5100 	uint8_t i;
5101 
5102 	for (i = 0; i < num_pool; i++)
5103 		dp_tx_desc_pool_free(soc, i);
5104 }
5105 
5106 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5107 
5108 /**
5109  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5110  * @soc: core txrx main context
5111  * @num_pool: number of pools
5112  *
5113  */
5114 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5115 {
5116 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5117 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5118 }
5119 
5120 /**
5121  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5122  * @soc: core txrx main context
5123  * @num_pool: number of pools
5124  *
5125  */
5126 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5127 {
5128 	dp_tx_tso_desc_pool_free(soc, num_pool);
5129 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5130 }
5131 
5132 /**
5133  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5134  * @soc: core txrx main context
5135  *
5136  * This function frees all tx related descriptors as below
5137  * 1. Regular TX descriptors (static pools)
5138  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5139  * 3. TSO descriptors
5140  *
5141  */
5142 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5143 {
5144 	uint8_t num_pool;
5145 
5146 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5147 
5148 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5149 	dp_tx_ext_desc_pool_free(soc, num_pool);
5150 	dp_tx_delete_static_pools(soc, num_pool);
5151 }
5152 
5153 /**
5154  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5155  * @soc: core txrx main context
5156  *
5157  * This function de-initializes all tx related descriptors as below
5158  * 1. Regular TX descriptors (static pools)
5159  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5160  * 3. TSO descriptors
5161  *
5162  */
5163 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5164 {
5165 	uint8_t num_pool;
5166 
5167 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5168 
5169 	dp_tx_flow_control_deinit(soc);
5170 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5171 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5172 	dp_tx_deinit_static_pools(soc, num_pool);
5173 }
5174 
5175 /**
5176  * dp_tso_attach() - TSO attach handler
5177  * @txrx_soc: Opaque Dp handle
5178  *
5179  * Reserve TSO descriptor buffers
5180  *
5181  * Return: QDF_STATUS_E_FAILURE on failure or
5182  * QDF_STATUS_SUCCESS on success
5183  */
5184 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5185 					 uint8_t num_pool,
5186 					 uint16_t num_desc)
5187 {
5188 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5189 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5190 		return QDF_STATUS_E_FAILURE;
5191 	}
5192 
5193 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5194 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5195 		       num_pool, soc);
5196 		return QDF_STATUS_E_FAILURE;
5197 	}
5198 	return QDF_STATUS_SUCCESS;
5199 }
5200 
5201 /**
5202  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5203  * @soc: DP soc handle
5204  * @num_pool: Number of pools
5205  * @num_desc: Number of descriptors
5206  *
5207  * Initialize TSO descriptor pools
5208  *
5209  * Return: QDF_STATUS_E_FAILURE on failure or
5210  * QDF_STATUS_SUCCESS on success
5211  */
5212 
5213 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5214 					uint8_t num_pool,
5215 					uint16_t num_desc)
5216 {
5217 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5218 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5219 		return QDF_STATUS_E_FAILURE;
5220 	}
5221 
5222 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5223 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5224 		       num_pool, soc);
5225 		return QDF_STATUS_E_FAILURE;
5226 	}
5227 	return QDF_STATUS_SUCCESS;
5228 }
5229 
5230 /**
5231  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5232  * @soc: core txrx main context
5233  *
5234  * This function allocates memory for following descriptor pools
5235  * 1. regular sw tx descriptor pools (static pools)
5236  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5237  * 3. TSO descriptor pools
5238  *
5239  * Return: QDF_STATUS_SUCCESS: success
5240  *         QDF_STATUS_E_RESOURCES: Error return
5241  */
5242 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5243 {
5244 	uint8_t num_pool;
5245 	uint32_t num_desc;
5246 	uint32_t num_ext_desc;
5247 
5248 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5249 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5250 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5251 
5252 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5253 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5254 		  __func__, num_pool, num_desc);
5255 
5256 	if ((num_pool > MAX_TXDESC_POOLS) ||
5257 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5258 		goto fail1;
5259 
5260 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5261 		goto fail1;
5262 
5263 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5264 		goto fail2;
5265 
5266 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5267 		return QDF_STATUS_SUCCESS;
5268 
5269 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5270 		goto fail3;
5271 
5272 	return QDF_STATUS_SUCCESS;
5273 
5274 fail3:
5275 	dp_tx_ext_desc_pool_free(soc, num_pool);
5276 fail2:
5277 	dp_tx_delete_static_pools(soc, num_pool);
5278 fail1:
5279 	return QDF_STATUS_E_RESOURCES;
5280 }
5281 
5282 /**
5283  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5284  * @soc: core txrx main context
5285  *
5286  * This function initializes the following TX descriptor pools
5287  * 1. regular sw tx descriptor pools (static pools)
5288  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5289  * 3. TSO descriptor pools
5290  *
5291  * Return: QDF_STATUS_SUCCESS: success
5292  *	   QDF_STATUS_E_RESOURCES: Error return
5293  */
5294 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5295 {
5296 	uint8_t num_pool;
5297 	uint32_t num_desc;
5298 	uint32_t num_ext_desc;
5299 
5300 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5301 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5302 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5303 
5304 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5305 		goto fail1;
5306 
5307 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5308 		goto fail2;
5309 
5310 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5311 		return QDF_STATUS_SUCCESS;
5312 
5313 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5314 		goto fail3;
5315 
5316 	dp_tx_flow_control_init(soc);
5317 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5318 	return QDF_STATUS_SUCCESS;
5319 
5320 fail3:
5321 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5322 fail2:
5323 	dp_tx_deinit_static_pools(soc, num_pool);
5324 fail1:
5325 	return QDF_STATUS_E_RESOURCES;
5326 }
5327 
5328 /**
5329  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5330  * @txrx_soc: dp soc handle
5331  *
5332  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5333  *			QDF_STATUS_E_FAILURE
5334  */
5335 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5336 {
5337 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5338 	uint8_t num_pool;
5339 	uint32_t num_desc;
5340 	uint32_t num_ext_desc;
5341 
5342 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5343 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5344 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5345 
5346 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5347 		return QDF_STATUS_E_FAILURE;
5348 
5349 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5350 		return QDF_STATUS_E_FAILURE;
5351 
5352 	return QDF_STATUS_SUCCESS;
5353 }
5354 
5355 /**
5356  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5357  * @txrx_soc: dp soc handle
5358  *
5359  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5360  */
5361 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5362 {
5363 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5364 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5365 
5366 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5367 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5368 
5369 	return QDF_STATUS_SUCCESS;
5370 }
5371 
5372