xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision ab649e062216b8280c10bbf93638dd8770820d39)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 #define DP_RETRY_COUNT 7
68 #ifdef WLAN_PEER_JITTER
69 #define DP_AVG_JITTER_WEIGHT_DENOM 4
70 #define DP_AVG_DELAY_WEIGHT_DENOM 3
71 #endif
72 
73 #ifdef QCA_DP_TX_FW_METADATA_V2
74 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
75 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
76 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
77 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
78 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
79 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
80 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
81 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
82 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
83 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
84 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
85 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
86 #define DP_TCL_METADATA_TYPE_PEER_BASED \
87 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
88 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
89 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
90 #else
91 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
92 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
93 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
94 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
95 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
96 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
97 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
98 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
99 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
100 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
101 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
102 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
103 #define DP_TCL_METADATA_TYPE_PEER_BASED \
104 	HTT_TCL_METADATA_TYPE_PEER_BASED
105 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
106 	HTT_TCL_METADATA_TYPE_VDEV_BASED
107 #endif
108 
109 #define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
110 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
111 
112 /*mapping between hal encrypt type and cdp_sec_type*/
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 /**
258  * dp_is_tput_high() - Check if throughput is high
259  *
260  * @soc: core txrx main context
261  *
262  * The current function is based of the RTPM tput policy variable where RTPM is
263  * avoided based on throughput.
264  */
265 static inline int dp_is_tput_high(struct dp_soc *soc)
266 {
267 	return dp_get_rtpm_tput_policy_requirement(soc);
268 }
269 
270 #if defined(FEATURE_TSO)
271 /**
272  * dp_tx_tso_unmap_segment() - Unmap TSO segment
273  *
274  * @soc: core txrx main context
275  * @seg_desc: tso segment descriptor
276  * @num_seg_desc: tso number segment descriptor
277  */
278 static void dp_tx_tso_unmap_segment(
279 		struct dp_soc *soc,
280 		struct qdf_tso_seg_elem_t *seg_desc,
281 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
282 {
283 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
284 	if (qdf_unlikely(!seg_desc)) {
285 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
286 			 __func__, __LINE__);
287 		qdf_assert(0);
288 	} else if (qdf_unlikely(!num_seg_desc)) {
289 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
290 			 __func__, __LINE__);
291 		qdf_assert(0);
292 	} else {
293 		bool is_last_seg;
294 		/* no tso segment left to do dma unmap */
295 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
296 			return;
297 
298 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
299 					true : false;
300 		qdf_nbuf_unmap_tso_segment(soc->osdev,
301 					   seg_desc, is_last_seg);
302 		num_seg_desc->num_seg.tso_cmn_num_seg--;
303 	}
304 }
305 
306 /**
307  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
308  *                            back to the freelist
309  *
310  * @soc: soc device handle
311  * @tx_desc: Tx software descriptor
312  */
313 static void dp_tx_tso_desc_release(struct dp_soc *soc,
314 				   struct dp_tx_desc_s *tx_desc)
315 {
316 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
317 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
318 		dp_tx_err("SO desc is NULL!");
319 		qdf_assert(0);
320 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
321 		dp_tx_err("TSO num desc is NULL!");
322 		qdf_assert(0);
323 	} else {
324 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
325 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
326 				msdu_ext_desc->tso_num_desc;
327 
328 		/* Add the tso num segment into the free list */
329 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
330 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
331 					    tx_desc->msdu_ext_desc->
332 					    tso_num_desc);
333 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
334 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
335 		}
336 
337 		/* Add the tso segment into the free list*/
338 		dp_tx_tso_desc_free(soc,
339 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
340 				    tso_desc);
341 		tx_desc->msdu_ext_desc->tso_desc = NULL;
342 	}
343 }
344 #else
345 static void dp_tx_tso_unmap_segment(
346 		struct dp_soc *soc,
347 		struct qdf_tso_seg_elem_t *seg_desc,
348 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
349 
350 {
351 }
352 
353 static void dp_tx_tso_desc_release(struct dp_soc *soc,
354 				   struct dp_tx_desc_s *tx_desc)
355 {
356 }
357 #endif
358 
359 #ifdef WLAN_SUPPORT_PPEDS
360 static inline int
361 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
362 			 uint8_t desc_pool_id)
363 {
364 	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
365 		__dp_tx_outstanding_dec(soc);
366 		dp_tx_desc_free(soc, tx_desc, desc_pool_id);
367 
368 		return 1;
369 	}
370 
371 	return 0;
372 }
373 #else
374 static inline int
375 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
376 			 uint8_t desc_pool_id)
377 {
378 	return 0;
379 }
380 #endif
381 
382 void
383 dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
384 		   uint8_t desc_pool_id)
385 {
386 	struct dp_pdev *pdev = tx_desc->pdev;
387 	uint8_t comp_status = 0;
388 
389 	if (dp_tx_release_ds_tx_desc(soc, tx_desc, desc_pool_id))
390 		return;
391 
392 	qdf_assert(pdev);
393 
394 	soc = pdev->soc;
395 
396 	dp_tx_outstanding_dec(pdev);
397 
398 	if (tx_desc->msdu_ext_desc) {
399 		if (tx_desc->frm_type == dp_tx_frm_tso)
400 			dp_tx_tso_desc_release(soc, tx_desc);
401 
402 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
403 			dp_tx_me_free_buf(tx_desc->pdev,
404 					  tx_desc->msdu_ext_desc->me_buffer);
405 
406 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
407 
408 		tx_desc->msdu_ext_desc = NULL;
409 	}
410 
411 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
412 		qdf_atomic_dec(&soc->num_tx_exception);
413 
414 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
415 				tx_desc->buffer_src)
416 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
417 							     soc->hal_soc);
418 	else
419 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
420 
421 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
422 		    tx_desc->id, comp_status,
423 		    qdf_atomic_read(&pdev->num_tx_outstanding));
424 
425 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
426 	return;
427 }
428 
429 /**
430  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
431  * @vdev: DP vdev Handle
432  * @nbuf: skb
433  * @msdu_info: msdu_info required to create HTT metadata
434  *
435  * Prepares and fills HTT metadata in the frame pre-header for special frames
436  * that should be transmitted using varying transmit parameters.
437  * There are 2 VDEV modes that currently needs this special metadata -
438  *  1) Mesh Mode
439  *  2) DSRC Mode
440  *
441  * Return: HTT metadata size
442  *
443  */
444 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
445 					  struct dp_tx_msdu_info_s *msdu_info)
446 {
447 	uint32_t *meta_data = msdu_info->meta_data;
448 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
449 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
450 
451 	uint8_t htt_desc_size;
452 
453 	/* Size rounded of multiple of 8 bytes */
454 	uint8_t htt_desc_size_aligned;
455 
456 	uint8_t *hdr = NULL;
457 
458 	/*
459 	 * Metadata - HTT MSDU Extension header
460 	 */
461 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
462 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
463 
464 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
465 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
466 							   meta_data[0]) ||
467 	    msdu_info->exception_fw) {
468 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
469 				 htt_desc_size_aligned)) {
470 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
471 							 htt_desc_size_aligned);
472 			if (!nbuf) {
473 				/*
474 				 * qdf_nbuf_realloc_headroom won't do skb_clone
475 				 * as skb_realloc_headroom does. so, no free is
476 				 * needed here.
477 				 */
478 				DP_STATS_INC(vdev,
479 					     tx_i.dropped.headroom_insufficient,
480 					     1);
481 				qdf_print(" %s[%d] skb_realloc_headroom failed",
482 					  __func__, __LINE__);
483 				return 0;
484 			}
485 		}
486 		/* Fill and add HTT metaheader */
487 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
488 		if (!hdr) {
489 			dp_tx_err("Error in filling HTT metadata");
490 
491 			return 0;
492 		}
493 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
494 
495 	} else if (vdev->opmode == wlan_op_mode_ocb) {
496 		/* Todo - Add support for DSRC */
497 	}
498 
499 	return htt_desc_size_aligned;
500 }
501 
502 /**
503  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
504  * @tso_seg: TSO segment to process
505  * @ext_desc: Pointer to MSDU extension descriptor
506  *
507  * Return: void
508  */
509 #if defined(FEATURE_TSO)
510 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
511 		void *ext_desc)
512 {
513 	uint8_t num_frag;
514 	uint32_t tso_flags;
515 
516 	/*
517 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
518 	 * tcp_flag_mask
519 	 *
520 	 * Checksum enable flags are set in TCL descriptor and not in Extension
521 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
522 	 */
523 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
524 
525 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
526 
527 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
528 		tso_seg->tso_flags.ip_len);
529 
530 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
531 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
532 
533 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
534 		uint32_t lo = 0;
535 		uint32_t hi = 0;
536 
537 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
538 				  (tso_seg->tso_frags[num_frag].length));
539 
540 		qdf_dmaaddr_to_32s(
541 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
542 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
543 			tso_seg->tso_frags[num_frag].length);
544 	}
545 
546 	return;
547 }
548 #else
549 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
550 		void *ext_desc)
551 {
552 	return;
553 }
554 #endif
555 
556 #if defined(FEATURE_TSO)
557 /**
558  * dp_tx_free_tso_seg_list() - Loop through the tso segments
559  *                             allocated and free them
560  * @soc: soc handle
561  * @free_seg: list of tso segments
562  * @msdu_info: msdu descriptor
563  *
564  * Return: void
565  */
566 static void dp_tx_free_tso_seg_list(
567 		struct dp_soc *soc,
568 		struct qdf_tso_seg_elem_t *free_seg,
569 		struct dp_tx_msdu_info_s *msdu_info)
570 {
571 	struct qdf_tso_seg_elem_t *next_seg;
572 
573 	while (free_seg) {
574 		next_seg = free_seg->next;
575 		dp_tx_tso_desc_free(soc,
576 				    msdu_info->tx_queue.desc_pool_id,
577 				    free_seg);
578 		free_seg = next_seg;
579 	}
580 }
581 
582 /**
583  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
584  *                                 allocated and free them
585  * @soc:  soc handle
586  * @free_num_seg: list of tso number segments
587  * @msdu_info: msdu descriptor
588  *
589  * Return: void
590  */
591 static void dp_tx_free_tso_num_seg_list(
592 		struct dp_soc *soc,
593 		struct qdf_tso_num_seg_elem_t *free_num_seg,
594 		struct dp_tx_msdu_info_s *msdu_info)
595 {
596 	struct qdf_tso_num_seg_elem_t *next_num_seg;
597 
598 	while (free_num_seg) {
599 		next_num_seg = free_num_seg->next;
600 		dp_tso_num_seg_free(soc,
601 				    msdu_info->tx_queue.desc_pool_id,
602 				    free_num_seg);
603 		free_num_seg = next_num_seg;
604 	}
605 }
606 
607 /**
608  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
609  *                              do dma unmap for each segment
610  * @soc: soc handle
611  * @free_seg: list of tso segments
612  * @num_seg_desc: tso number segment descriptor
613  *
614  * Return: void
615  */
616 static void dp_tx_unmap_tso_seg_list(
617 		struct dp_soc *soc,
618 		struct qdf_tso_seg_elem_t *free_seg,
619 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
620 {
621 	struct qdf_tso_seg_elem_t *next_seg;
622 
623 	if (qdf_unlikely(!num_seg_desc)) {
624 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
625 		return;
626 	}
627 
628 	while (free_seg) {
629 		next_seg = free_seg->next;
630 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
631 		free_seg = next_seg;
632 	}
633 }
634 
635 #ifdef FEATURE_TSO_STATS
636 /**
637  * dp_tso_get_stats_idx() - Retrieve the tso packet id
638  * @pdev: pdev handle
639  *
640  * Return: id
641  */
642 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
643 {
644 	uint32_t stats_idx;
645 
646 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
647 						% CDP_MAX_TSO_PACKETS);
648 	return stats_idx;
649 }
650 #else
651 static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
652 {
653 	return 0;
654 }
655 #endif /* FEATURE_TSO_STATS */
656 
657 /**
658  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
659  *				     free the tso segments descriptor and
660  *				     tso num segments descriptor
661  * @soc:  soc handle
662  * @msdu_info: msdu descriptor
663  * @tso_seg_unmap: flag to show if dma unmap is necessary
664  *
665  * Return: void
666  */
667 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
668 					  struct dp_tx_msdu_info_s *msdu_info,
669 					  bool tso_seg_unmap)
670 {
671 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
672 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
673 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
674 					tso_info->tso_num_seg_list;
675 
676 	/* do dma unmap for each segment */
677 	if (tso_seg_unmap)
678 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
679 
680 	/* free all tso number segment descriptor though looks only have 1 */
681 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
682 
683 	/* free all tso segment descriptor */
684 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
685 }
686 
687 /**
688  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
689  * @vdev: virtual device handle
690  * @msdu: network buffer
691  * @msdu_info: meta data associated with the msdu
692  *
693  * Return: QDF_STATUS_SUCCESS success
694  */
695 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
696 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
697 {
698 	struct qdf_tso_seg_elem_t *tso_seg;
699 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
700 	struct dp_soc *soc = vdev->pdev->soc;
701 	struct dp_pdev *pdev = vdev->pdev;
702 	struct qdf_tso_info_t *tso_info;
703 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
704 	tso_info = &msdu_info->u.tso_info;
705 	tso_info->curr_seg = NULL;
706 	tso_info->tso_seg_list = NULL;
707 	tso_info->num_segs = num_seg;
708 	msdu_info->frm_type = dp_tx_frm_tso;
709 	tso_info->tso_num_seg_list = NULL;
710 
711 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
712 
713 	while (num_seg) {
714 		tso_seg = dp_tx_tso_desc_alloc(
715 				soc, msdu_info->tx_queue.desc_pool_id);
716 		if (tso_seg) {
717 			tso_seg->next = tso_info->tso_seg_list;
718 			tso_info->tso_seg_list = tso_seg;
719 			num_seg--;
720 		} else {
721 			dp_err_rl("Failed to alloc tso seg desc");
722 			DP_STATS_INC_PKT(vdev->pdev,
723 					 tso_stats.tso_no_mem_dropped, 1,
724 					 qdf_nbuf_len(msdu));
725 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
726 
727 			return QDF_STATUS_E_NOMEM;
728 		}
729 	}
730 
731 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
732 
733 	tso_num_seg = dp_tso_num_seg_alloc(soc,
734 			msdu_info->tx_queue.desc_pool_id);
735 
736 	if (tso_num_seg) {
737 		tso_num_seg->next = tso_info->tso_num_seg_list;
738 		tso_info->tso_num_seg_list = tso_num_seg;
739 	} else {
740 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
741 			 __func__);
742 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
743 
744 		return QDF_STATUS_E_NOMEM;
745 	}
746 
747 	msdu_info->num_seg =
748 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
749 
750 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
751 			msdu_info->num_seg);
752 
753 	if (!(msdu_info->num_seg)) {
754 		/*
755 		 * Free allocated TSO seg desc and number seg desc,
756 		 * do unmap for segments if dma map has done.
757 		 */
758 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
759 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
760 
761 		return QDF_STATUS_E_INVAL;
762 	}
763 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
764 			      msdu, 0, DP_TX_DESC_MAP);
765 
766 	tso_info->curr_seg = tso_info->tso_seg_list;
767 
768 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
769 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
770 			     msdu, msdu_info->num_seg);
771 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
772 				    tso_info->msdu_stats_idx);
773 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
774 	return QDF_STATUS_SUCCESS;
775 }
776 #else
777 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
778 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
779 {
780 	return QDF_STATUS_E_NOMEM;
781 }
782 #endif
783 
784 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
785 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
786 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
787 
788 /**
789  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
790  * @vdev: DP Vdev handle
791  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
792  * @desc_pool_id: Descriptor Pool ID
793  *
794  * Return:
795  */
796 static
797 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
798 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
799 {
800 	uint8_t i;
801 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
802 	struct dp_tx_seg_info_s *seg_info;
803 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
804 	struct dp_soc *soc = vdev->pdev->soc;
805 
806 	/* Allocate an extension descriptor */
807 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
808 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
809 
810 	if (!msdu_ext_desc) {
811 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
812 		return NULL;
813 	}
814 
815 	if (msdu_info->exception_fw &&
816 			qdf_unlikely(vdev->mesh_vdev)) {
817 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
818 				&msdu_info->meta_data[0],
819 				sizeof(struct htt_tx_msdu_desc_ext2_t));
820 		qdf_atomic_inc(&soc->num_tx_exception);
821 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
822 	}
823 
824 	switch (msdu_info->frm_type) {
825 	case dp_tx_frm_sg:
826 	case dp_tx_frm_me:
827 	case dp_tx_frm_raw:
828 		seg_info = msdu_info->u.sg_info.curr_seg;
829 		/* Update the buffer pointers in MSDU Extension Descriptor */
830 		for (i = 0; i < seg_info->frag_cnt; i++) {
831 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
832 				seg_info->frags[i].paddr_lo,
833 				seg_info->frags[i].paddr_hi,
834 				seg_info->frags[i].len);
835 		}
836 
837 		break;
838 
839 	case dp_tx_frm_tso:
840 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
841 				&cached_ext_desc[0]);
842 		break;
843 
844 
845 	default:
846 		break;
847 	}
848 
849 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
850 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
851 
852 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
853 			msdu_ext_desc->vaddr);
854 
855 	return msdu_ext_desc;
856 }
857 
858 /**
859  * dp_tx_trace_pkt() - Trace TX packet at DP layer
860  * @soc: datapath SOC
861  * @skb: skb to be traced
862  * @msdu_id: msdu_id of the packet
863  * @vdev_id: vdev_id of the packet
864  * @op_mode: Vdev Operation mode
865  *
866  * Return: None
867  */
868 #ifdef DP_DISABLE_TX_PKT_TRACE
869 static void dp_tx_trace_pkt(struct dp_soc *soc,
870 			    qdf_nbuf_t skb, uint16_t msdu_id,
871 			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
872 {
873 }
874 #else
875 static void dp_tx_trace_pkt(struct dp_soc *soc,
876 			    qdf_nbuf_t skb, uint16_t msdu_id,
877 			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
878 {
879 	if (dp_is_tput_high(soc))
880 		return;
881 
882 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
883 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
884 	DPTRACE(qdf_dp_trace_ptr(skb,
885 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
886 				 QDF_TRACE_DEFAULT_PDEV_ID,
887 				 qdf_nbuf_data_addr(skb),
888 				 sizeof(qdf_nbuf_data(skb)),
889 				 msdu_id, vdev_id, 0,
890 				 op_mode));
891 
892 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID,
893 			     op_mode);
894 
895 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
896 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
897 				      msdu_id, QDF_TX));
898 }
899 #endif
900 
901 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
902 /**
903  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
904  *				      exception by the upper layer (OS_IF)
905  * @soc: DP soc handle
906  * @nbuf: packet to be transmitted
907  *
908  * Return: 1 if the packet is marked as exception,
909  *	   0, if the packet is not marked as exception.
910  */
911 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
912 						 qdf_nbuf_t nbuf)
913 {
914 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
915 }
916 #else
917 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
918 						 qdf_nbuf_t nbuf)
919 {
920 	return 0;
921 }
922 #endif
923 
924 #ifdef DP_TRAFFIC_END_INDICATION
925 /**
926  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
927  *                                          as indication to fw to inform that
928  *                                          data stream has ended
929  * @vdev: DP vdev handle
930  * @nbuf: original buffer from network stack
931  *
932  * Return: NULL on failure,
933  *         nbuf on success
934  */
935 static inline qdf_nbuf_t
936 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
937 				     qdf_nbuf_t nbuf)
938 {
939 	/* Packet length should be enough to copy upto L3 header */
940 	uint8_t end_nbuf_len = 64;
941 	uint8_t htt_desc_size_aligned;
942 	uint8_t htt_desc_size;
943 	qdf_nbuf_t end_nbuf;
944 
945 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
946 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
947 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
948 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
949 
950 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
951 		if (!end_nbuf) {
952 			end_nbuf = qdf_nbuf_alloc(NULL,
953 						  (htt_desc_size_aligned +
954 						  end_nbuf_len),
955 						  htt_desc_size_aligned,
956 						  8, false);
957 			if (!end_nbuf) {
958 				dp_err("Packet allocation failed");
959 				goto out;
960 			}
961 		} else {
962 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
963 		}
964 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
965 			     end_nbuf_len);
966 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
967 
968 		return end_nbuf;
969 	}
970 out:
971 	return NULL;
972 }
973 
974 /**
975  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
976  *                                           via exception path.
977  * @vdev: DP vdev handle
978  * @end_nbuf: skb to send as indication
979  * @msdu_info: msdu_info of original nbuf
980  * @peer_id: peer id
981  *
982  * Return: None
983  */
984 static inline void
985 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
986 				      qdf_nbuf_t end_nbuf,
987 				      struct dp_tx_msdu_info_s *msdu_info,
988 				      uint16_t peer_id)
989 {
990 	struct dp_tx_msdu_info_s e_msdu_info = {0};
991 	qdf_nbuf_t nbuf;
992 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
993 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
994 	e_msdu_info.tx_queue = msdu_info->tx_queue;
995 	e_msdu_info.tid = msdu_info->tid;
996 	e_msdu_info.exception_fw = 1;
997 	desc_ext->host_tx_desc_pool = 1;
998 	desc_ext->traffic_end_indication = 1;
999 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
1000 				      peer_id, NULL);
1001 	if (nbuf) {
1002 		dp_err("Traffic end indication packet tx failed");
1003 		qdf_nbuf_free(nbuf);
1004 	}
1005 }
1006 
1007 /**
1008  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
1009  *                                                mark it traffic end indication
1010  *                                                packet.
1011  * @tx_desc: Tx descriptor pointer
1012  * @msdu_info: msdu_info structure pointer
1013  *
1014  * Return: None
1015  */
1016 static inline void
1017 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1018 					   struct dp_tx_msdu_info_s *msdu_info)
1019 {
1020 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1021 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1022 
1023 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1024 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1025 }
1026 
1027 /**
1028  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
1029  *                                              freeing which are associated
1030  *                                              with traffic end indication
1031  *                                              flagged descriptor.
1032  * @soc: dp soc handle
1033  * @desc: Tx descriptor pointer
1034  * @nbuf: buffer pointer
1035  *
1036  * Return: True if packet gets enqueued else false
1037  */
1038 static bool
1039 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1040 					 struct dp_tx_desc_s *desc,
1041 					 qdf_nbuf_t nbuf)
1042 {
1043 	struct dp_vdev *vdev = NULL;
1044 
1045 	if (qdf_unlikely((desc->flags &
1046 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1047 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1048 					     DP_MOD_ID_TX_COMP);
1049 		if (vdev) {
1050 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1051 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1052 			return true;
1053 		}
1054 	}
1055 	return false;
1056 }
1057 
1058 /**
1059  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1060  *                                             enable/disable status
1061  * @vdev: dp vdev handle
1062  *
1063  * Return: True if feature is enable else false
1064  */
1065 static inline bool
1066 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1067 {
1068 	return qdf_unlikely(vdev->traffic_end_ind_en);
1069 }
1070 
1071 static inline qdf_nbuf_t
1072 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1073 			       struct dp_tx_msdu_info_s *msdu_info,
1074 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1075 {
1076 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1077 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1078 
1079 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1080 
1081 	if (qdf_unlikely(end_nbuf))
1082 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1083 						      msdu_info, peer_id);
1084 	return nbuf;
1085 }
1086 #else
1087 static inline qdf_nbuf_t
1088 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1089 				     qdf_nbuf_t nbuf)
1090 {
1091 	return NULL;
1092 }
1093 
1094 static inline void
1095 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1096 				      qdf_nbuf_t end_nbuf,
1097 				      struct dp_tx_msdu_info_s *msdu_info,
1098 				      uint16_t peer_id)
1099 {}
1100 
1101 static inline void
1102 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1103 					   struct dp_tx_msdu_info_s *msdu_info)
1104 {}
1105 
1106 static inline bool
1107 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1108 					 struct dp_tx_desc_s *desc,
1109 					 qdf_nbuf_t nbuf)
1110 {
1111 	return false;
1112 }
1113 
1114 static inline bool
1115 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1116 {
1117 	return false;
1118 }
1119 
1120 static inline qdf_nbuf_t
1121 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1122 			       struct dp_tx_msdu_info_s *msdu_info,
1123 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1124 {
1125 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1126 }
1127 #endif
1128 
1129 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1130 static bool
1131 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1132 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1133 {
1134 	if (soc->features.wds_ext_ast_override_enable &&
1135 	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
1136 		return true;
1137 
1138 	return false;
1139 }
1140 #else
1141 static bool
1142 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1143 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1144 {
1145 	return false;
1146 }
1147 #endif
1148 
1149 /**
1150  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1151  * @vdev: DP vdev handle
1152  * @nbuf: skb
1153  * @desc_pool_id: Descriptor pool ID
1154  * @msdu_info: Metadata to the fw
1155  * @tx_exc_metadata: Handle that holds exception path metadata
1156  *
1157  * Allocate and prepare Tx descriptor with msdu information.
1158  *
1159  * Return: Pointer to Tx Descriptor on success,
1160  *         NULL on failure
1161  */
1162 static
1163 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1164 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1165 		struct dp_tx_msdu_info_s *msdu_info,
1166 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1167 {
1168 	uint8_t align_pad;
1169 	uint8_t is_exception = 0;
1170 	uint8_t htt_hdr_size;
1171 	struct dp_tx_desc_s *tx_desc;
1172 	struct dp_pdev *pdev = vdev->pdev;
1173 	struct dp_soc *soc = pdev->soc;
1174 
1175 	if (dp_tx_limit_check(vdev, nbuf))
1176 		return NULL;
1177 
1178 	/* Allocate software Tx descriptor */
1179 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1180 
1181 	if (qdf_unlikely(!tx_desc)) {
1182 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1183 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1184 		return NULL;
1185 	}
1186 
1187 	dp_tx_outstanding_inc(pdev);
1188 
1189 	/* Initialize the SW tx descriptor */
1190 	tx_desc->nbuf = nbuf;
1191 	tx_desc->frm_type = dp_tx_frm_std;
1192 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1193 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1194 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1195 	tx_desc->vdev_id = vdev->vdev_id;
1196 	tx_desc->pdev = pdev;
1197 	tx_desc->msdu_ext_desc = NULL;
1198 	tx_desc->pkt_offset = 0;
1199 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1200 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1201 
1202 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1203 			vdev->qdf_opmode);
1204 
1205 	if (qdf_unlikely(vdev->multipass_en)) {
1206 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1207 			goto failure;
1208 	}
1209 
1210 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1211 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1212 		is_exception = 1;
1213 
1214 	/* for BE chipsets if wds extension was enbled will not mark FW
1215 	 * in desc will mark ast index based search for ast index.
1216 	 */
1217 	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
1218 		return tx_desc;
1219 
1220 	/*
1221 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1222 	 * transmitted using varying transmit parameters (tx spec) which include
1223 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
1224 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1225 	 * These frames are sent as exception packets to firmware.
1226 	 *
1227 	 * HW requirement is that metadata should always point to a
1228 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1229 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1230 	 *  to get 8-byte aligned start address along with align_pad added
1231 	 *
1232 	 *  |-----------------------------|
1233 	 *  |                             |
1234 	 *  |-----------------------------| <-----Buffer Pointer Address given
1235 	 *  |                             |  ^    in HW descriptor (aligned)
1236 	 *  |       HTT Metadata          |  |
1237 	 *  |                             |  |
1238 	 *  |                             |  | Packet Offset given in descriptor
1239 	 *  |                             |  |
1240 	 *  |-----------------------------|  |
1241 	 *  |       Alignment Pad         |  v
1242 	 *  |-----------------------------| <----- Actual buffer start address
1243 	 *  |        SKB Data             |           (Unaligned)
1244 	 *  |                             |
1245 	 *  |                             |
1246 	 *  |                             |
1247 	 *  |                             |
1248 	 *  |                             |
1249 	 *  |-----------------------------|
1250 	 */
1251 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1252 				(vdev->opmode == wlan_op_mode_ocb) ||
1253 				(tx_exc_metadata &&
1254 				tx_exc_metadata->is_tx_sniffer)) {
1255 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1256 
1257 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1258 			DP_STATS_INC(vdev,
1259 				     tx_i.dropped.headroom_insufficient, 1);
1260 			goto failure;
1261 		}
1262 
1263 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1264 			dp_tx_err("qdf_nbuf_push_head failed");
1265 			goto failure;
1266 		}
1267 
1268 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1269 				msdu_info);
1270 		if (htt_hdr_size == 0)
1271 			goto failure;
1272 
1273 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1274 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1275 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1276 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1277 							   msdu_info);
1278 		is_exception = 1;
1279 		tx_desc->length -= tx_desc->pkt_offset;
1280 	}
1281 
1282 #if !TQM_BYPASS_WAR
1283 	if (is_exception || tx_exc_metadata)
1284 #endif
1285 	{
1286 		/* Temporary WAR due to TQM VP issues */
1287 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1288 		qdf_atomic_inc(&soc->num_tx_exception);
1289 	}
1290 
1291 	return tx_desc;
1292 
1293 failure:
1294 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1295 	return NULL;
1296 }
1297 
1298 /**
1299  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
1300  *                        frame
1301  * @vdev: DP vdev handle
1302  * @nbuf: skb
1303  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1304  * @desc_pool_id : Descriptor Pool ID
1305  *
1306  * Allocate and prepare Tx descriptor with msdu and fragment descritor
1307  * information. For frames with fragments, allocate and prepare
1308  * an MSDU extension descriptor
1309  *
1310  * Return: Pointer to Tx Descriptor on success,
1311  *         NULL on failure
1312  */
1313 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1314 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1315 		uint8_t desc_pool_id)
1316 {
1317 	struct dp_tx_desc_s *tx_desc;
1318 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1319 	struct dp_pdev *pdev = vdev->pdev;
1320 	struct dp_soc *soc = pdev->soc;
1321 
1322 	if (dp_tx_limit_check(vdev, nbuf))
1323 		return NULL;
1324 
1325 	/* Allocate software Tx descriptor */
1326 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1327 	if (!tx_desc) {
1328 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1329 		return NULL;
1330 	}
1331 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1332 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1333 
1334 	dp_tx_outstanding_inc(pdev);
1335 
1336 	/* Initialize the SW tx descriptor */
1337 	tx_desc->nbuf = nbuf;
1338 	tx_desc->frm_type = msdu_info->frm_type;
1339 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1340 	tx_desc->vdev_id = vdev->vdev_id;
1341 	tx_desc->pdev = pdev;
1342 	tx_desc->pkt_offset = 0;
1343 
1344 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1345 			vdev->qdf_opmode);
1346 
1347 	/* Handle scattered frames - TSO/SG/ME */
1348 	/* Allocate and prepare an extension descriptor for scattered frames */
1349 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1350 	if (!msdu_ext_desc) {
1351 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1352 		goto failure;
1353 	}
1354 
1355 #if !TQM_BYPASS_WAR
1356 	if (qdf_unlikely(msdu_info->exception_fw) ||
1357 	    dp_tx_is_nbuf_marked_exception(soc, nbuf))
1358 #endif
1359 	{
1360 		/* Temporary WAR due to TQM VP issues */
1361 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1362 		qdf_atomic_inc(&soc->num_tx_exception);
1363 	}
1364 
1365 
1366 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1367 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1368 
1369 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1370 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1371 
1372 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1373 
1374 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1375 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1376 	else
1377 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1378 
1379 	return tx_desc;
1380 failure:
1381 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1382 	return NULL;
1383 }
1384 
1385 /**
1386  * dp_tx_prepare_raw() - Prepare RAW packet TX
1387  * @vdev: DP vdev handle
1388  * @nbuf: buffer pointer
1389  * @seg_info: Pointer to Segment info Descriptor to be prepared
1390  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1391  *     descriptor
1392  *
1393  * Return:
1394  */
1395 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1396 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1397 {
1398 	qdf_nbuf_t curr_nbuf = NULL;
1399 	uint16_t total_len = 0;
1400 	qdf_dma_addr_t paddr;
1401 	int32_t i;
1402 	int32_t mapped_buf_num = 0;
1403 
1404 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1405 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1406 
1407 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1408 
1409 	/* Continue only if frames are of DATA type */
1410 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1411 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1412 		dp_tx_debug("Pkt. recd is of not data type");
1413 		goto error;
1414 	}
1415 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1416 	if (vdev->raw_mode_war &&
1417 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1418 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1419 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1420 
1421 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1422 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1423 		/*
1424 		 * Number of nbuf's must not exceed the size of the frags
1425 		 * array in seg_info.
1426 		 */
1427 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1428 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1429 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1430 			goto error;
1431 		}
1432 		if (QDF_STATUS_SUCCESS !=
1433 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1434 						   curr_nbuf,
1435 						   QDF_DMA_TO_DEVICE,
1436 						   curr_nbuf->len)) {
1437 			dp_tx_err("%s dma map error ", __func__);
1438 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1439 			goto error;
1440 		}
1441 		/* Update the count of mapped nbuf's */
1442 		mapped_buf_num++;
1443 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1444 		seg_info->frags[i].paddr_lo = paddr;
1445 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1446 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1447 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1448 		total_len += qdf_nbuf_len(curr_nbuf);
1449 	}
1450 
1451 	seg_info->frag_cnt = i;
1452 	seg_info->total_len = total_len;
1453 	seg_info->next = NULL;
1454 
1455 	sg_info->curr_seg = seg_info;
1456 
1457 	msdu_info->frm_type = dp_tx_frm_raw;
1458 	msdu_info->num_seg = 1;
1459 
1460 	return nbuf;
1461 
1462 error:
1463 	i = 0;
1464 	while (nbuf) {
1465 		curr_nbuf = nbuf;
1466 		if (i < mapped_buf_num) {
1467 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1468 						     QDF_DMA_TO_DEVICE,
1469 						     curr_nbuf->len);
1470 			i++;
1471 		}
1472 		nbuf = qdf_nbuf_next(nbuf);
1473 		qdf_nbuf_free(curr_nbuf);
1474 	}
1475 	return NULL;
1476 
1477 }
1478 
1479 /**
1480  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1481  * @soc: DP soc handle
1482  * @nbuf: Buffer pointer
1483  *
1484  * unmap the chain of nbufs that belong to this RAW frame.
1485  *
1486  * Return: None
1487  */
1488 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1489 				    qdf_nbuf_t nbuf)
1490 {
1491 	qdf_nbuf_t cur_nbuf = nbuf;
1492 
1493 	do {
1494 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1495 					     QDF_DMA_TO_DEVICE,
1496 					     cur_nbuf->len);
1497 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1498 	} while (cur_nbuf);
1499 }
1500 
1501 #ifdef VDEV_PEER_PROTOCOL_COUNT
1502 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1503 					       qdf_nbuf_t nbuf)
1504 {
1505 	qdf_nbuf_t nbuf_local;
1506 	struct dp_vdev *vdev_local = vdev_hdl;
1507 
1508 	do {
1509 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1510 			break;
1511 		nbuf_local = nbuf;
1512 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1513 			 htt_cmn_pkt_type_raw))
1514 			break;
1515 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1516 			break;
1517 		else if (qdf_nbuf_is_tso((nbuf_local)))
1518 			break;
1519 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1520 						       (nbuf_local),
1521 						       NULL, 1, 0);
1522 	} while (0);
1523 }
1524 #endif
1525 
1526 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1527 void dp_tx_update_stats(struct dp_soc *soc,
1528 			struct dp_tx_desc_s *tx_desc,
1529 			uint8_t ring_id)
1530 {
1531 	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
1532 
1533 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1534 }
1535 
1536 int
1537 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1538 			 struct dp_tx_desc_s *tx_desc,
1539 			 uint8_t tid,
1540 			 struct dp_tx_msdu_info_s *msdu_info,
1541 			 uint8_t ring_id)
1542 {
1543 	struct dp_swlm *swlm = &soc->swlm;
1544 	union swlm_data swlm_query_data;
1545 	struct dp_swlm_tcl_data tcl_data;
1546 	QDF_STATUS status;
1547 	int ret;
1548 
1549 	if (!swlm->is_enabled)
1550 		return msdu_info->skip_hp_update;
1551 
1552 	tcl_data.nbuf = tx_desc->nbuf;
1553 	tcl_data.tid = tid;
1554 	tcl_data.ring_id = ring_id;
1555 	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
1556 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1557 	swlm_query_data.tcl_data = &tcl_data;
1558 
1559 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1560 	if (QDF_IS_STATUS_ERROR(status)) {
1561 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1562 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1563 		return 0;
1564 	}
1565 
1566 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1567 	if (ret) {
1568 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1569 	} else {
1570 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1571 	}
1572 
1573 	return ret;
1574 }
1575 
1576 void
1577 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1578 		      int coalesce)
1579 {
1580 	if (coalesce)
1581 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1582 	else
1583 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1584 }
1585 
1586 static inline void
1587 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1588 {
1589 	if (((i + 1) < msdu_info->num_seg))
1590 		msdu_info->skip_hp_update = 1;
1591 	else
1592 		msdu_info->skip_hp_update = 0;
1593 }
1594 
1595 static inline void
1596 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1597 {
1598 	hal_ring_handle_t hal_ring_hdl =
1599 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1600 
1601 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1602 		dp_err("Fillmore: SRNG access start failed");
1603 		return;
1604 	}
1605 
1606 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1607 }
1608 
1609 static inline void
1610 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1611 			 QDF_STATUS status,
1612 			 struct dp_tx_msdu_info_s *msdu_info)
1613 {
1614 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1615 		dp_flush_tcp_hp(soc,
1616 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1617 	}
1618 }
1619 #else
1620 static inline void
1621 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1622 {
1623 }
1624 
1625 static inline void
1626 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1627 			 QDF_STATUS status,
1628 			 struct dp_tx_msdu_info_s *msdu_info)
1629 {
1630 }
1631 #endif
1632 
1633 #ifdef FEATURE_RUNTIME_PM
1634 void
1635 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1636 			      hal_ring_handle_t hal_ring_hdl,
1637 			      int coalesce)
1638 {
1639 	int ret;
1640 
1641 	/*
1642 	 * Avoid runtime get and put APIs under high throughput scenarios.
1643 	 */
1644 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1645 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1646 		return;
1647 	}
1648 
1649 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1650 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1651 		if (hif_system_pm_state_check(soc->hif_handle)) {
1652 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1653 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1654 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1655 		} else {
1656 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1657 		}
1658 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1659 	} else {
1660 		dp_runtime_get(soc);
1661 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1662 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1663 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1664 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1665 		dp_runtime_put(soc);
1666 	}
1667 }
1668 #else
1669 
1670 #ifdef DP_POWER_SAVE
1671 void
1672 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1673 			      hal_ring_handle_t hal_ring_hdl,
1674 			      int coalesce)
1675 {
1676 	if (hif_system_pm_state_check(soc->hif_handle)) {
1677 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1678 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1679 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1680 	} else {
1681 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1682 	}
1683 }
1684 #endif
1685 #endif
1686 
1687 /**
1688  * dp_tx_get_tid() - Obtain TID to be used for this frame
1689  * @vdev: DP vdev handle
1690  * @nbuf: skb
1691  * @msdu_info: msdu descriptor
1692  *
1693  * Extract the DSCP or PCP information from frame and map into TID value.
1694  *
1695  * Return: void
1696  */
1697 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1698 			  struct dp_tx_msdu_info_s *msdu_info)
1699 {
1700 	uint8_t tos = 0, dscp_tid_override = 0;
1701 	uint8_t *hdr_ptr, *L3datap;
1702 	uint8_t is_mcast = 0;
1703 	qdf_ether_header_t *eh = NULL;
1704 	qdf_ethervlan_header_t *evh = NULL;
1705 	uint16_t   ether_type;
1706 	qdf_llc_t *llcHdr;
1707 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1708 
1709 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1710 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1711 		eh = (qdf_ether_header_t *)nbuf->data;
1712 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1713 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1714 	} else {
1715 		qdf_dot3_qosframe_t *qos_wh =
1716 			(qdf_dot3_qosframe_t *) nbuf->data;
1717 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1718 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1719 		return;
1720 	}
1721 
1722 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1723 	ether_type = eh->ether_type;
1724 
1725 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1726 	/*
1727 	 * Check if packet is dot3 or eth2 type.
1728 	 */
1729 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1730 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1731 				sizeof(*llcHdr));
1732 
1733 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1734 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1735 				sizeof(*llcHdr);
1736 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1737 					+ sizeof(*llcHdr) +
1738 					sizeof(qdf_net_vlanhdr_t));
1739 		} else {
1740 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1741 				sizeof(*llcHdr);
1742 		}
1743 	} else {
1744 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1745 			evh = (qdf_ethervlan_header_t *) eh;
1746 			ether_type = evh->ether_type;
1747 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1748 		}
1749 	}
1750 
1751 	/*
1752 	 * Find priority from IP TOS DSCP field
1753 	 */
1754 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1755 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1756 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1757 			/* Only for unicast frames */
1758 			if (!is_mcast) {
1759 				/* send it on VO queue */
1760 				msdu_info->tid = DP_VO_TID;
1761 			}
1762 		} else {
1763 			/*
1764 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1765 			 * from TOS byte.
1766 			 */
1767 			tos = ip->ip_tos;
1768 			dscp_tid_override = 1;
1769 
1770 		}
1771 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1772 		/* TODO
1773 		 * use flowlabel
1774 		 *igmpmld cases to be handled in phase 2
1775 		 */
1776 		unsigned long ver_pri_flowlabel;
1777 		unsigned long pri;
1778 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1779 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1780 			DP_IPV6_PRIORITY_SHIFT;
1781 		tos = pri;
1782 		dscp_tid_override = 1;
1783 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1784 		msdu_info->tid = DP_VO_TID;
1785 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1786 		/* Only for unicast frames */
1787 		if (!is_mcast) {
1788 			/* send ucast arp on VO queue */
1789 			msdu_info->tid = DP_VO_TID;
1790 		}
1791 	}
1792 
1793 	/*
1794 	 * Assign all MCAST packets to BE
1795 	 */
1796 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1797 		if (is_mcast) {
1798 			tos = 0;
1799 			dscp_tid_override = 1;
1800 		}
1801 	}
1802 
1803 	if (dscp_tid_override == 1) {
1804 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1805 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1806 	}
1807 
1808 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1809 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1810 
1811 	return;
1812 }
1813 
1814 /**
1815  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1816  * @vdev: DP vdev handle
1817  * @nbuf: skb
1818  * @msdu_info: msdu descriptor
1819  *
1820  * Software based TID classification is required when more than 2 DSCP-TID
1821  * mapping tables are needed.
1822  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1823  *
1824  * Return: void
1825  */
1826 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1827 				      struct dp_tx_msdu_info_s *msdu_info)
1828 {
1829 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1830 
1831 	/*
1832 	 * skip_sw_tid_classification flag will set in below cases-
1833 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1834 	 * 2. hlos_tid_override enabled for vdev
1835 	 * 3. mesh mode enabled for vdev
1836 	 */
1837 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1838 		/* Update tid in msdu_info from skb priority */
1839 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1840 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1841 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1842 
1843 			if (tid == DP_TX_INVALID_QOS_TAG)
1844 				return;
1845 
1846 			msdu_info->tid = tid;
1847 			return;
1848 		}
1849 		return;
1850 	}
1851 
1852 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1853 }
1854 
1855 #ifdef FEATURE_WLAN_TDLS
1856 /**
1857  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1858  * @soc: datapath SOC
1859  * @vdev: datapath vdev
1860  * @tx_desc: TX descriptor
1861  *
1862  * Return: None
1863  */
1864 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1865 				    struct dp_vdev *vdev,
1866 				    struct dp_tx_desc_s *tx_desc)
1867 {
1868 	if (vdev) {
1869 		if (vdev->is_tdls_frame) {
1870 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1871 			vdev->is_tdls_frame = false;
1872 		}
1873 	}
1874 }
1875 
1876 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1877 {
1878 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1879 
1880 	switch (soc->arch_id) {
1881 	case CDP_ARCH_TYPE_LI:
1882 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1883 		break;
1884 
1885 	case CDP_ARCH_TYPE_BE:
1886 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1887 		break;
1888 
1889 	case CDP_ARCH_TYPE_RH:
1890 		{
1891 			uint32_t *msg_word = (uint32_t *)htt_desc;
1892 
1893 			tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
1894 							*(msg_word + 3));
1895 		}
1896 		break;
1897 	default:
1898 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1899 		QDF_BUG(0);
1900 	}
1901 
1902 	return tx_status;
1903 }
1904 
1905 /**
1906  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1907  * @soc: dp_soc handle
1908  * @tx_desc: TX descriptor
1909  *
1910  * Return: None
1911  */
1912 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1913 					 struct dp_tx_desc_s *tx_desc)
1914 {
1915 	uint8_t tx_status = 0;
1916 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1917 
1918 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1919 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1920 						     DP_MOD_ID_TDLS);
1921 
1922 	if (qdf_unlikely(!vdev)) {
1923 		dp_err_rl("vdev is null!");
1924 		goto error;
1925 	}
1926 
1927 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1928 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1929 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1930 
1931 	if (vdev->tx_non_std_data_callback.func) {
1932 		qdf_nbuf_set_next(nbuf, NULL);
1933 		vdev->tx_non_std_data_callback.func(
1934 				vdev->tx_non_std_data_callback.ctxt,
1935 				nbuf, tx_status);
1936 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1937 		return;
1938 	} else {
1939 		dp_err_rl("callback func is null");
1940 	}
1941 
1942 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1943 error:
1944 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1945 	qdf_nbuf_free(nbuf);
1946 }
1947 
1948 /**
1949  * dp_tx_msdu_single_map() - do nbuf map
1950  * @vdev: DP vdev handle
1951  * @tx_desc: DP TX descriptor pointer
1952  * @nbuf: skb pointer
1953  *
1954  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1955  * operation done in other component.
1956  *
1957  * Return: QDF_STATUS
1958  */
1959 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1960 					       struct dp_tx_desc_s *tx_desc,
1961 					       qdf_nbuf_t nbuf)
1962 {
1963 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1964 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1965 						  nbuf,
1966 						  QDF_DMA_TO_DEVICE,
1967 						  nbuf->len);
1968 	else
1969 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1970 					   QDF_DMA_TO_DEVICE);
1971 }
1972 #else
1973 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1974 					   struct dp_vdev *vdev,
1975 					   struct dp_tx_desc_s *tx_desc)
1976 {
1977 }
1978 
1979 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1980 						struct dp_tx_desc_s *tx_desc)
1981 {
1982 }
1983 
1984 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1985 					       struct dp_tx_desc_s *tx_desc,
1986 					       qdf_nbuf_t nbuf)
1987 {
1988 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1989 					  nbuf,
1990 					  QDF_DMA_TO_DEVICE,
1991 					  nbuf->len);
1992 }
1993 #endif
1994 
1995 static inline
1996 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1997 				      struct dp_tx_desc_s *tx_desc,
1998 				      qdf_nbuf_t nbuf)
1999 {
2000 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2001 
2002 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
2003 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
2004 		return 0;
2005 
2006 	return qdf_nbuf_mapped_paddr_get(nbuf);
2007 }
2008 
2009 static inline
2010 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2011 {
2012 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
2013 					   desc->nbuf,
2014 					   desc->dma_addr,
2015 					   QDF_DMA_TO_DEVICE,
2016 					   desc->length);
2017 }
2018 
2019 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
2020 static inline bool
2021 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2022 {
2023 	struct net_device *ingress_dev;
2024 	skb_frag_t *frag;
2025 	uint16_t buf_len = 0;
2026 	uint16_t linear_data_len = 0;
2027 	uint8_t *payload_addr = NULL;
2028 
2029 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2030 
2031 	if (!ingress_dev)
2032 		return false;
2033 
2034 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2035 		dev_put(ingress_dev);
2036 		frag = &(skb_shinfo(nbuf)->frags[0]);
2037 		buf_len = skb_frag_size(frag);
2038 		payload_addr = (uint8_t *)skb_frag_address(frag);
2039 		linear_data_len = skb_headlen(nbuf);
2040 
2041 		buf_len += linear_data_len;
2042 		payload_addr = payload_addr - linear_data_len;
2043 		memcpy(payload_addr, nbuf->data, linear_data_len);
2044 
2045 		msdu_info->frm_type = dp_tx_frm_rmnet;
2046 		msdu_info->buf_len = buf_len;
2047 		msdu_info->payload_addr = payload_addr;
2048 
2049 		return true;
2050 	}
2051 	dev_put(ingress_dev);
2052 	return false;
2053 }
2054 
2055 static inline
2056 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2057 				    struct dp_tx_desc_s *tx_desc)
2058 {
2059 	qdf_dma_addr_t paddr;
2060 
2061 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2062 	tx_desc->length  = msdu_info->buf_len;
2063 
2064 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2065 				 (void *)(msdu_info->payload_addr +
2066 					  msdu_info->buf_len));
2067 
2068 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2069 	return paddr;
2070 }
2071 #else
2072 static inline bool
2073 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2074 {
2075 	return false;
2076 }
2077 
2078 static inline
2079 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2080 				    struct dp_tx_desc_s *tx_desc)
2081 {
2082 	return 0;
2083 }
2084 #endif
2085 
2086 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2087 static inline
2088 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2089 			      struct dp_tx_desc_s *tx_desc,
2090 			      qdf_nbuf_t nbuf)
2091 {
2092 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2093 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2094 					 (void *)(nbuf->data + nbuf->len));
2095 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2096 	} else {
2097 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2098 	}
2099 }
2100 
2101 static inline
2102 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2103 		      struct dp_tx_desc_s *desc)
2104 {
2105 	if (qdf_unlikely(!(desc->flags &
2106 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2107 		return dp_tx_nbuf_unmap_regular(soc, desc);
2108 }
2109 #else
2110 static inline
2111 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2112 			      struct dp_tx_desc_s *tx_desc,
2113 			      qdf_nbuf_t nbuf)
2114 {
2115 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2116 }
2117 
2118 static inline
2119 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2120 		      struct dp_tx_desc_s *desc)
2121 {
2122 	return dp_tx_nbuf_unmap_regular(soc, desc);
2123 }
2124 #endif
2125 
2126 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2127 static inline
2128 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2129 {
2130 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE))) {
2131 		dp_tx_nbuf_unmap(soc, desc);
2132 		desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2133 	}
2134 }
2135 
2136 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2137 {
2138 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2139 		dp_tx_nbuf_unmap(soc, desc);
2140 }
2141 #else
2142 static inline
2143 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2144 {
2145 }
2146 
2147 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2148 {
2149 	dp_tx_nbuf_unmap(soc, desc);
2150 }
2151 #endif
2152 
2153 #ifdef MESH_MODE_SUPPORT
2154 /**
2155  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2156  * @soc: datapath SOC
2157  * @vdev: datapath vdev
2158  * @tx_desc: TX descriptor
2159  *
2160  * Return: None
2161  */
2162 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2163 					   struct dp_vdev *vdev,
2164 					   struct dp_tx_desc_s *tx_desc)
2165 {
2166 	if (qdf_unlikely(vdev->mesh_vdev))
2167 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2168 }
2169 
2170 /**
2171  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2172  * @soc: dp_soc handle
2173  * @tx_desc: TX descriptor
2174  * @delayed_free: delay the nbuf free
2175  *
2176  * Return: nbuf to be freed late
2177  */
2178 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2179 						   struct dp_tx_desc_s *tx_desc,
2180 						   bool delayed_free)
2181 {
2182 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2183 	struct dp_vdev *vdev = NULL;
2184 
2185 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2186 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2187 		if (vdev)
2188 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2189 
2190 		if (delayed_free)
2191 			return nbuf;
2192 
2193 		qdf_nbuf_free(nbuf);
2194 	} else {
2195 		if (vdev && vdev->osif_tx_free_ext) {
2196 			vdev->osif_tx_free_ext((nbuf));
2197 		} else {
2198 			if (delayed_free)
2199 				return nbuf;
2200 
2201 			qdf_nbuf_free(nbuf);
2202 		}
2203 	}
2204 
2205 	if (vdev)
2206 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2207 
2208 	return NULL;
2209 }
2210 #else
2211 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2212 					   struct dp_vdev *vdev,
2213 					   struct dp_tx_desc_s *tx_desc)
2214 {
2215 }
2216 
2217 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2218 						   struct dp_tx_desc_s *tx_desc,
2219 						   bool delayed_free)
2220 {
2221 	return NULL;
2222 }
2223 #endif
2224 
2225 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2226 {
2227 	struct dp_pdev *pdev = NULL;
2228 	struct dp_ast_entry *src_ast_entry = NULL;
2229 	struct dp_ast_entry *dst_ast_entry = NULL;
2230 	struct dp_soc *soc = NULL;
2231 
2232 	qdf_assert(vdev);
2233 	pdev = vdev->pdev;
2234 	qdf_assert(pdev);
2235 	soc = pdev->soc;
2236 
2237 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2238 				(soc, dstmac, vdev->pdev->pdev_id);
2239 
2240 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2241 				(soc, srcmac, vdev->pdev->pdev_id);
2242 	if (dst_ast_entry && src_ast_entry) {
2243 		if (dst_ast_entry->peer_id ==
2244 				src_ast_entry->peer_id)
2245 			return 1;
2246 	}
2247 
2248 	return 0;
2249 }
2250 
2251 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2252 	defined(WLAN_MCAST_MLO)
2253 /* MLO peer id for reinject*/
2254 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2255 /* MLO vdev id inc offset */
2256 #define DP_MLO_VDEV_ID_OFFSET 0x80
2257 
2258 #ifdef QCA_SUPPORT_WDS_EXTENDED
2259 static inline bool
2260 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2261 {
2262 	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
2263 		return true;
2264 
2265 	return false;
2266 }
2267 #else
2268 static inline bool
2269 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2270 {
2271 	return false;
2272 }
2273 #endif
2274 
2275 static inline void
2276 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2277 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2278 {
2279 	/* wds ext enabled will not set the TO_FW bit */
2280 	if (dp_tx_wds_ext_check(tx_exc_metadata))
2281 		return;
2282 
2283 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2284 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2285 		qdf_atomic_inc(&soc->num_tx_exception);
2286 	}
2287 }
2288 
2289 static inline void
2290 dp_tx_update_mcast_param(uint16_t peer_id,
2291 			 uint16_t *htt_tcl_metadata,
2292 			 struct dp_vdev *vdev,
2293 			 struct dp_tx_msdu_info_s *msdu_info)
2294 {
2295 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2296 		*htt_tcl_metadata = 0;
2297 		DP_TX_TCL_METADATA_TYPE_SET(
2298 				*htt_tcl_metadata,
2299 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2300 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2301 						    msdu_info->gsn);
2302 
2303 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2304 		HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2305 							*htt_tcl_metadata, 1);
2306 	} else {
2307 		msdu_info->vdev_id = vdev->vdev_id;
2308 	}
2309 }
2310 #else
2311 static inline void
2312 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2313 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2314 {
2315 }
2316 
2317 static inline void
2318 dp_tx_update_mcast_param(uint16_t peer_id,
2319 			 uint16_t *htt_tcl_metadata,
2320 			 struct dp_vdev *vdev,
2321 			 struct dp_tx_msdu_info_s *msdu_info)
2322 {
2323 }
2324 #endif
2325 
2326 #ifdef DP_TX_SW_DROP_STATS_INC
2327 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2328 				 qdf_nbuf_t nbuf,
2329 				 enum cdp_tx_sw_drop drop_code)
2330 {
2331 	/* EAPOL Drop stats */
2332 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2333 		switch (drop_code) {
2334 		case TX_DESC_ERR:
2335 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2336 			break;
2337 		case TX_HAL_RING_ACCESS_ERR:
2338 			DP_STATS_INC(pdev,
2339 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2340 			break;
2341 		case TX_DMA_MAP_ERR:
2342 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2343 			break;
2344 		case TX_HW_ENQUEUE:
2345 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2346 			break;
2347 		case TX_SW_ENQUEUE:
2348 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2349 			break;
2350 		default:
2351 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2352 			break;
2353 		}
2354 	}
2355 }
2356 #else
2357 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2358 				 qdf_nbuf_t nbuf,
2359 				 enum cdp_tx_sw_drop drop_code)
2360 {
2361 }
2362 #endif
2363 
2364 qdf_nbuf_t
2365 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2366 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2367 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2368 {
2369 	struct dp_pdev *pdev = vdev->pdev;
2370 	struct dp_soc *soc = pdev->soc;
2371 	struct dp_tx_desc_s *tx_desc;
2372 	QDF_STATUS status;
2373 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2374 	uint16_t htt_tcl_metadata = 0;
2375 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2376 	uint8_t tid = msdu_info->tid;
2377 	struct cdp_tid_tx_stats *tid_stats = NULL;
2378 	qdf_dma_addr_t paddr;
2379 
2380 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2381 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2382 			msdu_info, tx_exc_metadata);
2383 	if (!tx_desc) {
2384 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2385 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2386 		drop_code = TX_DESC_ERR;
2387 		goto fail_return;
2388 	}
2389 
2390 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2391 
2392 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2393 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2394 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2395 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2396 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2397 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2398 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2399 					       peer_id);
2400 		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
2401 	} else
2402 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2403 
2404 	if (msdu_info->exception_fw)
2405 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2406 
2407 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2408 					 !pdev->enhanced_stats_en);
2409 
2410 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2411 
2412 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
2413 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
2414 	else
2415 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2416 
2417 	if (!paddr) {
2418 		/* Handle failure */
2419 		dp_err("qdf_nbuf_map failed");
2420 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2421 		drop_code = TX_DMA_MAP_ERR;
2422 		goto release_desc;
2423 	}
2424 
2425 	tx_desc->dma_addr = paddr;
2426 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2427 			       tx_desc->id, DP_TX_DESC_MAP);
2428 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2429 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2430 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2431 					     htt_tcl_metadata,
2432 					     tx_exc_metadata, msdu_info);
2433 
2434 	if (status != QDF_STATUS_SUCCESS) {
2435 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2436 			     tx_desc, tx_q->ring_id);
2437 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2438 				       tx_desc->id, DP_TX_DESC_UNMAP);
2439 		dp_tx_nbuf_unmap(soc, tx_desc);
2440 		drop_code = TX_HW_ENQUEUE;
2441 		goto release_desc;
2442 	}
2443 
2444 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2445 	return NULL;
2446 
2447 release_desc:
2448 	dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
2449 
2450 fail_return:
2451 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2452 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2453 	tid_stats = &pdev->stats.tid_stats.
2454 		    tid_tx_stats[tx_q->ring_id][tid];
2455 	tid_stats->swdrop_cnt[drop_code]++;
2456 	return nbuf;
2457 }
2458 
2459 /**
2460  * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
2461  * @soc: Soc handle
2462  * @desc: software Tx descriptor to be processed
2463  *
2464  * Return: 0 if Success
2465  */
2466 #ifdef FEATURE_WLAN_TDLS
2467 static inline int
2468 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2469 {
2470 	/* If it is TDLS mgmt, don't unmap or free the frame */
2471 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2472 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2473 		return 0;
2474 	}
2475 	return 1;
2476 }
2477 #else
2478 static inline int
2479 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2480 {
2481 	return 1;
2482 }
2483 #endif
2484 
2485 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2486 			       bool delayed_free)
2487 {
2488 	qdf_nbuf_t nbuf = desc->nbuf;
2489 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2490 
2491 	/* nbuf already freed in vdev detach path */
2492 	if (!nbuf)
2493 		return NULL;
2494 
2495 	if (!dp_tdls_tx_comp_free_buff(soc, desc))
2496 		return NULL;
2497 
2498 	/* 0 : MSDU buffer, 1 : MLE */
2499 	if (desc->msdu_ext_desc) {
2500 		/* TSO free */
2501 		if (hal_tx_ext_desc_get_tso_enable(
2502 					desc->msdu_ext_desc->vaddr)) {
2503 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2504 					       desc->id, DP_TX_COMP_MSDU_EXT);
2505 			dp_tx_tso_seg_history_add(soc,
2506 						  desc->msdu_ext_desc->tso_desc,
2507 						  desc->nbuf, desc->id, type);
2508 			/* unmap eash TSO seg before free the nbuf */
2509 			dp_tx_tso_unmap_segment(soc,
2510 						desc->msdu_ext_desc->tso_desc,
2511 						desc->msdu_ext_desc->
2512 						tso_num_desc);
2513 			goto nbuf_free;
2514 		}
2515 
2516 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2517 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2518 			qdf_dma_addr_t iova;
2519 			uint32_t frag_len;
2520 			uint32_t i;
2521 
2522 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2523 						     QDF_DMA_TO_DEVICE,
2524 						     qdf_nbuf_headlen(nbuf));
2525 
2526 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2527 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2528 							      &iova,
2529 							      &frag_len);
2530 				if (!iova || !frag_len)
2531 					break;
2532 
2533 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2534 						   QDF_DMA_TO_DEVICE);
2535 			}
2536 
2537 			goto nbuf_free;
2538 		}
2539 	}
2540 	/* If it's ME frame, dont unmap the cloned nbuf's */
2541 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2542 		goto nbuf_free;
2543 
2544 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2545 	dp_tx_unmap(soc, desc);
2546 
2547 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2548 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2549 
2550 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2551 		return NULL;
2552 
2553 nbuf_free:
2554 	if (delayed_free)
2555 		return nbuf;
2556 
2557 	qdf_nbuf_free(nbuf);
2558 
2559 	return NULL;
2560 }
2561 
2562 /**
2563  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2564  * @soc: DP soc handle
2565  * @nbuf: skb
2566  * @msdu_info: MSDU info
2567  *
2568  * Return: None
2569  */
2570 static inline void
2571 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2572 		   struct dp_tx_msdu_info_s *msdu_info)
2573 {
2574 	uint32_t cur_idx;
2575 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2576 
2577 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2578 				     qdf_nbuf_headlen(nbuf));
2579 
2580 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2581 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2582 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2583 				    seg->frags[cur_idx].paddr_hi) << 32),
2584 				   seg->frags[cur_idx].len,
2585 				   QDF_DMA_TO_DEVICE);
2586 }
2587 
2588 #if QDF_LOCK_STATS
2589 noinline
2590 #else
2591 #endif
2592 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2593 				    struct dp_tx_msdu_info_s *msdu_info)
2594 {
2595 	uint32_t i;
2596 	struct dp_pdev *pdev = vdev->pdev;
2597 	struct dp_soc *soc = pdev->soc;
2598 	struct dp_tx_desc_s *tx_desc;
2599 	bool is_cce_classified = false;
2600 	QDF_STATUS status;
2601 	uint16_t htt_tcl_metadata = 0;
2602 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2603 	struct cdp_tid_tx_stats *tid_stats = NULL;
2604 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2605 
2606 	if (msdu_info->frm_type == dp_tx_frm_me)
2607 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2608 
2609 	i = 0;
2610 	/* Print statement to track i and num_seg */
2611 	/*
2612 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2613 	 * descriptors using information in msdu_info
2614 	 */
2615 	while (i < msdu_info->num_seg) {
2616 		/*
2617 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2618 		 * descriptor
2619 		 */
2620 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2621 				tx_q->desc_pool_id);
2622 
2623 		if (!tx_desc) {
2624 			if (msdu_info->frm_type == dp_tx_frm_me) {
2625 				prep_desc_fail++;
2626 				dp_tx_me_free_buf(pdev,
2627 					(void *)(msdu_info->u.sg_info
2628 						.curr_seg->frags[0].vaddr));
2629 				if (prep_desc_fail == msdu_info->num_seg) {
2630 					/*
2631 					 * Unmap is needed only if descriptor
2632 					 * preparation failed for all segments.
2633 					 */
2634 					qdf_nbuf_unmap(soc->osdev,
2635 						       msdu_info->u.sg_info.
2636 						       curr_seg->nbuf,
2637 						       QDF_DMA_TO_DEVICE);
2638 				}
2639 				/*
2640 				 * Free the nbuf for the current segment
2641 				 * and make it point to the next in the list.
2642 				 * For me, there are as many segments as there
2643 				 * are no of clients.
2644 				 */
2645 				qdf_nbuf_free(msdu_info->u.sg_info
2646 					      .curr_seg->nbuf);
2647 				if (msdu_info->u.sg_info.curr_seg->next) {
2648 					msdu_info->u.sg_info.curr_seg =
2649 						msdu_info->u.sg_info
2650 						.curr_seg->next;
2651 					nbuf = msdu_info->u.sg_info
2652 					       .curr_seg->nbuf;
2653 				}
2654 				i++;
2655 				continue;
2656 			}
2657 
2658 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2659 				dp_tx_tso_seg_history_add(
2660 						soc,
2661 						msdu_info->u.tso_info.curr_seg,
2662 						nbuf, 0, DP_TX_DESC_UNMAP);
2663 				dp_tx_tso_unmap_segment(soc,
2664 							msdu_info->u.tso_info.
2665 							curr_seg,
2666 							msdu_info->u.tso_info.
2667 							tso_num_seg_list);
2668 
2669 				if (msdu_info->u.tso_info.curr_seg->next) {
2670 					msdu_info->u.tso_info.curr_seg =
2671 					msdu_info->u.tso_info.curr_seg->next;
2672 					i++;
2673 					continue;
2674 				}
2675 			}
2676 
2677 			if (msdu_info->frm_type == dp_tx_frm_sg)
2678 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2679 
2680 			goto done;
2681 		}
2682 
2683 		if (msdu_info->frm_type == dp_tx_frm_me) {
2684 			tx_desc->msdu_ext_desc->me_buffer =
2685 				(struct dp_tx_me_buf_t *)msdu_info->
2686 				u.sg_info.curr_seg->frags[0].vaddr;
2687 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2688 		}
2689 
2690 		if (is_cce_classified)
2691 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2692 
2693 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2694 		if (msdu_info->exception_fw) {
2695 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2696 		}
2697 
2698 		dp_tx_is_hp_update_required(i, msdu_info);
2699 
2700 		/*
2701 		 * For frames with multiple segments (TSO, ME), jump to next
2702 		 * segment.
2703 		 */
2704 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2705 			if (msdu_info->u.tso_info.curr_seg->next) {
2706 				msdu_info->u.tso_info.curr_seg =
2707 					msdu_info->u.tso_info.curr_seg->next;
2708 
2709 				/*
2710 				 * If this is a jumbo nbuf, then increment the
2711 				 * number of nbuf users for each additional
2712 				 * segment of the msdu. This will ensure that
2713 				 * the skb is freed only after receiving tx
2714 				 * completion for all segments of an nbuf
2715 				 */
2716 				qdf_nbuf_inc_users(nbuf);
2717 
2718 				/* Check with MCL if this is needed */
2719 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2720 				 */
2721 			}
2722 		}
2723 
2724 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2725 					 &htt_tcl_metadata,
2726 					 vdev,
2727 					 msdu_info);
2728 		/*
2729 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2730 		 */
2731 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2732 						     htt_tcl_metadata,
2733 						     NULL, msdu_info);
2734 
2735 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2736 
2737 		if (status != QDF_STATUS_SUCCESS) {
2738 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2739 				   tx_desc, tx_q->ring_id);
2740 
2741 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2742 			tid_stats = &pdev->stats.tid_stats.
2743 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2744 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2745 
2746 			if (msdu_info->frm_type == dp_tx_frm_me) {
2747 				hw_enq_fail++;
2748 				if (hw_enq_fail == msdu_info->num_seg) {
2749 					/*
2750 					 * Unmap is needed only if enqueue
2751 					 * failed for all segments.
2752 					 */
2753 					qdf_nbuf_unmap(soc->osdev,
2754 						       msdu_info->u.sg_info.
2755 						       curr_seg->nbuf,
2756 						       QDF_DMA_TO_DEVICE);
2757 				}
2758 				/*
2759 				 * Free the nbuf for the current segment
2760 				 * and make it point to the next in the list.
2761 				 * For me, there are as many segments as there
2762 				 * are no of clients.
2763 				 */
2764 				qdf_nbuf_free(msdu_info->u.sg_info
2765 					      .curr_seg->nbuf);
2766 				dp_tx_desc_release(soc, tx_desc,
2767 						   tx_q->desc_pool_id);
2768 				if (msdu_info->u.sg_info.curr_seg->next) {
2769 					msdu_info->u.sg_info.curr_seg =
2770 						msdu_info->u.sg_info
2771 						.curr_seg->next;
2772 					nbuf = msdu_info->u.sg_info
2773 					       .curr_seg->nbuf;
2774 				} else
2775 					break;
2776 				i++;
2777 				continue;
2778 			}
2779 
2780 			/*
2781 			 * For TSO frames, the nbuf users increment done for
2782 			 * the current segment has to be reverted, since the
2783 			 * hw enqueue for this segment failed
2784 			 */
2785 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2786 			    msdu_info->u.tso_info.curr_seg) {
2787 				/*
2788 				 * unmap and free current,
2789 				 * retransmit remaining segments
2790 				 */
2791 				dp_tx_comp_free_buf(soc, tx_desc, false);
2792 				i++;
2793 				dp_tx_desc_release(soc, tx_desc,
2794 						   tx_q->desc_pool_id);
2795 				continue;
2796 			}
2797 
2798 			if (msdu_info->frm_type == dp_tx_frm_sg)
2799 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2800 
2801 			dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
2802 			goto done;
2803 		}
2804 
2805 		/*
2806 		 * TODO
2807 		 * if tso_info structure can be modified to have curr_seg
2808 		 * as first element, following 2 blocks of code (for TSO and SG)
2809 		 * can be combined into 1
2810 		 */
2811 
2812 		/*
2813 		 * For Multicast-Unicast converted packets,
2814 		 * each converted frame (for a client) is represented as
2815 		 * 1 segment
2816 		 */
2817 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2818 				(msdu_info->frm_type == dp_tx_frm_me)) {
2819 			if (msdu_info->u.sg_info.curr_seg->next) {
2820 				msdu_info->u.sg_info.curr_seg =
2821 					msdu_info->u.sg_info.curr_seg->next;
2822 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2823 			} else
2824 				break;
2825 		}
2826 		i++;
2827 	}
2828 
2829 	nbuf = NULL;
2830 
2831 done:
2832 	return nbuf;
2833 }
2834 
2835 /**
2836  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2837  *                     for SG frames
2838  * @vdev: DP vdev handle
2839  * @nbuf: skb
2840  * @seg_info: Pointer to Segment info Descriptor to be prepared
2841  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2842  *
2843  * Return: NULL on success,
2844  *         nbuf when it fails to send
2845  */
2846 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2847 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2848 {
2849 	uint32_t cur_frag, nr_frags, i;
2850 	qdf_dma_addr_t paddr;
2851 	struct dp_tx_sg_info_s *sg_info;
2852 
2853 	sg_info = &msdu_info->u.sg_info;
2854 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2855 
2856 	if (QDF_STATUS_SUCCESS !=
2857 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2858 					   QDF_DMA_TO_DEVICE,
2859 					   qdf_nbuf_headlen(nbuf))) {
2860 		dp_tx_err("dma map error");
2861 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2862 		qdf_nbuf_free(nbuf);
2863 		return NULL;
2864 	}
2865 
2866 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2867 	seg_info->frags[0].paddr_lo = paddr;
2868 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2869 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2870 	seg_info->frags[0].vaddr = (void *) nbuf;
2871 
2872 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2873 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2874 							    nbuf, 0,
2875 							    QDF_DMA_TO_DEVICE,
2876 							    cur_frag)) {
2877 			dp_tx_err("frag dma map error");
2878 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2879 			goto map_err;
2880 		}
2881 
2882 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2883 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2884 		seg_info->frags[cur_frag + 1].paddr_hi =
2885 			((uint64_t) paddr) >> 32;
2886 		seg_info->frags[cur_frag + 1].len =
2887 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2888 	}
2889 
2890 	seg_info->frag_cnt = (cur_frag + 1);
2891 	seg_info->total_len = qdf_nbuf_len(nbuf);
2892 	seg_info->next = NULL;
2893 
2894 	sg_info->curr_seg = seg_info;
2895 
2896 	msdu_info->frm_type = dp_tx_frm_sg;
2897 	msdu_info->num_seg = 1;
2898 
2899 	return nbuf;
2900 map_err:
2901 	/* restore paddr into nbuf before calling unmap */
2902 	qdf_nbuf_mapped_paddr_set(nbuf,
2903 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2904 				  ((uint64_t)
2905 				  seg_info->frags[0].paddr_hi) << 32));
2906 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2907 				     QDF_DMA_TO_DEVICE,
2908 				     seg_info->frags[0].len);
2909 	for (i = 1; i <= cur_frag; i++) {
2910 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2911 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2912 				   seg_info->frags[i].paddr_hi) << 32),
2913 				   seg_info->frags[i].len,
2914 				   QDF_DMA_TO_DEVICE);
2915 	}
2916 	qdf_nbuf_free(nbuf);
2917 	return NULL;
2918 }
2919 
2920 /**
2921  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2922  * @vdev: DP vdev handle
2923  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2924  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2925  *
2926  * Return: NULL on failure,
2927  *         nbuf when extracted successfully
2928  */
2929 static
2930 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2931 				    struct dp_tx_msdu_info_s *msdu_info,
2932 				    uint16_t ppdu_cookie)
2933 {
2934 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2935 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2936 
2937 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2938 
2939 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2940 				(msdu_info->meta_data[5], 1);
2941 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2942 				(msdu_info->meta_data[5], 1);
2943 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2944 				(msdu_info->meta_data[6], ppdu_cookie);
2945 
2946 	msdu_info->exception_fw = 1;
2947 	msdu_info->is_tx_sniffer = 1;
2948 }
2949 
2950 #ifdef MESH_MODE_SUPPORT
2951 
2952 /**
2953  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2954  *				and prepare msdu_info for mesh frames.
2955  * @vdev: DP vdev handle
2956  * @nbuf: skb
2957  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2958  *
2959  * Return: NULL on failure,
2960  *         nbuf when extracted successfully
2961  */
2962 static
2963 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2964 				struct dp_tx_msdu_info_s *msdu_info)
2965 {
2966 	struct meta_hdr_s *mhdr;
2967 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2968 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2969 
2970 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2971 
2972 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2973 		msdu_info->exception_fw = 0;
2974 		goto remove_meta_hdr;
2975 	}
2976 
2977 	msdu_info->exception_fw = 1;
2978 
2979 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2980 
2981 	meta_data->host_tx_desc_pool = 1;
2982 	meta_data->update_peer_cache = 1;
2983 	meta_data->learning_frame = 1;
2984 
2985 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2986 		meta_data->power = mhdr->power;
2987 
2988 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2989 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2990 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2991 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2992 
2993 		meta_data->dyn_bw = 1;
2994 
2995 		meta_data->valid_pwr = 1;
2996 		meta_data->valid_mcs_mask = 1;
2997 		meta_data->valid_nss_mask = 1;
2998 		meta_data->valid_preamble_type  = 1;
2999 		meta_data->valid_retries = 1;
3000 		meta_data->valid_bw_info = 1;
3001 	}
3002 
3003 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
3004 		meta_data->encrypt_type = 0;
3005 		meta_data->valid_encrypt_type = 1;
3006 		meta_data->learning_frame = 0;
3007 	}
3008 
3009 	meta_data->valid_key_flags = 1;
3010 	meta_data->key_flags = (mhdr->keyix & 0x3);
3011 
3012 remove_meta_hdr:
3013 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
3014 		dp_tx_err("qdf_nbuf_pull_head failed");
3015 		qdf_nbuf_free(nbuf);
3016 		return NULL;
3017 	}
3018 
3019 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
3020 
3021 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
3022 		   " tid %d to_fw %d",
3023 		   msdu_info->meta_data[0],
3024 		   msdu_info->meta_data[1],
3025 		   msdu_info->meta_data[2],
3026 		   msdu_info->meta_data[3],
3027 		   msdu_info->meta_data[4],
3028 		   msdu_info->meta_data[5],
3029 		   msdu_info->tid, msdu_info->exception_fw);
3030 
3031 	return nbuf;
3032 }
3033 #else
3034 static
3035 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3036 				struct dp_tx_msdu_info_s *msdu_info)
3037 {
3038 	return nbuf;
3039 }
3040 
3041 #endif
3042 
3043 /**
3044  * dp_check_exc_metadata() - Checks if parameters are valid
3045  * @tx_exc: holds all exception path parameters
3046  *
3047  * Return: true when all the parameters are valid else false
3048  *
3049  */
3050 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3051 {
3052 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3053 			    HTT_INVALID_TID);
3054 	bool invalid_encap_type =
3055 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3056 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3057 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3058 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3059 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3060 			       tx_exc->ppdu_cookie == 0);
3061 
3062 	if (tx_exc->is_intrabss_fwd)
3063 		return true;
3064 
3065 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3066 	    invalid_cookie) {
3067 		return false;
3068 	}
3069 
3070 	return true;
3071 }
3072 
3073 #ifdef ATH_SUPPORT_IQUE
3074 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3075 {
3076 	qdf_ether_header_t *eh;
3077 
3078 	/* Mcast to Ucast Conversion*/
3079 	if (qdf_likely(!vdev->mcast_enhancement_en))
3080 		return true;
3081 
3082 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3083 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3084 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3085 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3086 		qdf_nbuf_set_next(nbuf, NULL);
3087 
3088 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
3089 				 qdf_nbuf_len(nbuf));
3090 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3091 				QDF_STATUS_SUCCESS) {
3092 			return false;
3093 		}
3094 
3095 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3096 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3097 					QDF_STATUS_SUCCESS) {
3098 				return false;
3099 			}
3100 		}
3101 	}
3102 
3103 	return true;
3104 }
3105 #else
3106 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3107 {
3108 	return true;
3109 }
3110 #endif
3111 
3112 #ifdef QCA_SUPPORT_WDS_EXTENDED
3113 /**
3114  * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
3115  * @vdev: vdev handle
3116  * @nbuf: skb
3117  *
3118  * Return: true if frame is dropped, false otherwise
3119  */
3120 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3121 {
3122 	/* Drop tx mcast and WDS Extended feature check */
3123 	if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
3124 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3125 						qdf_nbuf_data(nbuf);
3126 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3127 			DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
3128 			return true;
3129 		}
3130 	}
3131 
3132 	return false;
3133 }
3134 #else
3135 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3136 {
3137 	return false;
3138 }
3139 #endif
3140 /**
3141  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3142  * @nbuf: qdf_nbuf_t
3143  * @vdev: struct dp_vdev *
3144  *
3145  * Allow packet for processing only if it is for peer client which is
3146  * connected with same vap. Drop packet if client is connected to
3147  * different vap.
3148  *
3149  * Return: QDF_STATUS
3150  */
3151 static inline QDF_STATUS
3152 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3153 {
3154 	struct dp_ast_entry *dst_ast_entry = NULL;
3155 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3156 
3157 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3158 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3159 		return QDF_STATUS_SUCCESS;
3160 
3161 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3162 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3163 							eh->ether_dhost,
3164 							vdev->vdev_id);
3165 
3166 	/* If there is no ast entry, return failure */
3167 	if (qdf_unlikely(!dst_ast_entry)) {
3168 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3169 		return QDF_STATUS_E_FAILURE;
3170 	}
3171 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3172 
3173 	return QDF_STATUS_SUCCESS;
3174 }
3175 
3176 /**
3177  * dp_tx_nawds_handler() - NAWDS handler
3178  *
3179  * @soc: DP soc handle
3180  * @vdev: DP vdev handle
3181  * @msdu_info: msdu_info required to create HTT metadata
3182  * @nbuf: skb
3183  * @sa_peer_id:
3184  *
3185  * This API transfers the multicast frames with the peer id
3186  * on NAWDS enabled peer.
3187  *
3188  * Return: none
3189  */
3190 
3191 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3192 			 struct dp_tx_msdu_info_s *msdu_info,
3193 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3194 {
3195 	struct dp_peer *peer = NULL;
3196 	qdf_nbuf_t nbuf_clone = NULL;
3197 	uint16_t peer_id = DP_INVALID_PEER;
3198 	struct dp_txrx_peer *txrx_peer;
3199 	uint8_t link_id = 0;
3200 
3201 	/* This check avoids pkt forwarding which is entered
3202 	 * in the ast table but still doesn't have valid peerid.
3203 	 */
3204 	if (sa_peer_id == HTT_INVALID_PEER)
3205 		return;
3206 
3207 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3208 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3209 		txrx_peer = dp_get_txrx_peer(peer);
3210 		if (!txrx_peer)
3211 			continue;
3212 
3213 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3214 			peer_id = peer->peer_id;
3215 
3216 			if (!dp_peer_is_primary_link_peer(peer))
3217 				continue;
3218 
3219 			/* In the case of wds ext peer mcast traffic will be
3220 			 * sent as part of VLAN interface
3221 			 */
3222 			if (dp_peer_is_wds_ext_peer(txrx_peer))
3223 				continue;
3224 
3225 			/* Multicast packets needs to be
3226 			 * dropped in case of intra bss forwarding
3227 			 */
3228 			if (sa_peer_id == txrx_peer->peer_id) {
3229 				dp_tx_debug("multicast packet");
3230 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3231 							  tx.nawds_mcast_drop,
3232 							  1, link_id);
3233 				continue;
3234 			}
3235 
3236 			nbuf_clone = qdf_nbuf_clone(nbuf);
3237 
3238 			if (!nbuf_clone) {
3239 				QDF_TRACE(QDF_MODULE_ID_DP,
3240 					  QDF_TRACE_LEVEL_ERROR,
3241 					  FL("nbuf clone failed"));
3242 				break;
3243 			}
3244 
3245 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3246 							    msdu_info, peer_id,
3247 							    NULL);
3248 
3249 			if (nbuf_clone) {
3250 				dp_tx_debug("pkt send failed");
3251 				qdf_nbuf_free(nbuf_clone);
3252 			} else {
3253 				if (peer_id != DP_INVALID_PEER)
3254 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3255 								      tx.nawds_mcast,
3256 								      1, qdf_nbuf_len(nbuf), link_id);
3257 			}
3258 		}
3259 	}
3260 
3261 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3262 }
3263 
3264 #ifdef WLAN_MCAST_MLO
3265 static inline bool
3266 dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3267 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3268 {
3269 	if (!tx_exc_metadata->is_mlo_mcast && qdf_unlikely(vdev->mesh_vdev))
3270 		return true;
3271 
3272 	return false;
3273 }
3274 #else
3275 static inline bool
3276 dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3277 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3278 {
3279 	if (qdf_unlikely(vdev->mesh_vdev))
3280 		return true;
3281 
3282 	return false;
3283 }
3284 #endif
3285 
3286 qdf_nbuf_t
3287 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3288 		     qdf_nbuf_t nbuf,
3289 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3290 {
3291 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3292 	struct dp_tx_msdu_info_s msdu_info;
3293 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3294 						     DP_MOD_ID_TX_EXCEPTION);
3295 
3296 	if (qdf_unlikely(!vdev))
3297 		goto fail;
3298 
3299 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3300 
3301 	if (!tx_exc_metadata)
3302 		goto fail;
3303 
3304 	msdu_info.tid = tx_exc_metadata->tid;
3305 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3306 			 QDF_MAC_ADDR_REF(nbuf->data));
3307 
3308 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3309 
3310 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3311 		dp_tx_err("Invalid parameters in exception path");
3312 		goto fail;
3313 	}
3314 
3315 	/* for peer based metadata check if peer is valid */
3316 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3317 		struct dp_peer *peer = NULL;
3318 
3319 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3320 					      tx_exc_metadata->peer_id,
3321 					      DP_MOD_ID_TX_EXCEPTION);
3322 		if (qdf_unlikely(!peer)) {
3323 			DP_STATS_INC(vdev,
3324 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3325 				     1);
3326 			goto fail;
3327 		}
3328 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3329 	}
3330 	/* Basic sanity checks for unsupported packets */
3331 
3332 	/* MESH mode */
3333 	if (dp_tx_check_mesh_vdev(vdev, tx_exc_metadata)) {
3334 		dp_tx_err("Mesh mode is not supported in exception path");
3335 		goto fail;
3336 	}
3337 
3338 	/*
3339 	 * Classify the frame and call corresponding
3340 	 * "prepare" function which extracts the segment (TSO)
3341 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3342 	 * into MSDU_INFO structure which is later used to fill
3343 	 * SW and HW descriptors.
3344 	 */
3345 	if (qdf_nbuf_is_tso(nbuf)) {
3346 		dp_verbose_debug("TSO frame %pK", vdev);
3347 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3348 				 qdf_nbuf_len(nbuf));
3349 
3350 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3351 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3352 					 qdf_nbuf_len(nbuf));
3353 			goto fail;
3354 		}
3355 
3356 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3357 
3358 		goto send_multiple;
3359 	}
3360 
3361 	/* SG */
3362 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3363 		struct dp_tx_seg_info_s seg_info = {0};
3364 
3365 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3366 		if (!nbuf)
3367 			goto fail;
3368 
3369 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3370 
3371 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3372 				 qdf_nbuf_len(nbuf));
3373 
3374 		goto send_multiple;
3375 	}
3376 
3377 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3378 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3379 				 qdf_nbuf_len(nbuf));
3380 
3381 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3382 					       tx_exc_metadata->ppdu_cookie);
3383 	}
3384 
3385 	/*
3386 	 * Get HW Queue to use for this frame.
3387 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3388 	 * dedicated for data and 1 for command.
3389 	 * "queue_id" maps to one hardware ring.
3390 	 *  With each ring, we also associate a unique Tx descriptor pool
3391 	 *  to minimize lock contention for these resources.
3392 	 */
3393 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3394 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3395 		     1);
3396 
3397 	/*
3398 	 * if the packet is mcast packet send through mlo_macst handler
3399 	 * for all prnt_vdevs
3400 	 */
3401 
3402 	if (soc->arch_ops.dp_tx_mlo_mcast_send) {
3403 		nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
3404 							  nbuf,
3405 							  tx_exc_metadata);
3406 		if (!nbuf)
3407 			goto fail;
3408 	}
3409 
3410 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3411 		if (qdf_unlikely(vdev->nawds_enabled)) {
3412 			/*
3413 			 * This is a multicast packet
3414 			 */
3415 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3416 					    tx_exc_metadata->peer_id);
3417 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3418 					 1, qdf_nbuf_len(nbuf));
3419 		}
3420 
3421 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3422 					      DP_INVALID_PEER, NULL);
3423 	} else {
3424 		/*
3425 		 * Check exception descriptors
3426 		 */
3427 		if (dp_tx_exception_limit_check(vdev))
3428 			goto fail;
3429 
3430 		/*  Single linear frame */
3431 		/*
3432 		 * If nbuf is a simple linear frame, use send_single function to
3433 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3434 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3435 		 */
3436 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3437 					      tx_exc_metadata->peer_id,
3438 					      tx_exc_metadata);
3439 	}
3440 
3441 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3442 	return nbuf;
3443 
3444 send_multiple:
3445 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3446 
3447 fail:
3448 	if (vdev)
3449 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3450 	dp_verbose_debug("pkt send failed");
3451 	return nbuf;
3452 }
3453 
3454 qdf_nbuf_t
3455 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3456 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3457 				   struct cdp_tx_exception_metadata *tx_exc_metadata)
3458 {
3459 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3460 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3461 						     DP_MOD_ID_TX_EXCEPTION);
3462 
3463 	if (qdf_unlikely(!vdev))
3464 		goto fail;
3465 
3466 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3467 			== QDF_STATUS_E_FAILURE)) {
3468 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3469 		goto fail;
3470 	}
3471 
3472 	/* Unref count as it will again be taken inside dp_tx_exception */
3473 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3474 
3475 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3476 
3477 fail:
3478 	if (vdev)
3479 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3480 	dp_verbose_debug("pkt send failed");
3481 	return nbuf;
3482 }
3483 
3484 #ifdef MESH_MODE_SUPPORT
3485 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3486 			   qdf_nbuf_t nbuf)
3487 {
3488 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3489 	struct meta_hdr_s *mhdr;
3490 	qdf_nbuf_t nbuf_mesh = NULL;
3491 	qdf_nbuf_t nbuf_clone = NULL;
3492 	struct dp_vdev *vdev;
3493 	uint8_t no_enc_frame = 0;
3494 
3495 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3496 	if (!nbuf_mesh) {
3497 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3498 				"qdf_nbuf_unshare failed");
3499 		return nbuf;
3500 	}
3501 
3502 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3503 	if (!vdev) {
3504 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3505 				"vdev is NULL for vdev_id %d", vdev_id);
3506 		return nbuf;
3507 	}
3508 
3509 	nbuf = nbuf_mesh;
3510 
3511 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3512 
3513 	if ((vdev->sec_type != cdp_sec_type_none) &&
3514 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3515 		no_enc_frame = 1;
3516 
3517 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3518 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3519 
3520 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3521 		       !no_enc_frame) {
3522 		nbuf_clone = qdf_nbuf_clone(nbuf);
3523 		if (!nbuf_clone) {
3524 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3525 				"qdf_nbuf_clone failed");
3526 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3527 			return nbuf;
3528 		}
3529 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3530 	}
3531 
3532 	if (nbuf_clone) {
3533 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3534 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3535 		} else {
3536 			qdf_nbuf_free(nbuf_clone);
3537 		}
3538 	}
3539 
3540 	if (no_enc_frame)
3541 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3542 	else
3543 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3544 
3545 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3546 	if ((!nbuf) && no_enc_frame) {
3547 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3548 	}
3549 
3550 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3551 	return nbuf;
3552 }
3553 
3554 #else
3555 
3556 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3557 			   qdf_nbuf_t nbuf)
3558 {
3559 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3560 }
3561 
3562 #endif
3563 
3564 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3565 static inline
3566 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3567 {
3568 	if (nbuf) {
3569 		qdf_prefetch(&nbuf->len);
3570 		qdf_prefetch(&nbuf->data);
3571 	}
3572 }
3573 #else
3574 static inline
3575 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3576 {
3577 }
3578 #endif
3579 
3580 #ifdef DP_UMAC_HW_RESET_SUPPORT
3581 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3582 		      qdf_nbuf_t nbuf)
3583 {
3584 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3585 	struct dp_vdev *vdev = NULL;
3586 
3587 	vdev = soc->vdev_id_map[vdev_id];
3588 	if (qdf_unlikely(!vdev))
3589 		return nbuf;
3590 
3591 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3592 	return nbuf;
3593 }
3594 
3595 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3596 			  qdf_nbuf_t nbuf,
3597 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3598 {
3599 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3600 }
3601 #endif
3602 
3603 #ifdef FEATURE_DIRECT_LINK
3604 /**
3605  * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
3606  * @nbuf: skb
3607  * @vdev: DP vdev handle
3608  *
3609  * Return: None
3610  */
3611 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3612 {
3613 	if (qdf_unlikely(vdev->to_fw))
3614 		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
3615 }
3616 #else
3617 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3618 {
3619 }
3620 #endif
3621 
3622 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3623 		      qdf_nbuf_t nbuf)
3624 {
3625 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3626 	uint16_t peer_id = HTT_INVALID_PEER;
3627 	/*
3628 	 * doing a memzero is causing additional function call overhead
3629 	 * so doing static stack clearing
3630 	 */
3631 	struct dp_tx_msdu_info_s msdu_info = {0};
3632 	struct dp_vdev *vdev = NULL;
3633 	qdf_nbuf_t end_nbuf = NULL;
3634 
3635 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3636 		return nbuf;
3637 
3638 	/*
3639 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3640 	 * this in per packet path.
3641 	 *
3642 	 * As in this path vdev memory is already protected with netdev
3643 	 * tx lock
3644 	 */
3645 	vdev = soc->vdev_id_map[vdev_id];
3646 	if (qdf_unlikely(!vdev))
3647 		return nbuf;
3648 
3649 	dp_vdev_tx_mark_to_fw(nbuf, vdev);
3650 
3651 	/*
3652 	 * Set Default Host TID value to invalid TID
3653 	 * (TID override disabled)
3654 	 */
3655 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3656 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3657 
3658 	if (qdf_unlikely(vdev->mesh_vdev)) {
3659 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3660 								&msdu_info);
3661 		if (!nbuf_mesh) {
3662 			dp_verbose_debug("Extracting mesh metadata failed");
3663 			return nbuf;
3664 		}
3665 		nbuf = nbuf_mesh;
3666 	}
3667 
3668 	/*
3669 	 * Get HW Queue to use for this frame.
3670 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3671 	 * dedicated for data and 1 for command.
3672 	 * "queue_id" maps to one hardware ring.
3673 	 *  With each ring, we also associate a unique Tx descriptor pool
3674 	 *  to minimize lock contention for these resources.
3675 	 */
3676 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3677 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3678 		     1);
3679 
3680 	/*
3681 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3682 	 *  Table 1 - Default DSCP-TID mapping table
3683 	 *  Table 2 - 1 DSCP-TID override table
3684 	 *
3685 	 * If we need a different DSCP-TID mapping for this vap,
3686 	 * call tid_classify to extract DSCP/ToS from frame and
3687 	 * map to a TID and store in msdu_info. This is later used
3688 	 * to fill in TCL Input descriptor (per-packet TID override).
3689 	 */
3690 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3691 
3692 	/*
3693 	 * Classify the frame and call corresponding
3694 	 * "prepare" function which extracts the segment (TSO)
3695 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3696 	 * into MSDU_INFO structure which is later used to fill
3697 	 * SW and HW descriptors.
3698 	 */
3699 	if (qdf_nbuf_is_tso(nbuf)) {
3700 		dp_verbose_debug("TSO frame %pK", vdev);
3701 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3702 				 qdf_nbuf_len(nbuf));
3703 
3704 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3705 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3706 					 qdf_nbuf_len(nbuf));
3707 			return nbuf;
3708 		}
3709 
3710 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3711 
3712 		goto send_multiple;
3713 	}
3714 
3715 	/* SG */
3716 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3717 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3718 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3719 				return nbuf;
3720 		} else {
3721 			struct dp_tx_seg_info_s seg_info = {0};
3722 
3723 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
3724 				goto send_single;
3725 
3726 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3727 						&msdu_info);
3728 			if (!nbuf)
3729 				return NULL;
3730 
3731 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3732 
3733 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3734 					 qdf_nbuf_len(nbuf));
3735 
3736 			goto send_multiple;
3737 		}
3738 	}
3739 
3740 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3741 		return NULL;
3742 
3743 	if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
3744 		return nbuf;
3745 
3746 	/* RAW */
3747 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3748 		struct dp_tx_seg_info_s seg_info = {0};
3749 
3750 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3751 		if (!nbuf)
3752 			return NULL;
3753 
3754 		dp_verbose_debug("Raw frame %pK", vdev);
3755 
3756 		goto send_multiple;
3757 
3758 	}
3759 
3760 	if (qdf_unlikely(vdev->nawds_enabled)) {
3761 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3762 					  qdf_nbuf_data(nbuf);
3763 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3764 			uint16_t sa_peer_id = DP_INVALID_PEER;
3765 
3766 			if (!soc->ast_offload_support) {
3767 				struct dp_ast_entry *ast_entry = NULL;
3768 
3769 				qdf_spin_lock_bh(&soc->ast_lock);
3770 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3771 					(soc,
3772 					 (uint8_t *)(eh->ether_shost),
3773 					 vdev->pdev->pdev_id);
3774 				if (ast_entry)
3775 					sa_peer_id = ast_entry->peer_id;
3776 				qdf_spin_unlock_bh(&soc->ast_lock);
3777 			}
3778 
3779 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3780 					    sa_peer_id);
3781 		}
3782 		peer_id = DP_INVALID_PEER;
3783 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3784 				 1, qdf_nbuf_len(nbuf));
3785 	}
3786 
3787 send_single:
3788 	/*  Single linear frame */
3789 	/*
3790 	 * If nbuf is a simple linear frame, use send_single function to
3791 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3792 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3793 	 */
3794 	dp_tx_prefetch_nbuf_data(nbuf);
3795 
3796 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3797 					      peer_id, end_nbuf);
3798 	return nbuf;
3799 
3800 send_multiple:
3801 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3802 
3803 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3804 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3805 
3806 	return nbuf;
3807 }
3808 
3809 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3810 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3811 {
3812 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3813 	struct dp_vdev *vdev = NULL;
3814 
3815 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3816 		return nbuf;
3817 
3818 	/*
3819 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3820 	 * this in per packet path.
3821 	 *
3822 	 * As in this path vdev memory is already protected with netdev
3823 	 * tx lock
3824 	 */
3825 	vdev = soc->vdev_id_map[vdev_id];
3826 	if (qdf_unlikely(!vdev))
3827 		return nbuf;
3828 
3829 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3830 			== QDF_STATUS_E_FAILURE)) {
3831 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3832 		return nbuf;
3833 	}
3834 
3835 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3836 }
3837 
3838 #ifdef UMAC_SUPPORT_PROXY_ARP
3839 /**
3840  * dp_tx_proxy_arp() - Tx proxy arp handler
3841  * @vdev: datapath vdev handle
3842  * @nbuf: sk buffer
3843  *
3844  * Return: status
3845  */
3846 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3847 {
3848 	if (vdev->osif_proxy_arp)
3849 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3850 
3851 	/*
3852 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3853 	 * osif_proxy_arp has a valid function pointer assigned
3854 	 * to it
3855 	 */
3856 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3857 
3858 	return QDF_STATUS_NOT_INITIALIZED;
3859 }
3860 #else
3861 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3862 {
3863 	return QDF_STATUS_SUCCESS;
3864 }
3865 #endif
3866 
3867 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
3868 	!defined(CONFIG_MLO_SINGLE_DEV)
3869 #ifdef WLAN_MCAST_MLO
3870 static bool
3871 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3872 		       struct dp_tx_desc_s *tx_desc,
3873 		       qdf_nbuf_t nbuf,
3874 		       uint8_t reinject_reason)
3875 {
3876 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3877 		if (soc->arch_ops.dp_tx_mcast_handler)
3878 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3879 
3880 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
3881 		return true;
3882 	}
3883 
3884 	return false;
3885 }
3886 #else /* WLAN_MCAST_MLO */
3887 static inline bool
3888 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3889 		       struct dp_tx_desc_s *tx_desc,
3890 		       qdf_nbuf_t nbuf,
3891 		       uint8_t reinject_reason)
3892 {
3893 	return false;
3894 }
3895 #endif /* WLAN_MCAST_MLO */
3896 #else
3897 static inline bool
3898 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3899 		       struct dp_tx_desc_s *tx_desc,
3900 		       qdf_nbuf_t nbuf,
3901 		       uint8_t reinject_reason)
3902 {
3903 	return false;
3904 }
3905 #endif
3906 
3907 void dp_tx_reinject_handler(struct dp_soc *soc,
3908 			    struct dp_vdev *vdev,
3909 			    struct dp_tx_desc_s *tx_desc,
3910 			    uint8_t *status,
3911 			    uint8_t reinject_reason)
3912 {
3913 	struct dp_peer *peer = NULL;
3914 	uint32_t peer_id = HTT_INVALID_PEER;
3915 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3916 	qdf_nbuf_t nbuf_copy = NULL;
3917 	struct dp_tx_msdu_info_s msdu_info;
3918 #ifdef WDS_VENDOR_EXTENSION
3919 	int is_mcast = 0, is_ucast = 0;
3920 	int num_peers_3addr = 0;
3921 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3922 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3923 #endif
3924 	struct dp_txrx_peer *txrx_peer;
3925 
3926 	qdf_assert(vdev);
3927 
3928 	dp_tx_debug("Tx reinject path");
3929 
3930 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3931 			qdf_nbuf_len(tx_desc->nbuf));
3932 
3933 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3934 		return;
3935 
3936 #ifdef WDS_VENDOR_EXTENSION
3937 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3938 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3939 	} else {
3940 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3941 	}
3942 	is_ucast = !is_mcast;
3943 
3944 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3945 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3946 		txrx_peer = dp_get_txrx_peer(peer);
3947 
3948 		if (!txrx_peer || txrx_peer->bss_peer)
3949 			continue;
3950 
3951 		/* Detect wds peers that use 3-addr framing for mcast.
3952 		 * if there are any, the bss_peer is used to send the
3953 		 * the mcast frame using 3-addr format. all wds enabled
3954 		 * peers that use 4-addr framing for mcast frames will
3955 		 * be duplicated and sent as 4-addr frames below.
3956 		 */
3957 		if (!txrx_peer->wds_enabled ||
3958 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3959 			num_peers_3addr = 1;
3960 			break;
3961 		}
3962 	}
3963 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3964 #endif
3965 
3966 	if (qdf_unlikely(vdev->mesh_vdev)) {
3967 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3968 	} else {
3969 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3970 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3971 			txrx_peer = dp_get_txrx_peer(peer);
3972 			if (!txrx_peer)
3973 				continue;
3974 
3975 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3976 #ifdef WDS_VENDOR_EXTENSION
3977 			/*
3978 			 * . if 3-addr STA, then send on BSS Peer
3979 			 * . if Peer WDS enabled and accept 4-addr mcast,
3980 			 * send mcast on that peer only
3981 			 * . if Peer WDS enabled and accept 4-addr ucast,
3982 			 * send ucast on that peer only
3983 			 */
3984 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3985 			 (txrx_peer->wds_enabled &&
3986 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3987 			 (is_ucast &&
3988 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3989 #else
3990 			(txrx_peer->bss_peer &&
3991 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3992 #endif
3993 				peer_id = DP_INVALID_PEER;
3994 
3995 				nbuf_copy = qdf_nbuf_copy(nbuf);
3996 
3997 				if (!nbuf_copy) {
3998 					dp_tx_debug("nbuf copy failed");
3999 					break;
4000 				}
4001 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
4002 				dp_tx_get_queue(vdev, nbuf,
4003 						&msdu_info.tx_queue);
4004 
4005 				nbuf_copy = dp_tx_send_msdu_single(vdev,
4006 						nbuf_copy,
4007 						&msdu_info,
4008 						peer_id,
4009 						NULL);
4010 
4011 				if (nbuf_copy) {
4012 					dp_tx_debug("pkt send failed");
4013 					qdf_nbuf_free(nbuf_copy);
4014 				}
4015 			}
4016 		}
4017 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4018 
4019 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
4020 					     QDF_DMA_TO_DEVICE, nbuf->len);
4021 		qdf_nbuf_free(nbuf);
4022 	}
4023 
4024 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4025 }
4026 
4027 void dp_tx_inspect_handler(struct dp_soc *soc,
4028 			   struct dp_vdev *vdev,
4029 			   struct dp_tx_desc_s *tx_desc,
4030 			   uint8_t *status)
4031 {
4032 
4033 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4034 			"%s Tx inspect path",
4035 			__func__);
4036 
4037 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
4038 			 qdf_nbuf_len(tx_desc->nbuf));
4039 
4040 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4041 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4042 }
4043 
4044 #ifdef MESH_MODE_SUPPORT
4045 /**
4046  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
4047  *                                         in mesh meta header
4048  * @tx_desc: software descriptor head pointer
4049  * @ts: pointer to tx completion stats
4050  * Return: none
4051  */
4052 static
4053 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4054 		struct hal_tx_completion_status *ts)
4055 {
4056 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4057 
4058 	if (!tx_desc->msdu_ext_desc) {
4059 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4060 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4061 				"netbuf %pK offset %d",
4062 				netbuf, tx_desc->pkt_offset);
4063 			return;
4064 		}
4065 	}
4066 }
4067 
4068 #else
4069 static
4070 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4071 		struct hal_tx_completion_status *ts)
4072 {
4073 }
4074 
4075 #endif
4076 
4077 #ifdef CONFIG_SAWF
4078 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4079 					 struct dp_vdev *vdev,
4080 					 struct dp_txrx_peer *txrx_peer,
4081 					 struct dp_tx_desc_s *tx_desc,
4082 					 struct hal_tx_completion_status *ts,
4083 					 uint8_t tid)
4084 {
4085 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4086 					   ts, tid);
4087 }
4088 
4089 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4090 				    uint32_t nw_delay,
4091 				    uint32_t sw_delay,
4092 				    uint32_t hw_delay)
4093 {
4094 	dp_peer_tid_delay_avg(tx_delay,
4095 			      nw_delay,
4096 			      sw_delay,
4097 			      hw_delay);
4098 }
4099 #else
4100 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4101 					 struct dp_vdev *vdev,
4102 					 struct dp_txrx_peer *txrx_peer,
4103 					 struct dp_tx_desc_s *tx_desc,
4104 					 struct hal_tx_completion_status *ts,
4105 					 uint8_t tid)
4106 {
4107 }
4108 
4109 static inline void
4110 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4111 			uint32_t nw_delay, uint32_t sw_delay,
4112 			uint32_t hw_delay)
4113 {
4114 }
4115 #endif
4116 
4117 #ifdef QCA_PEER_EXT_STATS
4118 #ifdef WLAN_CONFIG_TX_DELAY
4119 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4120 				    struct dp_tx_desc_s *tx_desc,
4121 				    struct hal_tx_completion_status *ts,
4122 				    struct dp_vdev *vdev)
4123 {
4124 	struct dp_soc *soc = vdev->pdev->soc;
4125 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4126 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4127 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4128 
4129 	if (!ts->valid)
4130 		return;
4131 
4132 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4133 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4134 
4135 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4136 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4137 
4138 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4139 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4140 							  &fwhw_transmit_delay))
4141 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4142 					     fwhw_transmit_delay);
4143 
4144 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4145 				fwhw_transmit_delay);
4146 }
4147 #else
4148 /**
4149  * dp_tx_compute_tid_delay() - Compute per TID delay
4150  * @stats: Per TID delay stats
4151  * @tx_desc: Software Tx descriptor
4152  * @ts: Tx completion status
4153  * @vdev: vdev
4154  *
4155  * Compute the software enqueue and hw enqueue delays and
4156  * update the respective histograms
4157  *
4158  * Return: void
4159  */
4160 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4161 				    struct dp_tx_desc_s *tx_desc,
4162 				    struct hal_tx_completion_status *ts,
4163 				    struct dp_vdev *vdev)
4164 {
4165 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4166 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4167 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4168 
4169 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4170 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4171 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4172 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4173 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4174 					 timestamp_hw_enqueue);
4175 
4176 	/*
4177 	 * Update the Tx software enqueue delay and HW enque-Completion delay.
4178 	 */
4179 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4180 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4181 }
4182 #endif
4183 
4184 /**
4185  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4186  * @txrx_peer: DP peer context
4187  * @tx_desc: Tx software descriptor
4188  * @ts: Tx completion status
4189  * @ring_id: Rx CPU context ID/CPU_ID
4190  *
4191  * Update the peer extended stats. These are enhanced other
4192  * delay stats per msdu level.
4193  *
4194  * Return: void
4195  */
4196 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4197 					  struct dp_tx_desc_s *tx_desc,
4198 					  struct hal_tx_completion_status *ts,
4199 					  uint8_t ring_id)
4200 {
4201 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4202 	struct dp_soc *soc = NULL;
4203 	struct dp_peer_delay_stats *delay_stats = NULL;
4204 	uint8_t tid;
4205 
4206 	soc = pdev->soc;
4207 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4208 		return;
4209 
4210 	if (!txrx_peer->delay_stats)
4211 		return;
4212 
4213 	tid = ts->tid;
4214 	delay_stats = txrx_peer->delay_stats;
4215 
4216 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4217 
4218 	/*
4219 	 * For non-TID packets use the TID 9
4220 	 */
4221 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4222 		tid = CDP_MAX_DATA_TIDS - 1;
4223 
4224 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4225 				tx_desc, ts, txrx_peer->vdev);
4226 }
4227 #else
4228 static inline
4229 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4230 				   struct dp_tx_desc_s *tx_desc,
4231 				   struct hal_tx_completion_status *ts,
4232 				   uint8_t ring_id)
4233 {
4234 }
4235 #endif
4236 
4237 #ifdef WLAN_PEER_JITTER
4238 /**
4239  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4240  * @curr_delay: Current delay
4241  * @prev_delay: Previous delay
4242  * @avg_jitter: Average Jitter
4243  * Return: Newly Computed Average Jitter
4244  */
4245 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4246 					    uint32_t prev_delay,
4247 					    uint32_t avg_jitter)
4248 {
4249 	uint32_t curr_jitter;
4250 	int32_t jitter_diff;
4251 
4252 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4253 	if (!avg_jitter)
4254 		return curr_jitter;
4255 
4256 	jitter_diff = curr_jitter - avg_jitter;
4257 	if (jitter_diff < 0)
4258 		avg_jitter = avg_jitter -
4259 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4260 	else
4261 		avg_jitter = avg_jitter +
4262 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4263 
4264 	return avg_jitter;
4265 }
4266 
4267 /**
4268  * dp_tx_jitter_get_avg_delay() - compute the average delay
4269  * @curr_delay: Current delay
4270  * @avg_delay: Average delay
4271  * Return: Newly Computed Average Delay
4272  */
4273 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4274 					   uint32_t avg_delay)
4275 {
4276 	int32_t delay_diff;
4277 
4278 	if (!avg_delay)
4279 		return curr_delay;
4280 
4281 	delay_diff = curr_delay - avg_delay;
4282 	if (delay_diff < 0)
4283 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4284 					DP_AVG_DELAY_WEIGHT_DENOM);
4285 	else
4286 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4287 					DP_AVG_DELAY_WEIGHT_DENOM);
4288 
4289 	return avg_delay;
4290 }
4291 
4292 #ifdef WLAN_CONFIG_TX_DELAY
4293 /**
4294  * dp_tx_compute_cur_delay() - get the current delay
4295  * @soc: soc handle
4296  * @vdev: vdev structure for data path state
4297  * @ts: Tx completion status
4298  * @curr_delay: current delay
4299  * @tx_desc: tx descriptor
4300  * Return: void
4301  */
4302 static
4303 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4304 				   struct dp_vdev *vdev,
4305 				   struct hal_tx_completion_status *ts,
4306 				   uint32_t *curr_delay,
4307 				   struct dp_tx_desc_s *tx_desc)
4308 {
4309 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4310 
4311 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4312 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4313 							      curr_delay);
4314 	return status;
4315 }
4316 #else
4317 static
4318 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4319 				   struct dp_vdev *vdev,
4320 				   struct hal_tx_completion_status *ts,
4321 				   uint32_t *curr_delay,
4322 				   struct dp_tx_desc_s *tx_desc)
4323 {
4324 	int64_t current_timestamp, timestamp_hw_enqueue;
4325 
4326 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4327 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4328 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4329 
4330 	return QDF_STATUS_SUCCESS;
4331 }
4332 #endif
4333 
4334 /**
4335  * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
4336  * @jitter: per tid per ring jitter stats
4337  * @ts: Tx completion status
4338  * @vdev: vdev structure for data path state
4339  * @tx_desc: tx descriptor
4340  * Return: void
4341  */
4342 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
4343 				     struct hal_tx_completion_status *ts,
4344 				     struct dp_vdev *vdev,
4345 				     struct dp_tx_desc_s *tx_desc)
4346 {
4347 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
4348 	struct dp_soc *soc = vdev->pdev->soc;
4349 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4350 
4351 	if (ts->status !=  HAL_TX_TQM_RR_FRAME_ACKED) {
4352 		jitter->tx_drop += 1;
4353 		return;
4354 	}
4355 
4356 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
4357 					 tx_desc);
4358 
4359 	if (QDF_IS_STATUS_SUCCESS(status)) {
4360 		avg_delay = jitter->tx_avg_delay;
4361 		avg_jitter = jitter->tx_avg_jitter;
4362 		prev_delay = jitter->tx_prev_delay;
4363 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
4364 							 prev_delay,
4365 							 avg_jitter);
4366 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
4367 		jitter->tx_avg_delay = avg_delay;
4368 		jitter->tx_avg_jitter = avg_jitter;
4369 		jitter->tx_prev_delay = curr_delay;
4370 		jitter->tx_total_success += 1;
4371 	} else if (status == QDF_STATUS_E_FAILURE) {
4372 		jitter->tx_avg_err += 1;
4373 	}
4374 }
4375 
4376 /* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
4377  * @txrx_peer: DP peer context
4378  * @tx_desc: Tx software descriptor
4379  * @ts: Tx completion status
4380  * @ring_id: Rx CPU context ID/CPU_ID
4381  * Return: void
4382  */
4383 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4384 					   struct dp_tx_desc_s *tx_desc,
4385 					   struct hal_tx_completion_status *ts,
4386 					   uint8_t ring_id)
4387 {
4388 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4389 	struct dp_soc *soc = pdev->soc;
4390 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4391 	uint8_t tid;
4392 	struct cdp_peer_tid_stats *rx_tid = NULL;
4393 
4394 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
4395 		return;
4396 
4397 	tid = ts->tid;
4398 	jitter_stats = txrx_peer->jitter_stats;
4399 	qdf_assert_always(jitter_stats);
4400 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4401 	/*
4402 	 * For non-TID packets use the TID 9
4403 	 */
4404 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4405 		tid = CDP_MAX_DATA_TIDS - 1;
4406 
4407 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
4408 	dp_tx_compute_tid_jitter(rx_tid,
4409 				 ts, txrx_peer->vdev, tx_desc);
4410 }
4411 #else
4412 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4413 					   struct dp_tx_desc_s *tx_desc,
4414 					   struct hal_tx_completion_status *ts,
4415 					   uint8_t ring_id)
4416 {
4417 }
4418 #endif
4419 
4420 #ifdef HW_TX_DELAY_STATS_ENABLE
4421 /**
4422  * dp_update_tx_delay_stats() - update the delay stats
4423  * @vdev: vdev handle
4424  * @delay: delay in ms or us based on the flag delay_in_us
4425  * @tid: tid value
4426  * @mode: type of tx delay mode
4427  * @ring_id: ring number
4428  * @delay_in_us: flag to indicate whether the delay is in ms or us
4429  *
4430  * Return: none
4431  */
4432 static inline
4433 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4434 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4435 {
4436 	struct cdp_tid_tx_stats *tstats =
4437 		&vdev->stats.tid_tx_stats[ring_id][tid];
4438 
4439 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4440 			      delay_in_us);
4441 }
4442 #else
4443 static inline
4444 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4445 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4446 {
4447 	struct cdp_tid_tx_stats *tstats =
4448 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4449 
4450 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4451 			      delay_in_us);
4452 }
4453 #endif
4454 
4455 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4456 			 uint8_t tid, uint8_t ring_id)
4457 {
4458 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4459 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4460 	uint32_t fwhw_transmit_delay_us;
4461 
4462 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4463 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4464 		return;
4465 
4466 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4467 		fwhw_transmit_delay_us =
4468 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4469 			qdf_ktime_to_us(tx_desc->timestamp);
4470 
4471 		/*
4472 		 * Delay between packet enqueued to HW and Tx completion in us
4473 		 */
4474 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4475 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4476 					 ring_id, true);
4477 		/*
4478 		 * For MCL, only enqueue to completion delay is required
4479 		 * so return if the vdev flag is enabled.
4480 		 */
4481 		return;
4482 	}
4483 
4484 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4485 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4486 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4487 					 timestamp_hw_enqueue);
4488 
4489 	if (!timestamp_hw_enqueue)
4490 		return;
4491 	/*
4492 	 * Delay between packet enqueued to HW and Tx completion in ms
4493 	 */
4494 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4495 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4496 				 false);
4497 
4498 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4499 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4500 	interframe_delay = (uint32_t)(timestamp_ingress -
4501 				      vdev->prev_tx_enq_tstamp);
4502 
4503 	/*
4504 	 * Delay in software enqueue
4505 	 */
4506 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4507 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4508 				 false);
4509 
4510 	/*
4511 	 * Update interframe delay stats calculated at hardstart receive point.
4512 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4513 	 * interframe delay will not be calculate correctly for 1st frame.
4514 	 * On the other side, this will help in avoiding extra per packet check
4515 	 * of !vdev->prev_tx_enq_tstamp.
4516 	 */
4517 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4518 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4519 				 false);
4520 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4521 }
4522 
4523 #ifdef DISABLE_DP_STATS
4524 static
4525 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4526 				   struct dp_txrx_peer *txrx_peer,
4527 				   uint8_t link_id)
4528 {
4529 }
4530 #else
4531 static inline void
4532 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
4533 		       uint8_t link_id)
4534 {
4535 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4536 
4537 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4538 	if (subtype != QDF_PROTO_INVALID)
4539 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4540 					  1, link_id);
4541 }
4542 #endif
4543 
4544 #ifndef QCA_ENHANCED_STATS_SUPPORT
4545 #ifdef DP_PEER_EXTENDED_API
4546 static inline uint8_t
4547 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4548 {
4549 	return txrx_peer->mpdu_retry_threshold;
4550 }
4551 #else
4552 static inline uint8_t
4553 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4554 {
4555 	return 0;
4556 }
4557 #endif
4558 
4559 /**
4560  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4561  *
4562  * @ts: Tx compltion status
4563  * @txrx_peer: datapath txrx_peer handle
4564  * @link_id: Link id
4565  *
4566  * Return: void
4567  */
4568 static inline void
4569 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4570 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
4571 {
4572 	uint8_t mcs, pkt_type, dst_mcs_idx;
4573 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4574 
4575 	mcs = ts->mcs;
4576 	pkt_type = ts->pkt_type;
4577 	/* do HW to SW pkt type conversion */
4578 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4579 		    hal_2_dp_pkt_type_map[pkt_type]);
4580 
4581 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4582 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4583 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4584 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4585 				       1, link_id);
4586 
4587 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
4588 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
4589 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
4590 			       link_id);
4591 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4592 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
4593 			       link_id);
4594 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
4595 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
4596 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
4597 				link_id);
4598 	if (ts->first_msdu) {
4599 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4600 					ts->transmit_cnt > 1, link_id);
4601 
4602 		if (!retry_threshold)
4603 			return;
4604 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4605 					qdf_do_div(ts->transmit_cnt,
4606 						   retry_threshold),
4607 					ts->transmit_cnt > retry_threshold,
4608 					link_id);
4609 	}
4610 }
4611 #else
4612 static inline void
4613 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4614 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
4615 {
4616 }
4617 #endif
4618 
4619 #if defined(WLAN_FEATURE_11BE_MLO) && \
4620 	(defined(QCA_ENHANCED_STATS_SUPPORT) || \
4621 		defined(DP_MLO_LINK_STATS_SUPPORT))
4622 static inline uint8_t
4623 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
4624 			       struct hal_tx_completion_status *ts,
4625 			       struct dp_txrx_peer *txrx_peer,
4626 			       struct dp_vdev *vdev)
4627 {
4628 	uint8_t hw_link_id = 0;
4629 	uint32_t ppdu_id;
4630 	uint8_t link_id_offset, link_id_bits;
4631 
4632 	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
4633 		return 0;
4634 
4635 	link_id_offset = soc->link_id_offset;
4636 	link_id_bits = soc->link_id_bits;
4637 	ppdu_id = ts->ppdu_id;
4638 	hw_link_id = ((DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
4639 						   link_id_bits)) + 1);
4640 	if (hw_link_id > DP_MAX_MLO_LINKS) {
4641 		hw_link_id = 0;
4642 		DP_PEER_PER_PKT_STATS_INC(
4643 				txrx_peer,
4644 				tx.inval_link_id_pkt_cnt, 1, hw_link_id);
4645 	}
4646 
4647 	return hw_link_id;
4648 }
4649 #else
4650 static inline uint8_t
4651 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
4652 			       struct hal_tx_completion_status *ts,
4653 			       struct dp_txrx_peer *txrx_peer,
4654 			       struct dp_vdev *vdev)
4655 {
4656 	return 0;
4657 }
4658 #endif
4659 
4660 /**
4661  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4662  *				per wbm ring
4663  *
4664  * @tx_desc: software descriptor head pointer
4665  * @ts: Tx completion status
4666  * @txrx_peer: peer handle
4667  * @ring_id: ring number
4668  * @link_id: Link id
4669  *
4670  * Return: None
4671  */
4672 static inline void
4673 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4674 			struct hal_tx_completion_status *ts,
4675 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
4676 			uint8_t link_id)
4677 {
4678 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4679 	uint8_t tid = ts->tid;
4680 	uint32_t length;
4681 	struct cdp_tid_tx_stats *tid_stats;
4682 
4683 	if (!pdev)
4684 		return;
4685 
4686 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4687 		tid = CDP_MAX_DATA_TIDS - 1;
4688 
4689 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4690 
4691 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4692 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4693 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
4694 					  link_id);
4695 		return;
4696 	}
4697 
4698 	length = qdf_nbuf_len(tx_desc->nbuf);
4699 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4700 
4701 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4702 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4703 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4704 
4705 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4706 		tid_stats->tqm_status_cnt[ts->status]++;
4707 	}
4708 
4709 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4710 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4711 					   ts->transmit_cnt > 1, link_id);
4712 
4713 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4714 					   1, ts->transmit_cnt > 2, link_id);
4715 
4716 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
4717 					   link_id);
4718 
4719 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4720 					   ts->msdu_part_of_amsdu, link_id);
4721 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4722 					   !ts->msdu_part_of_amsdu, link_id);
4723 
4724 		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
4725 							qdf_system_ticks();
4726 
4727 		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
4728 
4729 		return;
4730 	}
4731 
4732 	/*
4733 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4734 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
4735 	 * hw limitation there are no completions for failed cases.
4736 	 * Hence updating tx_failed from data path. Please note that
4737 	 * if tx_failed is fixed to be from ppdu, then this has to be
4738 	 * removed
4739 	 */
4740 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4741 
4742 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4743 				   ts->transmit_cnt > DP_RETRY_COUNT,
4744 				   link_id);
4745 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
4746 
4747 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4748 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
4749 					  link_id);
4750 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4751 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4752 					      length, link_id);
4753 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4754 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
4755 					  link_id);
4756 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4757 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
4758 					  link_id);
4759 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4760 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
4761 					  link_id);
4762 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4763 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
4764 					  link_id);
4765 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4766 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
4767 					  link_id);
4768 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4769 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4770 					  tx.dropped.fw_rem_queue_disable, 1,
4771 					  link_id);
4772 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4773 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4774 					  tx.dropped.fw_rem_no_match, 1,
4775 					  link_id);
4776 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4777 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4778 					  tx.dropped.drop_threshold, 1,
4779 					  link_id);
4780 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4781 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4782 					  tx.dropped.drop_link_desc_na, 1,
4783 					  link_id);
4784 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4785 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4786 					  tx.dropped.invalid_drop, 1,
4787 					  link_id);
4788 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4789 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4790 					  tx.dropped.mcast_vdev_drop, 1,
4791 					  link_id);
4792 	} else {
4793 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
4794 					  link_id);
4795 	}
4796 }
4797 
4798 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4799 /**
4800  * dp_tx_flow_pool_lock() - take flow pool lock
4801  * @soc: core txrx main context
4802  * @tx_desc: tx desc
4803  *
4804  * Return: None
4805  */
4806 static inline
4807 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4808 			  struct dp_tx_desc_s *tx_desc)
4809 {
4810 	struct dp_tx_desc_pool_s *pool;
4811 	uint8_t desc_pool_id;
4812 
4813 	desc_pool_id = tx_desc->pool_id;
4814 	pool = &soc->tx_desc[desc_pool_id];
4815 
4816 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4817 }
4818 
4819 /**
4820  * dp_tx_flow_pool_unlock() - release flow pool lock
4821  * @soc: core txrx main context
4822  * @tx_desc: tx desc
4823  *
4824  * Return: None
4825  */
4826 static inline
4827 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4828 			    struct dp_tx_desc_s *tx_desc)
4829 {
4830 	struct dp_tx_desc_pool_s *pool;
4831 	uint8_t desc_pool_id;
4832 
4833 	desc_pool_id = tx_desc->pool_id;
4834 	pool = &soc->tx_desc[desc_pool_id];
4835 
4836 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4837 }
4838 #else
4839 static inline
4840 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4841 {
4842 }
4843 
4844 static inline
4845 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4846 {
4847 }
4848 #endif
4849 
4850 /**
4851  * dp_tx_notify_completion() - Notify tx completion for this desc
4852  * @soc: core txrx main context
4853  * @vdev: datapath vdev handle
4854  * @tx_desc: tx desc
4855  * @netbuf:  buffer
4856  * @status: tx status
4857  *
4858  * Return: none
4859  */
4860 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4861 					   struct dp_vdev *vdev,
4862 					   struct dp_tx_desc_s *tx_desc,
4863 					   qdf_nbuf_t netbuf,
4864 					   uint8_t status)
4865 {
4866 	void *osif_dev;
4867 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4868 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4869 
4870 	qdf_assert(tx_desc);
4871 
4872 	if (!vdev ||
4873 	    !vdev->osif_vdev) {
4874 		return;
4875 	}
4876 
4877 	osif_dev = vdev->osif_vdev;
4878 	tx_compl_cbk = vdev->tx_comp;
4879 
4880 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4881 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4882 
4883 	if (tx_compl_cbk)
4884 		tx_compl_cbk(netbuf, osif_dev, flag);
4885 }
4886 
4887 /**
4888  * dp_tx_sojourn_stats_process() - Collect sojourn stats
4889  * @pdev: pdev handle
4890  * @txrx_peer: DP peer context
4891  * @tid: tid value
4892  * @txdesc_ts: timestamp from txdesc
4893  * @ppdu_id: ppdu id
4894  * @link_id: link id
4895  *
4896  * Return: none
4897  */
4898 #ifdef FEATURE_PERPKT_INFO
4899 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4900 					       struct dp_txrx_peer *txrx_peer,
4901 					       uint8_t tid,
4902 					       uint64_t txdesc_ts,
4903 					       uint32_t ppdu_id,
4904 					       uint8_t link_id)
4905 {
4906 	uint64_t delta_ms;
4907 	struct cdp_tx_sojourn_stats *sojourn_stats;
4908 	struct dp_peer *primary_link_peer = NULL;
4909 	struct dp_soc *link_peer_soc = NULL;
4910 
4911 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4912 		return;
4913 
4914 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4915 			 tid >= CDP_DATA_TID_MAX))
4916 		return;
4917 
4918 	if (qdf_unlikely(!pdev->sojourn_buf))
4919 		return;
4920 
4921 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4922 							   txrx_peer->peer_id,
4923 							   DP_MOD_ID_TX_COMP);
4924 
4925 	if (qdf_unlikely(!primary_link_peer))
4926 		return;
4927 
4928 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4929 		qdf_nbuf_data(pdev->sojourn_buf);
4930 
4931 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4932 	sojourn_stats->cookie = (void *)
4933 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4934 							  primary_link_peer);
4935 
4936 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4937 				txdesc_ts;
4938 	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
4939 			    delta_ms);
4940 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4941 	sojourn_stats->num_msdus[tid] = 1;
4942 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4943 		txrx_peer->stats[link_id].
4944 			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4945 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4946 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4947 			     WDI_NO_VAL, pdev->pdev_id);
4948 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4949 	sojourn_stats->num_msdus[tid] = 0;
4950 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4951 
4952 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4953 }
4954 #else
4955 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4956 					       struct dp_txrx_peer *txrx_peer,
4957 					       uint8_t tid,
4958 					       uint64_t txdesc_ts,
4959 					       uint32_t ppdu_id)
4960 {
4961 }
4962 #endif
4963 
4964 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4965 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4966 				       struct dp_tx_desc_s *desc,
4967 				       struct hal_tx_completion_status *ts)
4968 {
4969 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4970 			     desc, ts->peer_id,
4971 			     WDI_NO_VAL, desc->pdev->pdev_id);
4972 }
4973 #endif
4974 
4975 void
4976 dp_tx_comp_process_desc(struct dp_soc *soc,
4977 			struct dp_tx_desc_s *desc,
4978 			struct hal_tx_completion_status *ts,
4979 			struct dp_txrx_peer *txrx_peer)
4980 {
4981 	uint64_t time_latency = 0;
4982 	uint16_t peer_id = DP_INVALID_PEER_ID;
4983 
4984 	/*
4985 	 * m_copy/tx_capture modes are not supported for
4986 	 * scatter gather packets
4987 	 */
4988 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4989 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4990 				qdf_ktime_to_ms(desc->timestamp));
4991 	}
4992 
4993 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4994 
4995 	if (dp_tx_pkt_tracepoints_enabled())
4996 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4997 				    desc->msdu_ext_desc ?
4998 				    desc->msdu_ext_desc->tso_desc : NULL,
4999 				    qdf_ktime_to_us(desc->timestamp));
5000 
5001 	if (!(desc->msdu_ext_desc)) {
5002 		dp_tx_enh_unmap(soc, desc);
5003 		if (txrx_peer)
5004 			peer_id = txrx_peer->peer_id;
5005 
5006 		if (QDF_STATUS_SUCCESS ==
5007 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
5008 			return;
5009 		}
5010 
5011 		if (QDF_STATUS_SUCCESS ==
5012 		    dp_get_completion_indication_for_stack(soc,
5013 							   desc->pdev,
5014 							   txrx_peer, ts,
5015 							   desc->nbuf,
5016 							   time_latency)) {
5017 			dp_send_completion_to_stack(soc,
5018 						    desc->pdev,
5019 						    ts->peer_id,
5020 						    ts->ppdu_id,
5021 						    desc->nbuf);
5022 			return;
5023 		}
5024 	}
5025 
5026 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
5027 	dp_tx_comp_free_buf(soc, desc, false);
5028 }
5029 
5030 #ifdef DISABLE_DP_STATS
5031 /**
5032  * dp_tx_update_connectivity_stats() - update tx connectivity stats
5033  * @soc: core txrx main context
5034  * @vdev: virtual device instance
5035  * @tx_desc: tx desc
5036  * @status: tx status
5037  *
5038  * Return: none
5039  */
5040 static inline
5041 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5042 				     struct dp_vdev *vdev,
5043 				     struct dp_tx_desc_s *tx_desc,
5044 				     uint8_t status)
5045 {
5046 }
5047 #else
5048 static inline
5049 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5050 				     struct dp_vdev *vdev,
5051 				     struct dp_tx_desc_s *tx_desc,
5052 				     uint8_t status)
5053 {
5054 	void *osif_dev;
5055 	ol_txrx_stats_rx_fp stats_cbk;
5056 	uint8_t pkt_type;
5057 
5058 	qdf_assert(tx_desc);
5059 
5060 	if (!vdev ||
5061 	    !vdev->osif_vdev ||
5062 	    !vdev->stats_cb)
5063 		return;
5064 
5065 	osif_dev = vdev->osif_vdev;
5066 	stats_cbk = vdev->stats_cb;
5067 
5068 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5069 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5070 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5071 			  &pkt_type);
5072 }
5073 #endif
5074 
5075 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
5076 /* Mask for bit29 ~ bit31 */
5077 #define DP_TX_TS_BIT29_31_MASK 0xE0000000
5078 /* Timestamp value (unit us) if bit29 is set */
5079 #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
5080 /**
5081  * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
5082  * @ack_ts: OTA ack timestamp, unit us.
5083  * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
5084  * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
5085  *
5086  * this function will restore the bit29 ~ bit31 3 bits value for
5087  * buffer_timestamp in wbm2sw ring entry, currently buffer_timestamp only
5088  * can support 0x7FFF * 1024 us (29 bits), but if the timestamp is >
5089  * 0x7FFF * 1024 us, bit29~ bit31 will be lost.
5090  *
5091  * Return: the adjusted buffer_timestamp value
5092  */
5093 static inline
5094 uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
5095 					uint32_t enqueue_ts,
5096 					uint32_t base_delta_ts)
5097 {
5098 	uint32_t ack_buffer_ts;
5099 	uint32_t ack_buffer_ts_bit29_31;
5100 	uint32_t adjusted_enqueue_ts;
5101 
5102 	/* corresponding buffer_timestamp value when receive OTA Ack */
5103 	ack_buffer_ts = ack_ts - base_delta_ts;
5104 	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
5105 
5106 	/* restore the bit29 ~ bit31 value */
5107 	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
5108 
5109 	/*
5110 	 * if actual enqueue_ts value occupied 29 bits only, this enqueue_ts
5111 	 * value + real UL delay overflow 29 bits, then 30th bit (bit-29)
5112 	 * should not be marked, otherwise extra 0x20000000 us is added to
5113 	 * enqueue_ts.
5114 	 */
5115 	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
5116 		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
5117 
5118 	return adjusted_enqueue_ts;
5119 }
5120 
5121 QDF_STATUS
5122 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5123 			  uint32_t delta_tsf,
5124 			  uint32_t *delay_us)
5125 {
5126 	uint32_t buffer_ts;
5127 	uint32_t delay;
5128 
5129 	if (!delay_us)
5130 		return QDF_STATUS_E_INVAL;
5131 
5132 	/* Tx_rate_stats_info_valid is 0 and tsf is invalid then */
5133 	if (!ts->valid)
5134 		return QDF_STATUS_E_INVAL;
5135 
5136 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5137 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5138 	 * valid up to 29 bits.
5139 	 */
5140 	buffer_ts = ts->buffer_timestamp << 10;
5141 	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
5142 						   buffer_ts, delta_tsf);
5143 
5144 	delay = ts->tsf - buffer_ts - delta_tsf;
5145 
5146 	if (qdf_unlikely(delay & 0x80000000)) {
5147 		dp_err_rl("delay = 0x%x (-ve)\n"
5148 			  "release_src = %d\n"
5149 			  "ppdu_id = 0x%x\n"
5150 			  "peer_id = 0x%x\n"
5151 			  "tid = 0x%x\n"
5152 			  "release_reason = %d\n"
5153 			  "tsf = %u (0x%x)\n"
5154 			  "buffer_timestamp = %u (0x%x)\n"
5155 			  "delta_tsf = %u (0x%x)\n",
5156 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5157 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5158 			  ts->buffer_timestamp, ts->buffer_timestamp,
5159 			  delta_tsf, delta_tsf);
5160 
5161 		delay = 0;
5162 		goto end;
5163 	}
5164 
5165 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
5166 	if (delay > 0x1000000) {
5167 		dp_info_rl("----------------------\n"
5168 			   "Tx completion status:\n"
5169 			   "----------------------\n"
5170 			   "release_src = %d\n"
5171 			   "ppdu_id = 0x%x\n"
5172 			   "release_reason = %d\n"
5173 			   "tsf = %u (0x%x)\n"
5174 			   "buffer_timestamp = %u (0x%x)\n"
5175 			   "delta_tsf = %u (0x%x)\n",
5176 			   ts->release_src, ts->ppdu_id, ts->status,
5177 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5178 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5179 		return QDF_STATUS_E_FAILURE;
5180 	}
5181 
5182 
5183 end:
5184 	*delay_us = delay;
5185 
5186 	return QDF_STATUS_SUCCESS;
5187 }
5188 
5189 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5190 		      uint32_t delta_tsf)
5191 {
5192 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5193 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5194 						     DP_MOD_ID_CDP);
5195 
5196 	if (!vdev) {
5197 		dp_err_rl("vdev %d does not exist", vdev_id);
5198 		return;
5199 	}
5200 
5201 	vdev->delta_tsf = delta_tsf;
5202 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5203 
5204 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5205 }
5206 #endif
5207 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5208 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5209 				      uint8_t vdev_id, bool enable)
5210 {
5211 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5212 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5213 						     DP_MOD_ID_CDP);
5214 
5215 	if (!vdev) {
5216 		dp_err_rl("vdev %d does not exist", vdev_id);
5217 		return QDF_STATUS_E_FAILURE;
5218 	}
5219 
5220 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5221 
5222 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5223 
5224 	return QDF_STATUS_SUCCESS;
5225 }
5226 
5227 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5228 			       uint32_t *val)
5229 {
5230 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5231 	struct dp_vdev *vdev;
5232 	uint32_t delay_accum;
5233 	uint32_t pkts_accum;
5234 
5235 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5236 	if (!vdev) {
5237 		dp_err_rl("vdev %d does not exist", vdev_id);
5238 		return QDF_STATUS_E_FAILURE;
5239 	}
5240 
5241 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5242 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5243 		return QDF_STATUS_E_FAILURE;
5244 	}
5245 
5246 	/* Average uplink delay based on current accumulated values */
5247 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5248 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5249 
5250 	*val = delay_accum / pkts_accum;
5251 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5252 		 delay_accum, pkts_accum);
5253 
5254 	/* Reset accumulated values to 0 */
5255 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5256 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5257 
5258 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5259 
5260 	return QDF_STATUS_SUCCESS;
5261 }
5262 
5263 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5264 				      struct hal_tx_completion_status *ts)
5265 {
5266 	uint32_t ul_delay;
5267 
5268 	if (qdf_unlikely(!vdev)) {
5269 		dp_info_rl("vdev is null or delete in progress");
5270 		return;
5271 	}
5272 
5273 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5274 		return;
5275 
5276 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5277 							  vdev->delta_tsf,
5278 							  &ul_delay)))
5279 		return;
5280 
5281 	ul_delay /= 1000; /* in unit of ms */
5282 
5283 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5284 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5285 }
5286 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5287 static inline
5288 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5289 			       struct hal_tx_completion_status *ts)
5290 {
5291 }
5292 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5293 
5294 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
5295 				  struct dp_tx_desc_s *tx_desc,
5296 				  struct hal_tx_completion_status *ts,
5297 				  struct dp_txrx_peer *txrx_peer,
5298 				  uint8_t ring_id)
5299 {
5300 	uint32_t length;
5301 	qdf_ether_header_t *eh;
5302 	struct dp_vdev *vdev = NULL;
5303 	qdf_nbuf_t nbuf = tx_desc->nbuf;
5304 	enum qdf_dp_tx_rx_status dp_status;
5305 	uint8_t link_id = 0;
5306 	enum QDF_OPMODE op_mode = QDF_MAX_NO_OF_MODE;
5307 
5308 	if (!nbuf) {
5309 		dp_info_rl("invalid tx descriptor. nbuf NULL");
5310 		goto out;
5311 	}
5312 
5313 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
5314 	length = dp_tx_get_pkt_len(tx_desc);
5315 
5316 	dp_status = dp_tx_hw_to_qdf(ts->status);
5317 	dp_tx_comp_debug("-------------------- \n"
5318 			 "Tx Completion Stats: \n"
5319 			 "-------------------- \n"
5320 			 "ack_frame_rssi = %d \n"
5321 			 "first_msdu = %d \n"
5322 			 "last_msdu = %d \n"
5323 			 "msdu_part_of_amsdu = %d \n"
5324 			 "rate_stats valid = %d \n"
5325 			 "bw = %d \n"
5326 			 "pkt_type = %d \n"
5327 			 "stbc = %d \n"
5328 			 "ldpc = %d \n"
5329 			 "sgi = %d \n"
5330 			 "mcs = %d \n"
5331 			 "ofdma = %d \n"
5332 			 "tones_in_ru = %d \n"
5333 			 "tsf = %d \n"
5334 			 "ppdu_id = %d \n"
5335 			 "transmit_cnt = %d \n"
5336 			 "tid = %d \n"
5337 			 "peer_id = %d\n"
5338 			 "tx_status = %d\n"
5339 			 "tx_release_source = %d\n",
5340 			 ts->ack_frame_rssi, ts->first_msdu,
5341 			 ts->last_msdu, ts->msdu_part_of_amsdu,
5342 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
5343 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
5344 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
5345 			 ts->transmit_cnt, ts->tid, ts->peer_id,
5346 			 ts->status, ts->release_src);
5347 
5348 	/* Update SoC level stats */
5349 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
5350 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
5351 
5352 	if (!txrx_peer) {
5353 		dp_info_rl("peer is null or deletion in progress");
5354 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
5355 		goto out_log;
5356 	}
5357 	vdev = txrx_peer->vdev;
5358 
5359 	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
5360 
5361 	op_mode = vdev->qdf_opmode;
5362 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
5363 	dp_tx_update_uplink_delay(soc, vdev, ts);
5364 
5365 	/* check tx complete notification */
5366 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
5367 		dp_tx_notify_completion(soc, vdev, tx_desc,
5368 					nbuf, ts->status);
5369 
5370 	/* Update per-packet stats for mesh mode */
5371 	if (qdf_unlikely(vdev->mesh_vdev) &&
5372 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
5373 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
5374 
5375 	/* Update peer level stats */
5376 	if (qdf_unlikely(txrx_peer->bss_peer &&
5377 			 vdev->opmode == wlan_op_mode_ap)) {
5378 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
5379 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5380 						      length, link_id);
5381 
5382 			if (txrx_peer->vdev->tx_encap_type ==
5383 				htt_cmn_pkt_type_ethernet &&
5384 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5385 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5386 							      tx.bcast, 1,
5387 							      length, link_id);
5388 			}
5389 		}
5390 	} else {
5391 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
5392 					      link_id);
5393 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5394 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5395 						      1, length, link_id);
5396 			if (qdf_unlikely(txrx_peer->in_twt)) {
5397 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5398 							      tx.tx_success_twt,
5399 							      1, length,
5400 							      link_id);
5401 			}
5402 		}
5403 	}
5404 
5405 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
5406 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5407 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
5408 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5409 				     ts, ts->tid);
5410 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5411 
5412 #ifdef QCA_SUPPORT_RDK_STATS
5413 	if (soc->peerstats_enabled)
5414 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5415 					    qdf_ktime_to_ms(tx_desc->timestamp),
5416 					    ts->ppdu_id, link_id);
5417 #endif
5418 
5419 out_log:
5420 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
5421 			 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
5422 			 QDF_TRACE_DEFAULT_PDEV_ID,
5423 			 qdf_nbuf_data_addr(nbuf),
5424 			 sizeof(qdf_nbuf_data(nbuf)),
5425 			 tx_desc->id, ts->status, dp_status, op_mode));
5426 out:
5427 	return;
5428 }
5429 
5430 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5431 	defined(QCA_ENHANCED_STATS_SUPPORT)
5432 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5433 				   uint32_t length, uint8_t tx_status,
5434 				   bool update)
5435 {
5436 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5437 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5438 
5439 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5440 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5441 	}
5442 }
5443 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5444 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5445 				   uint32_t length, uint8_t tx_status,
5446 				   bool update)
5447 {
5448 	if (!txrx_peer->hw_txrx_stats_en) {
5449 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5450 
5451 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5452 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5453 	}
5454 }
5455 
5456 #else
5457 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5458 				   uint32_t length, uint8_t tx_status,
5459 				   bool update)
5460 {
5461 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5462 
5463 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5464 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5465 }
5466 #endif
5467 
5468 /**
5469  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
5470  * @next: descriptor of the nrxt buffer
5471  *
5472  * Return: none
5473  */
5474 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5475 static inline
5476 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5477 {
5478 	qdf_nbuf_t nbuf = NULL;
5479 
5480 	if (next)
5481 		nbuf = next->nbuf;
5482 	if (nbuf)
5483 		qdf_prefetch(nbuf);
5484 }
5485 #else
5486 static inline
5487 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5488 {
5489 }
5490 #endif
5491 
5492 /**
5493  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5494  * @soc: core txrx main context
5495  * @desc: software descriptor
5496  *
5497  * Return: true when packet is reinjected
5498  */
5499 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5500 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
5501 static inline bool
5502 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5503 {
5504 	struct dp_vdev *vdev = NULL;
5505 
5506 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5507 		if (!soc->arch_ops.dp_tx_mcast_handler ||
5508 		    !soc->arch_ops.dp_tx_is_mcast_primary)
5509 			return false;
5510 
5511 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5512 					     DP_MOD_ID_REINJECT);
5513 
5514 		if (qdf_unlikely(!vdev)) {
5515 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
5516 					   desc->id);
5517 			return false;
5518 		}
5519 
5520 		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
5521 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5522 			return false;
5523 		}
5524 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5525 				 qdf_nbuf_len(desc->nbuf));
5526 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5527 		dp_tx_desc_release(soc, desc, desc->pool_id);
5528 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5529 		return true;
5530 	}
5531 
5532 	return false;
5533 }
5534 #else
5535 static inline bool
5536 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5537 {
5538 	return false;
5539 }
5540 #endif
5541 
5542 #ifdef QCA_DP_TX_NBUF_LIST_FREE
5543 static inline void
5544 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5545 {
5546 	qdf_nbuf_queue_head_init(nbuf_queue_head);
5547 }
5548 
5549 static inline void
5550 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5551 			  struct dp_tx_desc_s *desc)
5552 {
5553 	qdf_nbuf_t nbuf = NULL;
5554 
5555 	nbuf = desc->nbuf;
5556 	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
5557 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
5558 	else
5559 		qdf_nbuf_free(nbuf);
5560 }
5561 
5562 static inline void
5563 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
5564 				  qdf_nbuf_t nbuf)
5565 {
5566 	if (!nbuf)
5567 		return;
5568 
5569 	if (nbuf->is_from_recycler)
5570 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
5571 	else
5572 		qdf_nbuf_free(nbuf);
5573 }
5574 
5575 static inline void
5576 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5577 {
5578 	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
5579 }
5580 #else
5581 static inline void
5582 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5583 {
5584 }
5585 
5586 static inline void
5587 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5588 			  struct dp_tx_desc_s *desc)
5589 {
5590 	qdf_nbuf_free(desc->nbuf);
5591 }
5592 
5593 static inline void
5594 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
5595 				  qdf_nbuf_t nbuf)
5596 {
5597 	qdf_nbuf_free(nbuf);
5598 }
5599 
5600 static inline void
5601 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5602 {
5603 }
5604 #endif
5605 
5606 #ifdef WLAN_SUPPORT_PPEDS
5607 static inline void
5608 dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
5609 				 struct dp_txrx_peer *txrx_peer,
5610 				 struct hal_tx_completion_status *ts,
5611 				 struct dp_tx_desc_s *desc,
5612 				 uint8_t ring_id)
5613 {
5614 	uint8_t link_id = 0;
5615 	struct dp_vdev *vdev = NULL;
5616 
5617 	if (qdf_likely(txrx_peer)) {
5618 		if (!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5619 			hal_tx_comp_get_status(&desc->comp,
5620 					       ts,
5621 					       soc->hal_soc);
5622 			vdev = txrx_peer->vdev;
5623 			link_id = dp_tx_get_link_id_from_ppdu_id(soc,
5624 								 ts,
5625 								 txrx_peer,
5626 								 vdev);
5627 			if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
5628 				link_id = 0;
5629 			dp_tx_update_peer_stats(desc, ts,
5630 						txrx_peer,
5631 						ring_id,
5632 						link_id);
5633 		} else {
5634 			dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
5635 						      desc->tx_status, false);
5636 		}
5637 	}
5638 }
5639 #else
5640 static inline void
5641 dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
5642 				 struct dp_txrx_peer *txrx_peer,
5643 				 struct hal_tx_completion_status *ts,
5644 				 struct dp_tx_desc_s *desc,
5645 				 uint8_t ring_id)
5646 {
5647 }
5648 #endif
5649 
5650 void
5651 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5652 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5653 {
5654 	struct dp_tx_desc_s *desc;
5655 	struct dp_tx_desc_s *next;
5656 	struct hal_tx_completion_status ts;
5657 	struct dp_txrx_peer *txrx_peer = NULL;
5658 	uint16_t peer_id = DP_INVALID_PEER;
5659 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5660 	qdf_nbuf_queue_head_t h;
5661 
5662 	desc = comp_head;
5663 
5664 	dp_tx_nbuf_queue_head_init(&h);
5665 
5666 	while (desc) {
5667 		next = desc->next;
5668 		dp_tx_prefetch_next_nbuf_data(next);
5669 
5670 		if (peer_id != desc->peer_id) {
5671 			if (txrx_peer)
5672 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5673 							  DP_MOD_ID_TX_COMP);
5674 			peer_id = desc->peer_id;
5675 			txrx_peer =
5676 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5677 							   &txrx_ref_handle,
5678 							   DP_MOD_ID_TX_COMP);
5679 		}
5680 
5681 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5682 			desc = next;
5683 			continue;
5684 		}
5685 
5686 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
5687 			qdf_nbuf_t nbuf;
5688 			dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
5689 							 desc, ring_id);
5690 
5691 			if (desc->pool_id != DP_TX_PPEDS_POOL_ID) {
5692 				nbuf = desc->nbuf;
5693 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
5694 				dp_tx_desc_free(soc, desc, desc->pool_id);
5695 
5696 				__dp_tx_outstanding_dec(soc);
5697 			} else {
5698 				nbuf = dp_ppeds_tx_desc_free(soc, desc);
5699 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
5700 			}
5701 			desc = next;
5702 			continue;
5703 		}
5704 
5705 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5706 			struct dp_pdev *pdev = desc->pdev;
5707 
5708 			if (qdf_likely(txrx_peer))
5709 				dp_tx_update_peer_basic_stats(txrx_peer,
5710 							      desc->length,
5711 							      desc->tx_status,
5712 							      false);
5713 			qdf_assert(pdev);
5714 			dp_tx_outstanding_dec(pdev);
5715 
5716 			/*
5717 			 * Calling a QDF WRAPPER here is creating significant
5718 			 * performance impact so avoided the wrapper call here
5719 			 */
5720 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5721 					       desc->id, DP_TX_COMP_UNMAP);
5722 			dp_tx_nbuf_unmap(soc, desc);
5723 			dp_tx_nbuf_dev_queue_free(&h, desc);
5724 			dp_tx_desc_free(soc, desc, desc->pool_id);
5725 			desc = next;
5726 			continue;
5727 		}
5728 
5729 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5730 
5731 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5732 					     ring_id);
5733 
5734 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5735 
5736 		dp_tx_desc_release(soc, desc, desc->pool_id);
5737 		desc = next;
5738 	}
5739 	dp_tx_nbuf_dev_kfree_list(&h);
5740 	if (txrx_peer)
5741 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5742 }
5743 
5744 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5745 static inline
5746 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5747 				   int max_reap_limit)
5748 {
5749 	bool limit_hit = false;
5750 
5751 	limit_hit =
5752 		(num_reaped >= max_reap_limit) ? true : false;
5753 
5754 	if (limit_hit)
5755 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5756 
5757 	return limit_hit;
5758 }
5759 
5760 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5761 {
5762 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5763 }
5764 
5765 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5766 {
5767 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5768 
5769 	return cfg->tx_comp_loop_pkt_limit;
5770 }
5771 #else
5772 static inline
5773 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5774 				   int max_reap_limit)
5775 {
5776 	return false;
5777 }
5778 
5779 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5780 {
5781 	return false;
5782 }
5783 
5784 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5785 {
5786 	return 0;
5787 }
5788 #endif
5789 
5790 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5791 static inline int
5792 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5793 				  int *max_reap_limit)
5794 {
5795 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5796 							       max_reap_limit);
5797 }
5798 #else
5799 static inline int
5800 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5801 				  int *max_reap_limit)
5802 {
5803 	return 0;
5804 }
5805 #endif
5806 
5807 #ifdef DP_TX_TRACKING
5808 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5809 {
5810 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5811 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5812 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5813 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5814 	}
5815 }
5816 #endif
5817 
5818 #ifndef WLAN_SOFTUMAC_SUPPORT
5819 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5820 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5821 			    uint32_t quota)
5822 {
5823 	void *tx_comp_hal_desc;
5824 	void *last_prefetched_hw_desc = NULL;
5825 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5826 	hal_soc_handle_t hal_soc;
5827 	uint8_t buffer_src;
5828 	struct dp_tx_desc_s *tx_desc = NULL;
5829 	struct dp_tx_desc_s *head_desc = NULL;
5830 	struct dp_tx_desc_s *tail_desc = NULL;
5831 	uint32_t num_processed = 0;
5832 	uint32_t count;
5833 	uint32_t num_avail_for_reap = 0;
5834 	bool force_break = false;
5835 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5836 	int max_reap_limit, ring_near_full;
5837 	uint32_t num_entries;
5838 
5839 	DP_HIST_INIT();
5840 
5841 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
5842 
5843 more_data:
5844 
5845 	hal_soc = soc->hal_soc;
5846 	/* Re-initialize local variables to be re-used */
5847 	head_desc = NULL;
5848 	tail_desc = NULL;
5849 	count = 0;
5850 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5851 
5852 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5853 							   &max_reap_limit);
5854 
5855 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5856 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5857 		return 0;
5858 	}
5859 
5860 	if (!num_avail_for_reap)
5861 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
5862 							    hal_ring_hdl, 0);
5863 
5864 	if (num_avail_for_reap >= quota)
5865 		num_avail_for_reap = quota;
5866 
5867 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5868 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5869 							    hal_ring_hdl,
5870 							    num_avail_for_reap);
5871 
5872 	/* Find head descriptor from completion ring */
5873 	while (qdf_likely(num_avail_for_reap--)) {
5874 
5875 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5876 		if (qdf_unlikely(!tx_comp_hal_desc))
5877 			break;
5878 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5879 							   tx_comp_hal_desc);
5880 
5881 		/* If this buffer was not released by TQM or FW, then it is not
5882 		 * Tx completion indication, assert */
5883 		if (qdf_unlikely(buffer_src !=
5884 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5885 				 (qdf_unlikely(buffer_src !=
5886 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5887 			uint8_t wbm_internal_error;
5888 
5889 			dp_err_rl(
5890 				"Tx comp release_src != TQM | FW but from %d",
5891 				buffer_src);
5892 			hal_dump_comp_desc(tx_comp_hal_desc);
5893 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5894 
5895 			/* When WBM sees NULL buffer_addr_info in any of
5896 			 * ingress rings it sends an error indication,
5897 			 * with wbm_internal_error=1, to a specific ring.
5898 			 * The WBM2SW ring used to indicate these errors is
5899 			 * fixed in HW, and that ring is being used as Tx
5900 			 * completion ring. These errors are not related to
5901 			 * Tx completions, and should just be ignored
5902 			 */
5903 			wbm_internal_error = hal_get_wbm_internal_error(
5904 							hal_soc,
5905 							tx_comp_hal_desc);
5906 
5907 			if (wbm_internal_error) {
5908 				dp_err_rl("Tx comp wbm_internal_error!!");
5909 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5910 
5911 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5912 								buffer_src)
5913 					dp_handle_wbm_internal_error(
5914 						soc,
5915 						tx_comp_hal_desc,
5916 						hal_tx_comp_get_buffer_type(
5917 							tx_comp_hal_desc));
5918 
5919 			} else {
5920 				dp_err_rl("Tx comp wbm_internal_error false");
5921 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5922 			}
5923 			continue;
5924 		}
5925 
5926 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5927 							       tx_comp_hal_desc,
5928 							       &tx_desc);
5929 		if (qdf_unlikely(!tx_desc)) {
5930 			dp_err("unable to retrieve tx_desc!");
5931 			hal_dump_comp_desc(tx_comp_hal_desc);
5932 			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
5933 			QDF_BUG(0);
5934 			continue;
5935 		}
5936 		tx_desc->buffer_src = buffer_src;
5937 
5938 		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
5939 			goto add_to_pool2;
5940 
5941 		/*
5942 		 * If the release source is FW, process the HTT status
5943 		 */
5944 		if (qdf_unlikely(buffer_src ==
5945 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5946 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5947 
5948 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5949 					htt_tx_status);
5950 			/* Collect hw completion contents */
5951 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5952 					      &tx_desc->comp, 1);
5953 			soc->arch_ops.dp_tx_process_htt_completion(
5954 							soc,
5955 							tx_desc,
5956 							htt_tx_status,
5957 							ring_id);
5958 		} else {
5959 			tx_desc->tx_status =
5960 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5961 			tx_desc->buffer_src = buffer_src;
5962 			/*
5963 			 * If the fast completion mode is enabled extended
5964 			 * metadata from descriptor is not copied
5965 			 */
5966 			if (qdf_likely(tx_desc->flags &
5967 						DP_TX_DESC_FLAG_SIMPLE))
5968 				goto add_to_pool;
5969 
5970 			/*
5971 			 * If the descriptor is already freed in vdev_detach,
5972 			 * continue to next descriptor
5973 			 */
5974 			if (qdf_unlikely
5975 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5976 				 !tx_desc->flags)) {
5977 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5978 						   tx_desc->id);
5979 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5980 				dp_tx_desc_check_corruption(tx_desc);
5981 				continue;
5982 			}
5983 
5984 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5985 				dp_tx_comp_info_rl("pdev in down state %d",
5986 						   tx_desc->id);
5987 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5988 				dp_tx_comp_free_buf(soc, tx_desc, false);
5989 				dp_tx_desc_release(soc, tx_desc,
5990 						   tx_desc->pool_id);
5991 				goto next_desc;
5992 			}
5993 
5994 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5995 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5996 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
5997 						 tx_desc->flags, tx_desc->id);
5998 				qdf_assert_always(0);
5999 			}
6000 
6001 			/* Collect hw completion contents */
6002 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6003 					      &tx_desc->comp, 1);
6004 add_to_pool:
6005 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
6006 
6007 add_to_pool2:
6008 			/* First ring descriptor on the cycle */
6009 			if (!head_desc) {
6010 				head_desc = tx_desc;
6011 				tail_desc = tx_desc;
6012 			}
6013 
6014 			tail_desc->next = tx_desc;
6015 			tx_desc->next = NULL;
6016 			tail_desc = tx_desc;
6017 		}
6018 next_desc:
6019 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
6020 
6021 		/*
6022 		 * Processed packet count is more than given quota
6023 		 * stop to processing
6024 		 */
6025 
6026 		count++;
6027 
6028 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
6029 					       num_avail_for_reap,
6030 					       hal_ring_hdl,
6031 					       &last_prefetched_hw_desc,
6032 					       &last_prefetched_sw_desc);
6033 
6034 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
6035 			break;
6036 	}
6037 
6038 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
6039 
6040 	/* Process the reaped descriptors */
6041 	if (head_desc)
6042 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
6043 
6044 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
6045 
6046 	/*
6047 	 * If we are processing in near-full condition, there are 3 scenario
6048 	 * 1) Ring entries has reached critical state
6049 	 * 2) Ring entries are still near high threshold
6050 	 * 3) Ring entries are below the safe level
6051 	 *
6052 	 * One more loop will move the state to normal processing and yield
6053 	 */
6054 	if (ring_near_full)
6055 		goto more_data;
6056 
6057 	if (dp_tx_comp_enable_eol_data_check(soc)) {
6058 
6059 		if (num_processed >= quota)
6060 			force_break = true;
6061 
6062 		if (!force_break &&
6063 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
6064 						  hal_ring_hdl)) {
6065 			DP_STATS_INC(soc, tx.hp_oos2, 1);
6066 			if (!hif_exec_should_yield(soc->hif_handle,
6067 						   int_ctx->dp_intr_id))
6068 				goto more_data;
6069 
6070 			num_avail_for_reap =
6071 				hal_srng_dst_num_valid_locked(soc->hal_soc,
6072 							      hal_ring_hdl,
6073 							      true);
6074 			if (qdf_unlikely(num_entries &&
6075 					 (num_avail_for_reap >=
6076 					  num_entries >> 1))) {
6077 				DP_STATS_INC(soc, tx.near_full, 1);
6078 				goto more_data;
6079 			}
6080 		}
6081 	}
6082 	DP_TX_HIST_STATS_PER_PDEV();
6083 
6084 	return num_processed;
6085 }
6086 #endif
6087 
6088 #ifdef FEATURE_WLAN_TDLS
6089 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6090 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
6091 {
6092 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6093 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6094 						     DP_MOD_ID_TDLS);
6095 
6096 	if (!vdev) {
6097 		dp_err("vdev handle for id %d is NULL", vdev_id);
6098 		return NULL;
6099 	}
6100 
6101 	if (tx_spec & OL_TX_SPEC_NO_FREE)
6102 		vdev->is_tdls_frame = true;
6103 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
6104 
6105 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
6106 }
6107 #endif
6108 
6109 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
6110 {
6111 	int pdev_id;
6112 	/*
6113 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
6114 	 */
6115 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
6116 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
6117 
6118 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
6119 				       vdev->vdev_id);
6120 
6121 	pdev_id =
6122 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
6123 						       vdev->pdev->pdev_id);
6124 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
6125 
6126 	/*
6127 	 * Set HTT Extension Valid bit to 0 by default
6128 	 */
6129 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
6130 
6131 	dp_tx_vdev_update_search_flags(vdev);
6132 
6133 	return QDF_STATUS_SUCCESS;
6134 }
6135 
6136 #ifndef FEATURE_WDS
6137 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
6138 {
6139 	return false;
6140 }
6141 #endif
6142 
6143 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
6144 {
6145 	struct dp_soc *soc = vdev->pdev->soc;
6146 
6147 	/*
6148 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
6149 	 * for TDLS link
6150 	 *
6151 	 * Enable AddrY (SA based search) only for non-WDS STA and
6152 	 * ProxySTA VAP (in HKv1) modes.
6153 	 *
6154 	 * In all other VAP modes, only DA based search should be
6155 	 * enabled
6156 	 */
6157 	if (vdev->opmode == wlan_op_mode_sta &&
6158 	    vdev->tdls_link_connected)
6159 		vdev->hal_desc_addr_search_flags =
6160 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
6161 	else if ((vdev->opmode == wlan_op_mode_sta) &&
6162 		 !dp_tx_da_search_override(vdev))
6163 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
6164 	else
6165 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
6166 
6167 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
6168 		vdev->search_type = soc->sta_mode_search_policy;
6169 	else
6170 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
6171 }
6172 
6173 #ifdef WLAN_SUPPORT_PPEDS
6174 static inline bool
6175 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
6176 			  struct dp_vdev *vdev,
6177 			  struct dp_tx_desc_s *tx_desc)
6178 {
6179 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
6180 		return false;
6181 
6182 	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
6183 		return true;
6184 	/*
6185 	 * if vdev is given, then only check whether desc
6186 	 * vdev match. if vdev is NULL, then check whether
6187 	 * desc pdev match.
6188 	 */
6189 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
6190 		(tx_desc->pdev == pdev);
6191 }
6192 #else
6193 static inline bool
6194 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
6195 			  struct dp_vdev *vdev,
6196 			  struct dp_tx_desc_s *tx_desc)
6197 {
6198 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
6199 		return false;
6200 
6201 	/*
6202 	 * if vdev is given, then only check whether desc
6203 	 * vdev match. if vdev is NULL, then check whether
6204 	 * desc pdev match.
6205 	 */
6206 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
6207 		(tx_desc->pdev == pdev);
6208 }
6209 #endif
6210 
6211 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6212 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6213 		      bool force_free)
6214 {
6215 	uint8_t i;
6216 	uint32_t j;
6217 	uint32_t num_desc, page_id, offset;
6218 	uint16_t num_desc_per_page;
6219 	struct dp_soc *soc = pdev->soc;
6220 	struct dp_tx_desc_s *tx_desc = NULL;
6221 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6222 
6223 	if (!vdev && !force_free) {
6224 		dp_err("Reset TX desc vdev, Vdev param is required!");
6225 		return;
6226 	}
6227 
6228 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
6229 		tx_desc_pool = &soc->tx_desc[i];
6230 		if (!(tx_desc_pool->pool_size) ||
6231 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
6232 		    !(tx_desc_pool->desc_pages.cacheable_pages))
6233 			continue;
6234 
6235 		/*
6236 		 * Add flow pool lock protection in case pool is freed
6237 		 * due to all tx_desc is recycled when handle TX completion.
6238 		 * this is not necessary when do force flush as:
6239 		 * a. double lock will happen if dp_tx_desc_release is
6240 		 *    also trying to acquire it.
6241 		 * b. dp interrupt has been disabled before do force TX desc
6242 		 *    flush in dp_pdev_deinit().
6243 		 */
6244 		if (!force_free)
6245 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
6246 		num_desc = tx_desc_pool->pool_size;
6247 		num_desc_per_page =
6248 			tx_desc_pool->desc_pages.num_element_per_page;
6249 		for (j = 0; j < num_desc; j++) {
6250 			page_id = j / num_desc_per_page;
6251 			offset = j % num_desc_per_page;
6252 
6253 			if (qdf_unlikely(!(tx_desc_pool->
6254 					 desc_pages.cacheable_pages)))
6255 				break;
6256 
6257 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6258 
6259 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6260 				/*
6261 				 * Free TX desc if force free is
6262 				 * required, otherwise only reset vdev
6263 				 * in this TX desc.
6264 				 */
6265 				if (force_free) {
6266 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6267 					dp_tx_comp_free_buf(soc, tx_desc,
6268 							    false);
6269 					dp_tx_desc_release(soc, tx_desc, i);
6270 				} else {
6271 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6272 				}
6273 			}
6274 		}
6275 		if (!force_free)
6276 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
6277 	}
6278 }
6279 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6280 /**
6281  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
6282  *
6283  * @soc: Handle to DP soc structure
6284  * @tx_desc: pointer of one TX desc
6285  * @desc_pool_id: TX Desc pool id
6286  */
6287 static inline void
6288 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
6289 		      uint8_t desc_pool_id)
6290 {
6291 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
6292 
6293 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6294 
6295 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
6296 }
6297 
6298 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6299 		      bool force_free)
6300 {
6301 	uint8_t i, num_pool;
6302 	uint32_t j;
6303 	uint32_t num_desc, num_desc_t, page_id, offset;
6304 	uint16_t num_desc_per_page;
6305 	struct dp_soc *soc = pdev->soc;
6306 	struct dp_tx_desc_s *tx_desc = NULL;
6307 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6308 
6309 	if (!vdev && !force_free) {
6310 		dp_err("Reset TX desc vdev, Vdev param is required!");
6311 		return;
6312 	}
6313 
6314 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6315 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6316 
6317 	for (i = 0; i < num_pool; i++) {
6318 		tx_desc_pool = &soc->tx_desc[i];
6319 		if (!tx_desc_pool->desc_pages.cacheable_pages)
6320 			continue;
6321 
6322 		num_desc_t = dp_get_updated_tx_desc(soc->ctrl_psoc, i,
6323 						    num_desc);
6324 		num_desc_per_page =
6325 			tx_desc_pool->desc_pages.num_element_per_page;
6326 		for (j = 0; j < num_desc_t; j++) {
6327 			page_id = j / num_desc_per_page;
6328 			offset = j % num_desc_per_page;
6329 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6330 
6331 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6332 				if (force_free) {
6333 					dp_tx_comp_free_buf(soc, tx_desc,
6334 							    false);
6335 					dp_tx_desc_release(soc, tx_desc, i);
6336 				} else {
6337 					dp_tx_desc_reset_vdev(soc, tx_desc,
6338 							      i);
6339 				}
6340 			}
6341 		}
6342 	}
6343 }
6344 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6345 
6346 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
6347 {
6348 	struct dp_pdev *pdev = vdev->pdev;
6349 
6350 	/* Reset TX desc associated to this Vdev as NULL */
6351 	dp_tx_desc_flush(pdev, vdev, false);
6352 
6353 	return QDF_STATUS_SUCCESS;
6354 }
6355 
6356 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6357 /* Pools will be allocated dynamically */
6358 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6359 					   int num_desc)
6360 {
6361 	uint8_t i;
6362 
6363 	for (i = 0; i < num_pool; i++) {
6364 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
6365 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
6366 	}
6367 
6368 	return QDF_STATUS_SUCCESS;
6369 }
6370 
6371 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6372 					  uint32_t num_desc)
6373 {
6374 	return QDF_STATUS_SUCCESS;
6375 }
6376 
6377 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6378 {
6379 }
6380 
6381 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6382 {
6383 	uint8_t i;
6384 
6385 	for (i = 0; i < num_pool; i++)
6386 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
6387 }
6388 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6389 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6390 					   uint32_t num_desc)
6391 {
6392 	uint8_t i, count;
6393 
6394 	/* Allocate software Tx descriptor pools */
6395 	for (i = 0; i < num_pool; i++) {
6396 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
6397 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6398 				  FL("Tx Desc Pool alloc %d failed %pK"),
6399 				  i, soc);
6400 			goto fail;
6401 		}
6402 	}
6403 	return QDF_STATUS_SUCCESS;
6404 
6405 fail:
6406 	for (count = 0; count < i; count++)
6407 		dp_tx_desc_pool_free(soc, count);
6408 
6409 	return QDF_STATUS_E_NOMEM;
6410 }
6411 
6412 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6413 					  uint32_t num_desc)
6414 {
6415 	uint8_t i;
6416 	for (i = 0; i < num_pool; i++) {
6417 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
6418 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6419 				  FL("Tx Desc Pool init %d failed %pK"),
6420 				  i, soc);
6421 			return QDF_STATUS_E_NOMEM;
6422 		}
6423 	}
6424 	return QDF_STATUS_SUCCESS;
6425 }
6426 
6427 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6428 {
6429 	uint8_t i;
6430 
6431 	for (i = 0; i < num_pool; i++)
6432 		dp_tx_desc_pool_deinit(soc, i);
6433 }
6434 
6435 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6436 {
6437 	uint8_t i;
6438 
6439 	for (i = 0; i < num_pool; i++)
6440 		dp_tx_desc_pool_free(soc, i);
6441 }
6442 
6443 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6444 
6445 /**
6446  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
6447  * @soc: core txrx main context
6448  * @num_pool: number of pools
6449  *
6450  */
6451 static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
6452 {
6453 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
6454 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
6455 }
6456 
6457 /**
6458  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
6459  * @soc: core txrx main context
6460  * @num_pool: number of pools
6461  *
6462  */
6463 static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
6464 {
6465 	dp_tx_tso_desc_pool_free(soc, num_pool);
6466 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
6467 }
6468 
6469 #ifndef WLAN_SOFTUMAC_SUPPORT
6470 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6471 {
6472 	uint8_t num_pool, num_ext_pool;
6473 
6474 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6475 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
6476 
6477 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6478 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
6479 	dp_tx_delete_static_pools(soc, num_pool);
6480 }
6481 
6482 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6483 {
6484 	uint8_t num_pool, num_ext_pool;
6485 
6486 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6487 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
6488 
6489 	dp_tx_flow_control_deinit(soc);
6490 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6491 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
6492 	dp_tx_deinit_static_pools(soc, num_pool);
6493 }
6494 #else
6495 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6496 {
6497 	uint8_t num_pool;
6498 
6499 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6500 
6501 	dp_tx_delete_static_pools(soc, num_pool);
6502 }
6503 
6504 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6505 {
6506 	uint8_t num_pool;
6507 
6508 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6509 
6510 	dp_tx_flow_control_deinit(soc);
6511 	dp_tx_deinit_static_pools(soc, num_pool);
6512 }
6513 #endif /*WLAN_SOFTUMAC_SUPPORT*/
6514 
6515 /**
6516  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
6517  * @soc: DP soc handle
6518  * @num_pool: Number of pools
6519  * @num_desc: Number of descriptors
6520  *
6521  * Reserve TSO descriptor buffers
6522  *
6523  * Return: QDF_STATUS_E_FAILURE on failure or
6524  *         QDF_STATUS_SUCCESS on success
6525  */
6526 static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6527 						uint8_t num_pool,
6528 						uint32_t num_desc)
6529 {
6530 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6531 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6532 		return QDF_STATUS_E_FAILURE;
6533 	}
6534 
6535 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6536 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6537 		       num_pool, soc);
6538 		return QDF_STATUS_E_FAILURE;
6539 	}
6540 	return QDF_STATUS_SUCCESS;
6541 }
6542 
6543 /**
6544  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6545  * @soc: DP soc handle
6546  * @num_pool: Number of pools
6547  * @num_desc: Number of descriptors
6548  *
6549  * Initialize TSO descriptor pools
6550  *
6551  * Return: QDF_STATUS_E_FAILURE on failure or
6552  *         QDF_STATUS_SUCCESS on success
6553  */
6554 
6555 static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6556 					       uint8_t num_pool,
6557 					       uint32_t num_desc)
6558 {
6559 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6560 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6561 		return QDF_STATUS_E_FAILURE;
6562 	}
6563 
6564 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6565 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6566 		       num_pool, soc);
6567 		return QDF_STATUS_E_FAILURE;
6568 	}
6569 	return QDF_STATUS_SUCCESS;
6570 }
6571 
6572 #ifndef WLAN_SOFTUMAC_SUPPORT
6573 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6574 {
6575 	uint8_t num_pool, num_ext_pool;
6576 	uint32_t num_desc;
6577 	uint32_t num_ext_desc;
6578 
6579 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6580 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
6581 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6582 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6583 
6584 	dp_info("Tx Desc Alloc num_pool: %d descs: %d", num_pool, num_desc);
6585 
6586 	if ((num_pool > MAX_TXDESC_POOLS) ||
6587 	    (num_ext_pool > MAX_TXDESC_POOLS) ||
6588 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6589 		goto fail1;
6590 
6591 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6592 		goto fail1;
6593 
6594 	if (dp_tx_ext_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
6595 		goto fail2;
6596 
6597 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6598 		return QDF_STATUS_SUCCESS;
6599 
6600 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
6601 		goto fail3;
6602 
6603 	return QDF_STATUS_SUCCESS;
6604 
6605 fail3:
6606 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
6607 fail2:
6608 	dp_tx_delete_static_pools(soc, num_pool);
6609 fail1:
6610 	return QDF_STATUS_E_RESOURCES;
6611 }
6612 
6613 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6614 {
6615 	uint8_t num_pool, num_ext_pool;
6616 	uint32_t num_desc;
6617 	uint32_t num_ext_desc;
6618 
6619 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6620 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
6621 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6622 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6623 
6624 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6625 		goto fail1;
6626 
6627 	if (dp_tx_ext_desc_pool_init(soc, num_ext_pool, num_ext_desc))
6628 		goto fail2;
6629 
6630 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6631 		return QDF_STATUS_SUCCESS;
6632 
6633 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_pool, num_ext_desc))
6634 		goto fail3;
6635 
6636 	dp_tx_flow_control_init(soc);
6637 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6638 	return QDF_STATUS_SUCCESS;
6639 
6640 fail3:
6641 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
6642 fail2:
6643 	dp_tx_deinit_static_pools(soc, num_pool);
6644 fail1:
6645 	return QDF_STATUS_E_RESOURCES;
6646 }
6647 
6648 #else
6649 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6650 {
6651 	uint8_t num_pool;
6652 	uint32_t num_desc;
6653 
6654 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6655 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6656 
6657 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6658 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6659 		  __func__, num_pool, num_desc);
6660 
6661 	if ((num_pool > MAX_TXDESC_POOLS) ||
6662 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6663 		return QDF_STATUS_E_RESOURCES;
6664 
6665 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6666 		return QDF_STATUS_E_RESOURCES;
6667 
6668 	return QDF_STATUS_SUCCESS;
6669 }
6670 
6671 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6672 {
6673 	uint8_t num_pool;
6674 	uint32_t num_desc;
6675 
6676 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6677 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6678 
6679 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6680 		return QDF_STATUS_E_RESOURCES;
6681 
6682 	dp_tx_flow_control_init(soc);
6683 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6684 	return QDF_STATUS_SUCCESS;
6685 }
6686 #endif
6687 
6688 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6689 {
6690 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6691 	uint8_t num_ext_desc_pool;
6692 	uint32_t num_ext_desc;
6693 
6694 	num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
6695 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6696 
6697 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_desc_pool, num_ext_desc))
6698 		return QDF_STATUS_E_FAILURE;
6699 
6700 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_desc_pool, num_ext_desc))
6701 		return QDF_STATUS_E_FAILURE;
6702 
6703 	return QDF_STATUS_SUCCESS;
6704 }
6705 
6706 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6707 {
6708 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6709 	uint8_t num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
6710 
6711 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_ext_desc_pool);
6712 	dp_tx_tso_cmn_desc_pool_free(soc, num_ext_desc_pool);
6713 
6714 	return QDF_STATUS_SUCCESS;
6715 }
6716 
6717 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6718 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6719 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6720 			  qdf_nbuf_t nbuf)
6721 {
6722 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6723 		uint64_t tsf_time;
6724 
6725 		if (vdev->get_tsf_time) {
6726 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6727 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6728 		}
6729 	}
6730 }
6731 
6732 void dp_pkt_get_timestamp(uint64_t *time)
6733 {
6734 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6735 		*time = qdf_get_log_timestamp();
6736 }
6737 #endif
6738 
6739 #ifdef QCA_MULTIPASS_SUPPORT
6740 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
6741 				 struct dp_tx_msdu_info_s *msdu_info,
6742 				 uint16_t group_key)
6743 {
6744 	struct htt_tx_msdu_desc_ext2_t *meta_data =
6745 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
6746 
6747 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
6748 
6749 	/*
6750 	 * When attempting to send a multicast packet with multi-passphrase,
6751 	 * host shall add HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
6752 	 * ref htt.h indicating the group_id field in "key_flags" also having
6753 	 * "valid_key_flags" as 1. Assign “key_flags = group_key_ix”.
6754 	 */
6755 	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0],
6756 						       1);
6757 	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
6758 }
6759 
6760 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
6761 	defined(WLAN_MCAST_MLO)
6762 /**
6763  * dp_tx_need_mcast_reinject() - If frame needs to be processed in reinject path
6764  * @vdev: DP vdev handle
6765  *
6766  * Return: true if reinject handling is required else false
6767  */
6768 static inline bool
6769 dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
6770 {
6771 	if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
6772 		return true;
6773 
6774 	return false;
6775 }
6776 #else
6777 static inline bool
6778 dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
6779 {
6780 	return false;
6781 }
6782 #endif
6783 
6784 /**
6785  * dp_tx_need_multipass_process() - If frame needs multipass phrase processing
6786  * @soc: dp soc handle
6787  * @vdev: DP vdev handle
6788  * @buf: frame
6789  * @vlan_id: vlan id of frame
6790  *
6791  * Return: whether peer is special or classic
6792  */
6793 static
6794 uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
6795 				     qdf_nbuf_t buf, uint16_t *vlan_id)
6796 {
6797 	struct dp_txrx_peer *txrx_peer = NULL;
6798 	struct dp_peer *peer = NULL;
6799 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
6800 	struct vlan_ethhdr *veh = NULL;
6801 	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
6802 			(htons(eh->ether_type) != ETH_P_8021Q));
6803 
6804 	if (qdf_unlikely(not_vlan))
6805 		return DP_VLAN_UNTAGGED;
6806 
6807 	veh = (struct vlan_ethhdr *)eh;
6808 	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
6809 
6810 	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
6811 		/* look for handling of multicast packets in reinject path */
6812 		if (dp_tx_need_mcast_reinject(vdev))
6813 			return DP_VLAN_UNTAGGED;
6814 
6815 		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
6816 		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
6817 			      mpass_peer_list_elem) {
6818 			if (*vlan_id == txrx_peer->vlan_id) {
6819 				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
6820 				return DP_VLAN_TAGGED_MULTICAST;
6821 			}
6822 		}
6823 		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
6824 		return DP_VLAN_UNTAGGED;
6825 	}
6826 
6827 	peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
6828 				      DP_MOD_ID_TX_MULTIPASS);
6829 	if (qdf_unlikely(!peer))
6830 		return DP_VLAN_UNTAGGED;
6831 
6832 	/*
6833 	 * Do not drop the frame when vlan_id doesn't match.
6834 	 * Send the frame as it is.
6835 	 */
6836 	if (*vlan_id == peer->txrx_peer->vlan_id) {
6837 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
6838 		return DP_VLAN_TAGGED_UNICAST;
6839 	}
6840 
6841 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
6842 	return DP_VLAN_UNTAGGED;
6843 }
6844 
6845 #ifndef WLAN_REPEATER_NOT_SUPPORTED
6846 static inline void
6847 dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
6848 				     qdf_nbuf_t nbuf,
6849 				     struct dp_tx_msdu_info_s *msdu_info)
6850 {
6851 	qdf_nbuf_t nbuf_copy = NULL;
6852 
6853 	/* AP can have classic clients, special clients &
6854 	 * classic repeaters.
6855 	 * 1. Classic clients & special client:
6856 	 *	Remove vlan header, find corresponding group key
6857 	 *	index, fill in metaheader and enqueue multicast
6858 	 *	frame to TCL.
6859 	 * 2. Classic repeater:
6860 	 *	Pass through to classic repeater with vlan tag
6861 	 *	intact without any group key index. Hardware
6862 	 *	will know which key to use to send frame to
6863 	 *	repeater.
6864 	 */
6865 	nbuf_copy = qdf_nbuf_copy(nbuf);
6866 
6867 	/*
6868 	 * Send multicast frame to special peers even
6869 	 * if pass through to classic repeater fails.
6870 	 */
6871 	if (nbuf_copy) {
6872 		struct dp_tx_msdu_info_s msdu_info_copy;
6873 
6874 		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
6875 		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
6876 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
6877 		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
6878 						   &msdu_info_copy,
6879 						   HTT_INVALID_PEER, NULL);
6880 		if (nbuf_copy) {
6881 			qdf_nbuf_free(nbuf_copy);
6882 			dp_info_rl("nbuf_copy send failed");
6883 		}
6884 	}
6885 }
6886 #else
6887 static inline void
6888 dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
6889 				     qdf_nbuf_t nbuf,
6890 				     struct dp_tx_msdu_info_s *msdu_info)
6891 {
6892 }
6893 #endif
6894 
6895 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
6896 			     qdf_nbuf_t nbuf,
6897 			     struct dp_tx_msdu_info_s *msdu_info)
6898 {
6899 	uint16_t vlan_id = 0;
6900 	uint16_t group_key = 0;
6901 	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
6902 
6903 	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0]))
6904 		return true;
6905 
6906 	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
6907 
6908 	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
6909 	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
6910 		return true;
6911 
6912 	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
6913 		dp_tx_remove_vlan_tag(vdev, nbuf);
6914 		return true;
6915 	}
6916 
6917 	dp_tx_multipass_send_pkt_to_repeater(soc, vdev, nbuf, msdu_info);
6918 	group_key = vdev->iv_vlan_map[vlan_id];
6919 
6920 	/*
6921 	 * If group key is not installed, drop the frame.
6922 	 */
6923 	if (!group_key)
6924 		return false;
6925 
6926 	dp_tx_remove_vlan_tag(vdev, nbuf);
6927 	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
6928 	msdu_info->exception_fw = 1;
6929 	return true;
6930 }
6931 #endif /* QCA_MULTIPASS_SUPPORT */
6932