xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 839714c413056bc9b82af766295b4ffabe28bbbf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 #define DP_RETRY_COUNT 7
68 #ifdef WLAN_PEER_JITTER
69 #define DP_AVG_JITTER_WEIGHT_DENOM 4
70 #define DP_AVG_DELAY_WEIGHT_DENOM 3
71 #endif
72 
73 #ifdef QCA_DP_TX_FW_METADATA_V2
74 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
75 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
76 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
77 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
78 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
79 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
80 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
81 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
82 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
83 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
84 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
85 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
86 #define DP_TCL_METADATA_TYPE_PEER_BASED \
87 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
88 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
89 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
90 #else
91 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
92 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
93 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
94 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
95 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
96 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
97 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
98 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
99 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
100 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
101 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
102 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
103 #define DP_TCL_METADATA_TYPE_PEER_BASED \
104 	HTT_TCL_METADATA_TYPE_PEER_BASED
105 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
106 	HTT_TCL_METADATA_TYPE_VDEV_BASED
107 #endif
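/*
 * Illustrative usage sketch (not part of the flow shown in this file; the
 * uint16_t type is shown only for illustration): callers build the TCL
 * metadata word with the wrappers above so the same code works for both the
 * V2 and legacy HTT metadata layouts, e.g.
 *
 *	uint16_t htt_tcl_metadata = 0;
 *
 *	DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
 *				    DP_TCL_METADATA_TYPE_VDEV_BASED);
 *	DP_TX_TCL_METADATA_VDEV_ID_SET(htt_tcl_metadata, vdev->vdev_id);
 *
 * The field widths are owned by the underlying HTT definitions; the wrappers
 * only select which generation of setters is compiled in.
 */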
108 
109 #define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
110 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
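/*
 * Example (illustrative values only): with LINK_ID_OFFSET = 8 and
 * LINK_ID_BITS = 3, a ppdu_id of 0x00000500 yields
 * (0x500 >> 8) & 0x7 = 5 as the hw link id.
 */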
111 
112 /* mapping between hal encrypt type and cdp_sec_type */
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
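/*
 * Note: sec_type_map[] is indexed by the cdp_sec_type enum, so the order of
 * the HAL encrypt types above is expected to mirror that enum (e.g. the
 * TKIP-with-MIC entry lining up with the TKIP cdp_sec_type value).
 */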
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
258 
259 /**
260  * dp_is_tput_high() - Check if throughput is high
261  *
262  * @soc: core txrx main context
263  *
264  * The current function is based on the RTPM tput policy variable, where RTPM is
265  * avoided based on throughput.
266  */
267 static inline int dp_is_tput_high(struct dp_soc *soc)
268 {
269 	return dp_get_rtpm_tput_policy_requirement(soc);
270 }
271 
272 #if defined(FEATURE_TSO)
273 /**
274  * dp_tx_tso_unmap_segment() - Unmap TSO segment
275  *
276  * @soc: core txrx main context
277  * @seg_desc: tso segment descriptor
278  * @num_seg_desc: tso number segment descriptor
279  */
280 static void dp_tx_tso_unmap_segment(
281 		struct dp_soc *soc,
282 		struct qdf_tso_seg_elem_t *seg_desc,
283 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
284 {
285 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
286 	if (qdf_unlikely(!seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else if (qdf_unlikely(!num_seg_desc)) {
291 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
292 			 __func__, __LINE__);
293 		qdf_assert(0);
294 	} else {
295 		bool is_last_seg;
296 		/* no tso segment left to do dma unmap */
297 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
298 			return;
299 
300 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
301 					true : false;
302 		qdf_nbuf_unmap_tso_segment(soc->osdev,
303 					   seg_desc, is_last_seg);
304 		num_seg_desc->num_seg.tso_cmn_num_seg--;
305 	}
306 }
307 
308 /**
309  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
310  *                            back to the freelist
311  *
312  * @soc: soc device handle
313  * @tx_desc: Tx software descriptor
314  */
315 static void dp_tx_tso_desc_release(struct dp_soc *soc,
316 				   struct dp_tx_desc_s *tx_desc)
317 {
318 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
319 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
320 		dp_tx_err("TSO desc is NULL!");
321 		qdf_assert(0);
322 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
323 		dp_tx_err("TSO num desc is NULL!");
324 		qdf_assert(0);
325 	} else {
326 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
327 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
328 				msdu_ext_desc->tso_num_desc;
329 
330 		/* Add the tso num segment into the free list */
331 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
332 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
333 					    tx_desc->msdu_ext_desc->
334 					    tso_num_desc);
335 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
336 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
337 		}
338 
339 		/* Add the tso segment into the free list */
340 		dp_tx_tso_desc_free(soc,
341 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
342 				    tso_desc);
343 		tx_desc->msdu_ext_desc->tso_desc = NULL;
344 	}
345 }
346 #else
347 static void dp_tx_tso_unmap_segment(
348 		struct dp_soc *soc,
349 		struct qdf_tso_seg_elem_t *seg_desc,
350 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
351 
352 {
353 }
354 
355 static void dp_tx_tso_desc_release(struct dp_soc *soc,
356 				   struct dp_tx_desc_s *tx_desc)
357 {
358 }
359 #endif
360 
361 void
362 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
363 {
364 	struct dp_pdev *pdev = tx_desc->pdev;
365 	struct dp_soc *soc;
366 	uint8_t comp_status = 0;
367 
368 	qdf_assert(pdev);
369 
370 	soc = pdev->soc;
371 
372 	dp_tx_outstanding_dec(pdev);
373 
374 	if (tx_desc->msdu_ext_desc) {
375 		if (tx_desc->frm_type == dp_tx_frm_tso)
376 			dp_tx_tso_desc_release(soc, tx_desc);
377 
378 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
379 			dp_tx_me_free_buf(tx_desc->pdev,
380 					  tx_desc->msdu_ext_desc->me_buffer);
381 
382 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
383 	}
384 
385 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
386 		qdf_atomic_dec(&soc->num_tx_exception);
387 
388 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
389 				tx_desc->buffer_src)
390 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
391 							     soc->hal_soc);
392 	else
393 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
394 
395 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
396 		    tx_desc->id, comp_status,
397 		    qdf_atomic_read(&pdev->num_tx_outstanding));
398 
399 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
400 	return;
401 }
402 
403 /**
404  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
405  * @vdev: DP vdev Handle
406  * @nbuf: skb
407  * @msdu_info: msdu_info required to create HTT metadata
408  *
409  * Prepares and fills HTT metadata in the frame pre-header for special frames
410  * that should be transmitted using varying transmit parameters.
411  * There are 2 VDEV modes that currently need this special metadata -
412  *  1) Mesh Mode
413  *  2) DSRC Mode
414  *
415  * Return: HTT metadata size
416  *
417  */
418 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
419 					  struct dp_tx_msdu_info_s *msdu_info)
420 {
421 	uint32_t *meta_data = msdu_info->meta_data;
422 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
423 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
424 
425 	uint8_t htt_desc_size;
426 
427 	/* Size rounded up to a multiple of 8 bytes */
428 	uint8_t htt_desc_size_aligned;
429 
430 	uint8_t *hdr = NULL;
431 
432 	/*
433 	 * Metadata - HTT MSDU Extension header
434 	 */
435 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
436 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
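	/*
	 * Worked example (illustrative size): an htt_desc_size of 36 bytes
	 * gives (36 + 7) & ~0x7 = 40 bytes of aligned headroom to reserve
	 * for the metadata.
	 */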
437 
438 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
439 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
440 							   meta_data[0]) ||
441 	    msdu_info->exception_fw) {
442 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
443 				 htt_desc_size_aligned)) {
444 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
445 							 htt_desc_size_aligned);
446 			if (!nbuf) {
447 				/*
448 				 * qdf_nbuf_realloc_headroom won't do skb_clone
449 				 * as skb_realloc_headroom does. So, no free is
450 				 * needed here.
451 				 */
452 				DP_STATS_INC(vdev,
453 					     tx_i.dropped.headroom_insufficient,
454 					     1);
455 				qdf_print(" %s[%d] skb_realloc_headroom failed",
456 					  __func__, __LINE__);
457 				return 0;
458 			}
459 		}
460 		/* Fill and add HTT metaheader */
461 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
462 		if (!hdr) {
463 			dp_tx_err("Error in filling HTT metadata");
464 
465 			return 0;
466 		}
467 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
468 
469 	} else if (vdev->opmode == wlan_op_mode_ocb) {
470 		/* Todo - Add support for DSRC */
471 	}
472 
473 	return htt_desc_size_aligned;
474 }
475 
476 /**
477  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
478  * @tso_seg: TSO segment to process
479  * @ext_desc: Pointer to MSDU extension descriptor
480  *
481  * Return: void
482  */
483 #if defined(FEATURE_TSO)
484 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
485 		void *ext_desc)
486 {
487 	uint8_t num_frag;
488 	uint32_t tso_flags;
489 
490 	/*
491 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
492 	 * tcp_flag_mask
493 	 *
494 	 * Checksum enable flags are set in TCL descriptor and not in Extension
495 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
496 	 */
497 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
498 
499 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
500 
501 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
502 		tso_seg->tso_flags.ip_len);
503 
504 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
505 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
506 
507 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
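	/*
	 * Each TSO fragment's DMA address is split into 32-bit words before
	 * being programmed into the extension descriptor, e.g. (illustrative
	 * address) a paddr of 0x123456780 becomes lo = 0x23456780, hi = 0x1.
	 */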
508 		uint32_t lo = 0;
509 		uint32_t hi = 0;
510 
511 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
512 				  (tso_seg->tso_frags[num_frag].length));
513 
514 		qdf_dmaaddr_to_32s(
515 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
516 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
517 			tso_seg->tso_frags[num_frag].length);
518 	}
519 
520 	return;
521 }
522 #else
523 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
524 		void *ext_desc)
525 {
526 	return;
527 }
528 #endif
529 
530 #if defined(FEATURE_TSO)
531 /**
532  * dp_tx_free_tso_seg_list() - Loop through the tso segments
533  *                             allocated and free them
534  * @soc: soc handle
535  * @free_seg: list of tso segments
536  * @msdu_info: msdu descriptor
537  *
538  * Return: void
539  */
540 static void dp_tx_free_tso_seg_list(
541 		struct dp_soc *soc,
542 		struct qdf_tso_seg_elem_t *free_seg,
543 		struct dp_tx_msdu_info_s *msdu_info)
544 {
545 	struct qdf_tso_seg_elem_t *next_seg;
546 
547 	while (free_seg) {
548 		next_seg = free_seg->next;
549 		dp_tx_tso_desc_free(soc,
550 				    msdu_info->tx_queue.desc_pool_id,
551 				    free_seg);
552 		free_seg = next_seg;
553 	}
554 }
555 
556 /**
557  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
558  *                                 allocated and free them
559  * @soc:  soc handle
560  * @free_num_seg: list of tso number segments
561  * @msdu_info: msdu descriptor
562  *
563  * Return: void
564  */
565 static void dp_tx_free_tso_num_seg_list(
566 		struct dp_soc *soc,
567 		struct qdf_tso_num_seg_elem_t *free_num_seg,
568 		struct dp_tx_msdu_info_s *msdu_info)
569 {
570 	struct qdf_tso_num_seg_elem_t *next_num_seg;
571 
572 	while (free_num_seg) {
573 		next_num_seg = free_num_seg->next;
574 		dp_tso_num_seg_free(soc,
575 				    msdu_info->tx_queue.desc_pool_id,
576 				    free_num_seg);
577 		free_num_seg = next_num_seg;
578 	}
579 }
580 
581 /**
582  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
583  *                              do dma unmap for each segment
584  * @soc: soc handle
585  * @free_seg: list of tso segments
586  * @num_seg_desc: tso number segment descriptor
587  *
588  * Return: void
589  */
590 static void dp_tx_unmap_tso_seg_list(
591 		struct dp_soc *soc,
592 		struct qdf_tso_seg_elem_t *free_seg,
593 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
594 {
595 	struct qdf_tso_seg_elem_t *next_seg;
596 
597 	if (qdf_unlikely(!num_seg_desc)) {
598 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
599 		return;
600 	}
601 
602 	while (free_seg) {
603 		next_seg = free_seg->next;
604 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
605 		free_seg = next_seg;
606 	}
607 }
608 
609 #ifdef FEATURE_TSO_STATS
610 /**
611  * dp_tso_get_stats_idx() - Retrieve the tso packet id
612  * @pdev: pdev handle
613  *
614  * Return: id
615  */
616 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
617 {
618 	uint32_t stats_idx;
619 
620 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
621 						% CDP_MAX_TSO_PACKETS);
622 	return stats_idx;
623 }
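/*
 * The returned index simply wraps the atomic counter modulo
 * CDP_MAX_TSO_PACKETS; with a limit of 16 (value used here only for
 * illustration) the ids cycle 1, 2, ..., 15, 0, 1, ...
 */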
624 #else
625 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
626 {
627 	return 0;
628 }
629 #endif /* FEATURE_TSO_STATS */
630 
631 /**
632  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
633  *				     free the tso segments descriptor and
634  *				     tso num segments descriptor
635  * @soc:  soc handle
636  * @msdu_info: msdu descriptor
637  * @tso_seg_unmap: flag to show if dma unmap is necessary
638  *
639  * Return: void
640  */
641 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
642 					  struct dp_tx_msdu_info_s *msdu_info,
643 					  bool tso_seg_unmap)
644 {
645 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
646 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
647 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
648 					tso_info->tso_num_seg_list;
649 
650 	/* do dma unmap for each segment */
651 	if (tso_seg_unmap)
652 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
653 
654 	/* free all tso number segment descriptors, though there is typically only 1 */
655 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
656 
657 	/* free all tso segment descriptor */
658 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
659 }
660 
661 /**
662  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
663  * @vdev: virtual device handle
664  * @msdu: network buffer
665  * @msdu_info: meta data associated with the msdu
666  *
667  * Return: QDF_STATUS_SUCCESS success
668  */
669 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
670 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
671 {
672 	struct qdf_tso_seg_elem_t *tso_seg;
673 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
674 	struct dp_soc *soc = vdev->pdev->soc;
675 	struct dp_pdev *pdev = vdev->pdev;
676 	struct qdf_tso_info_t *tso_info;
677 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
678 	tso_info = &msdu_info->u.tso_info;
679 	tso_info->curr_seg = NULL;
680 	tso_info->tso_seg_list = NULL;
681 	tso_info->num_segs = num_seg;
682 	msdu_info->frm_type = dp_tx_frm_tso;
683 	tso_info->tso_num_seg_list = NULL;
684 
685 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
686 
687 	while (num_seg) {
688 		tso_seg = dp_tx_tso_desc_alloc(
689 				soc, msdu_info->tx_queue.desc_pool_id);
690 		if (tso_seg) {
691 			tso_seg->next = tso_info->tso_seg_list;
692 			tso_info->tso_seg_list = tso_seg;
693 			num_seg--;
694 		} else {
695 			dp_err_rl("Failed to alloc tso seg desc");
696 			DP_STATS_INC_PKT(vdev->pdev,
697 					 tso_stats.tso_no_mem_dropped, 1,
698 					 qdf_nbuf_len(msdu));
699 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
700 
701 			return QDF_STATUS_E_NOMEM;
702 		}
703 	}
704 
705 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
706 
707 	tso_num_seg = dp_tso_num_seg_alloc(soc,
708 			msdu_info->tx_queue.desc_pool_id);
709 
710 	if (tso_num_seg) {
711 		tso_num_seg->next = tso_info->tso_num_seg_list;
712 		tso_info->tso_num_seg_list = tso_num_seg;
713 	} else {
714 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
715 			 __func__);
716 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
717 
718 		return QDF_STATUS_E_NOMEM;
719 	}
720 
721 	msdu_info->num_seg =
722 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
723 
724 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
725 			msdu_info->num_seg);
726 
727 	if (!(msdu_info->num_seg)) {
728 		/*
729 		 * Free allocated TSO seg desc and number seg desc,
730 		 * do unmap for segments if dma map has been done.
731 		 */
732 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
733 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
734 
735 		return QDF_STATUS_E_INVAL;
736 	}
737 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
738 			      msdu, 0, DP_TX_DESC_MAP);
739 
740 	tso_info->curr_seg = tso_info->tso_seg_list;
741 
742 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
743 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
744 			     msdu, msdu_info->num_seg);
745 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
746 				    tso_info->msdu_stats_idx);
747 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
748 	return QDF_STATUS_SUCCESS;
749 }
750 #else
751 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
752 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
753 {
754 	return QDF_STATUS_E_NOMEM;
755 }
756 #endif
757 
758 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
759 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
760 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
761 
762 /**
763  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
764  * @vdev: DP Vdev handle
765  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
766  * @desc_pool_id: Descriptor Pool ID
767  *
768  * Return: Pointer to the MSDU extension descriptor on success, NULL on failure
769  */
770 static
771 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
772 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
773 {
774 	uint8_t i;
775 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
776 	struct dp_tx_seg_info_s *seg_info;
777 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
778 	struct dp_soc *soc = vdev->pdev->soc;
779 
780 	/* Allocate an extension descriptor */
781 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
782 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
783 
784 	if (!msdu_ext_desc) {
785 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
786 		return NULL;
787 	}
788 
789 	if (msdu_info->exception_fw &&
790 			qdf_unlikely(vdev->mesh_vdev)) {
791 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
792 				&msdu_info->meta_data[0],
793 				sizeof(struct htt_tx_msdu_desc_ext2_t));
794 		qdf_atomic_inc(&soc->num_tx_exception);
795 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
796 	}
797 
798 	switch (msdu_info->frm_type) {
799 	case dp_tx_frm_sg:
800 	case dp_tx_frm_me:
801 	case dp_tx_frm_raw:
802 		seg_info = msdu_info->u.sg_info.curr_seg;
803 		/* Update the buffer pointers in MSDU Extension Descriptor */
804 		for (i = 0; i < seg_info->frag_cnt; i++) {
805 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
806 				seg_info->frags[i].paddr_lo,
807 				seg_info->frags[i].paddr_hi,
808 				seg_info->frags[i].len);
809 		}
810 
811 		break;
812 
813 	case dp_tx_frm_tso:
814 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
815 				&cached_ext_desc[0]);
816 		break;
817 
818 
819 	default:
820 		break;
821 	}
822 
823 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
824 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
825 
826 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
827 			msdu_ext_desc->vaddr);
828 
829 	return msdu_ext_desc;
830 }
831 
832 /**
833  * dp_tx_trace_pkt() - Trace TX packet at DP layer
834  * @soc: datapath SOC
835  * @skb: skb to be traced
836  * @msdu_id: msdu_id of the packet
837  * @vdev_id: vdev_id of the packet
838  *
839  * Return: None
840  */
841 #ifdef DP_DISABLE_TX_PKT_TRACE
842 static void dp_tx_trace_pkt(struct dp_soc *soc,
843 			    qdf_nbuf_t skb, uint16_t msdu_id,
844 			    uint8_t vdev_id)
845 {
846 }
847 #else
848 static void dp_tx_trace_pkt(struct dp_soc *soc,
849 			    qdf_nbuf_t skb, uint16_t msdu_id,
850 			    uint8_t vdev_id)
851 {
852 	if (dp_is_tput_high(soc))
853 		return;
854 
855 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
856 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
857 	DPTRACE(qdf_dp_trace_ptr(skb,
858 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
859 				 QDF_TRACE_DEFAULT_PDEV_ID,
860 				 qdf_nbuf_data_addr(skb),
861 				 sizeof(qdf_nbuf_data(skb)),
862 				 msdu_id, vdev_id, 0));
863 
864 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
865 
866 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
867 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
868 				      msdu_id, QDF_TX));
869 }
870 #endif
871 
872 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
873 /**
874  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
875  *				      exception by the upper layer (OS_IF)
876  * @soc: DP soc handle
877  * @nbuf: packet to be transmitted
878  *
879  * Return: 1 if the packet is marked as exception,
880  *	   0, if the packet is not marked as exception.
881  */
882 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
883 						 qdf_nbuf_t nbuf)
884 {
885 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
886 }
887 #else
888 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
889 						 qdf_nbuf_t nbuf)
890 {
891 	return 0;
892 }
893 #endif
894 
895 #ifdef DP_TRAFFIC_END_INDICATION
896 /**
897  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
898  *                                          as indication to fw to inform that
899  *                                          data stream has ended
900  * @vdev: DP vdev handle
901  * @nbuf: original buffer from network stack
902  *
903  * Return: NULL on failure,
904  *         nbuf on success
905  */
906 static inline qdf_nbuf_t
907 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
908 				     qdf_nbuf_t nbuf)
909 {
910 	/* Packet length should be enough to copy up to the L3 header */
911 	uint8_t end_nbuf_len = 64;
912 	uint8_t htt_desc_size_aligned;
913 	uint8_t htt_desc_size;
914 	qdf_nbuf_t end_nbuf;
915 
916 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
917 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
918 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
919 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
920 
921 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
922 		if (!end_nbuf) {
923 			end_nbuf = qdf_nbuf_alloc(NULL,
924 						  (htt_desc_size_aligned +
925 						  end_nbuf_len),
926 						  htt_desc_size_aligned,
927 						  8, false);
928 			if (!end_nbuf) {
929 				dp_err("Packet allocation failed");
930 				goto out;
931 			}
932 		} else {
933 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
934 		}
935 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
936 			     end_nbuf_len);
937 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
938 
939 		return end_nbuf;
940 	}
941 out:
942 	return NULL;
943 }
944 
945 /**
946  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
947  *                                           via exception path.
948  * @vdev: DP vdev handle
949  * @end_nbuf: skb to send as indication
950  * @msdu_info: msdu_info of original nbuf
951  * @peer_id: peer id
952  *
953  * Return: None
954  */
955 static inline void
956 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
957 				      qdf_nbuf_t end_nbuf,
958 				      struct dp_tx_msdu_info_s *msdu_info,
959 				      uint16_t peer_id)
960 {
961 	struct dp_tx_msdu_info_s e_msdu_info = {0};
962 	qdf_nbuf_t nbuf;
963 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
964 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
965 	e_msdu_info.tx_queue = msdu_info->tx_queue;
966 	e_msdu_info.tid = msdu_info->tid;
967 	e_msdu_info.exception_fw = 1;
968 	desc_ext->host_tx_desc_pool = 1;
969 	desc_ext->traffic_end_indication = 1;
970 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
971 				      peer_id, NULL);
972 	if (nbuf) {
973 		dp_err("Traffic end indication packet tx failed");
974 		qdf_nbuf_free(nbuf);
975 	}
976 }
977 
978 /**
979  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
980  *                                                mark it as a traffic end
981  *                                                indication packet.
982  * @tx_desc: Tx descriptor pointer
983  * @msdu_info: msdu_info structure pointer
984  *
985  * Return: None
986  */
987 static inline void
988 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
989 					   struct dp_tx_msdu_info_s *msdu_info)
990 {
991 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
992 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
993 
994 	if (qdf_unlikely(desc_ext->traffic_end_indication))
995 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
996 }
997 
998 /**
999  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet, instead of
1000  *                                              freeing it, when it is associated
1001  *                                              with a traffic end indication
1002  *                                              flagged descriptor.
1003  * @soc: dp soc handle
1004  * @desc: Tx descriptor pointer
1005  * @nbuf: buffer pointer
1006  *
1007  * Return: True if packet gets enqueued else false
1008  */
1009 static bool
1010 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1011 					 struct dp_tx_desc_s *desc,
1012 					 qdf_nbuf_t nbuf)
1013 {
1014 	struct dp_vdev *vdev = NULL;
1015 
1016 	if (qdf_unlikely((desc->flags &
1017 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1018 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1019 					     DP_MOD_ID_TX_COMP);
1020 		if (vdev) {
1021 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1022 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1023 			return true;
1024 		}
1025 	}
1026 	return false;
1027 }
1028 
1029 /**
1030  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1031  *                                             enable/disable status
1032  * @vdev: dp vdev handle
1033  *
1034  * Return: True if the feature is enabled, else false
1035  */
1036 static inline bool
1037 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1038 {
1039 	return qdf_unlikely(vdev->traffic_end_ind_en);
1040 }
1041 
1042 static inline qdf_nbuf_t
1043 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1044 			       struct dp_tx_msdu_info_s *msdu_info,
1045 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1046 {
1047 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1048 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1049 
1050 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1051 
1052 	if (qdf_unlikely(end_nbuf))
1053 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1054 						      msdu_info, peer_id);
1055 	return nbuf;
1056 }
1057 #else
1058 static inline qdf_nbuf_t
1059 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1060 				     qdf_nbuf_t nbuf)
1061 {
1062 	return NULL;
1063 }
1064 
1065 static inline void
1066 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1067 				      qdf_nbuf_t end_nbuf,
1068 				      struct dp_tx_msdu_info_s *msdu_info,
1069 				      uint16_t peer_id)
1070 {}
1071 
1072 static inline void
1073 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1074 					   struct dp_tx_msdu_info_s *msdu_info)
1075 {}
1076 
1077 static inline bool
1078 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1079 					 struct dp_tx_desc_s *desc,
1080 					 qdf_nbuf_t nbuf)
1081 {
1082 	return false;
1083 }
1084 
1085 static inline bool
1086 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1087 {
1088 	return false;
1089 }
1090 
1091 static inline qdf_nbuf_t
1092 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1093 			       struct dp_tx_msdu_info_s *msdu_info,
1094 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1095 {
1096 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1097 }
1098 #endif
1099 
1100 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1101 static bool
1102 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1103 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1104 {
1105 	if (soc->features.wds_ext_ast_override_enable &&
1106 	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
1107 		return true;
1108 
1109 	return false;
1110 }
1111 #else
1112 static bool
1113 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1114 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1115 {
1116 	return false;
1117 }
1118 #endif
1119 
1120 /**
1121  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1122  * @vdev: DP vdev handle
1123  * @nbuf: skb
1124  * @desc_pool_id: Descriptor pool ID
1125  * @msdu_info: Metadata to the fw
1126  * @tx_exc_metadata: Handle that holds exception path metadata
1127  *
1128  * Allocate and prepare Tx descriptor with msdu information.
1129  *
1130  * Return: Pointer to Tx Descriptor on success,
1131  *         NULL on failure
1132  */
1133 static
1134 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1135 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1136 		struct dp_tx_msdu_info_s *msdu_info,
1137 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1138 {
1139 	uint8_t align_pad;
1140 	uint8_t is_exception = 0;
1141 	uint8_t htt_hdr_size;
1142 	struct dp_tx_desc_s *tx_desc;
1143 	struct dp_pdev *pdev = vdev->pdev;
1144 	struct dp_soc *soc = pdev->soc;
1145 
1146 	if (dp_tx_limit_check(vdev, nbuf))
1147 		return NULL;
1148 
1149 	/* Allocate software Tx descriptor */
1150 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1151 
1152 	if (qdf_unlikely(!tx_desc)) {
1153 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1154 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1155 		return NULL;
1156 	}
1157 
1158 	dp_tx_outstanding_inc(pdev);
1159 
1160 	/* Initialize the SW tx descriptor */
1161 	tx_desc->nbuf = nbuf;
1162 	tx_desc->frm_type = dp_tx_frm_std;
1163 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1164 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1165 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1166 	tx_desc->vdev_id = vdev->vdev_id;
1167 	tx_desc->pdev = pdev;
1168 	tx_desc->msdu_ext_desc = NULL;
1169 	tx_desc->pkt_offset = 0;
1170 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1171 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1172 
1173 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1174 
1175 	if (qdf_unlikely(vdev->multipass_en)) {
1176 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1177 			goto failure;
1178 	}
1179 
1180 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1181 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1182 		is_exception = 1;
1183 
1184 	/* For BE chipsets, if WDS extension is enabled, do not mark the frame as
1185 	 * an FW exception in the descriptor; AST index based search is used instead.
1186 	 */
1187 	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
1188 		return tx_desc;
1189 
1190 	/*
1191 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1192 	 * transmitted using varying transmit parameters (tx spec) which include
1193 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
1194 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1195 	 * These frames are sent as exception packets to firmware.
1196 	 *
1197 	 * HW requirement is that metadata should always point to a
1198 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1199 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1200 	 *  to get 8-byte aligned start address along with align_pad added
1201 	 *
1202 	 *  |-----------------------------|
1203 	 *  |                             |
1204 	 *  |-----------------------------| <-----Buffer Pointer Address given
1205 	 *  |                             |  ^    in HW descriptor (aligned)
1206 	 *  |       HTT Metadata          |  |
1207 	 *  |                             |  |
1208 	 *  |                             |  | Packet Offset given in descriptor
1209 	 *  |                             |  |
1210 	 *  |-----------------------------|  |
1211 	 *  |       Alignment Pad         |  v
1212 	 *  |-----------------------------| <----- Actual buffer start address
1213 	 *  |        SKB Data             |           (Unaligned)
1214 	 *  |                             |
1215 	 *  |                             |
1216 	 *  |                             |
1217 	 *  |                             |
1218 	 *  |                             |
1219 	 *  |-----------------------------|
1220 	 */
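	/*
	 * Worked example (illustrative address): if qdf_nbuf_data(nbuf) ends
	 * in 0x3, align_pad = 3; pushing 3 pad bytes plus the 8-byte aligned
	 * HTT metadata leaves the buffer pointer 8-byte aligned, and
	 * pkt_offset = align_pad + htt_hdr_size tells HW where the original
	 * payload starts.
	 */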
1221 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1222 				(vdev->opmode == wlan_op_mode_ocb) ||
1223 				(tx_exc_metadata &&
1224 				tx_exc_metadata->is_tx_sniffer)) {
1225 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1226 
1227 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1228 			DP_STATS_INC(vdev,
1229 				     tx_i.dropped.headroom_insufficient, 1);
1230 			goto failure;
1231 		}
1232 
1233 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1234 			dp_tx_err("qdf_nbuf_push_head failed");
1235 			goto failure;
1236 		}
1237 
1238 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1239 				msdu_info);
1240 		if (htt_hdr_size == 0)
1241 			goto failure;
1242 
1243 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1244 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1245 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1246 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1247 							   msdu_info);
1248 		is_exception = 1;
1249 		tx_desc->length -= tx_desc->pkt_offset;
1250 	}
1251 
1252 #if !TQM_BYPASS_WAR
1253 	if (is_exception || tx_exc_metadata)
1254 #endif
1255 	{
1256 		/* Temporary WAR due to TQM VP issues */
1257 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1258 		qdf_atomic_inc(&soc->num_tx_exception);
1259 	}
1260 
1261 	return tx_desc;
1262 
1263 failure:
1264 	dp_tx_desc_release(tx_desc, desc_pool_id);
1265 	return NULL;
1266 }
1267 
1268 /**
1269  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
1270  *                        frame
1271  * @vdev: DP vdev handle
1272  * @nbuf: skb
1273  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1274  * @desc_pool_id : Descriptor Pool ID
1275  *
1276  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1277  * information. For frames with fragments, allocate and prepare
1278  * an MSDU extension descriptor
1279  *
1280  * Return: Pointer to Tx Descriptor on success,
1281  *         NULL on failure
1282  */
1283 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1284 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1285 		uint8_t desc_pool_id)
1286 {
1287 	struct dp_tx_desc_s *tx_desc;
1288 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1289 	struct dp_pdev *pdev = vdev->pdev;
1290 	struct dp_soc *soc = pdev->soc;
1291 
1292 	if (dp_tx_limit_check(vdev, nbuf))
1293 		return NULL;
1294 
1295 	/* Allocate software Tx descriptor */
1296 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1297 	if (!tx_desc) {
1298 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1299 		return NULL;
1300 	}
1301 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1302 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1303 
1304 	dp_tx_outstanding_inc(pdev);
1305 
1306 	/* Initialize the SW tx descriptor */
1307 	tx_desc->nbuf = nbuf;
1308 	tx_desc->frm_type = msdu_info->frm_type;
1309 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1310 	tx_desc->vdev_id = vdev->vdev_id;
1311 	tx_desc->pdev = pdev;
1312 	tx_desc->pkt_offset = 0;
1313 
1314 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1315 
1316 	/* Handle scattered frames - TSO/SG/ME */
1317 	/* Allocate and prepare an extension descriptor for scattered frames */
1318 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1319 	if (!msdu_ext_desc) {
1320 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1321 		goto failure;
1322 	}
1323 
1324 #if TQM_BYPASS_WAR
1325 	/* Temporary WAR due to TQM VP issues */
1326 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1327 	qdf_atomic_inc(&soc->num_tx_exception);
1328 #endif
1329 	if (qdf_unlikely(msdu_info->exception_fw))
1330 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1331 
1332 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1333 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1334 
1335 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1336 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1337 
1338 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1339 
1340 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1341 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1342 	else
1343 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1344 
1345 	return tx_desc;
1346 failure:
1347 	dp_tx_desc_release(tx_desc, desc_pool_id);
1348 	return NULL;
1349 }
1350 
1351 /**
1352  * dp_tx_prepare_raw() - Prepare RAW packet TX
1353  * @vdev: DP vdev handle
1354  * @nbuf: buffer pointer
1355  * @seg_info: Pointer to Segment info Descriptor to be prepared
1356  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1357  *     descriptor
1358  *
1359  * Return: nbuf on success, NULL on failure
1360  */
1361 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1362 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1363 {
1364 	qdf_nbuf_t curr_nbuf = NULL;
1365 	uint16_t total_len = 0;
1366 	qdf_dma_addr_t paddr;
1367 	int32_t i;
1368 	int32_t mapped_buf_num = 0;
1369 
1370 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1371 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1372 
1373 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1374 
1375 	/* Continue only if frames are of DATA type */
1376 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1377 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1378 		dp_tx_debug("Pkt. recd is not of data type");
1379 		goto error;
1380 	}
1381 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1382 	if (vdev->raw_mode_war &&
1383 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1384 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1385 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1386 
1387 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1388 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1389 		/*
1390 		 * Number of nbuf's must not exceed the size of the frags
1391 		 * array in seg_info.
1392 		 */
1393 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1394 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1395 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1396 			goto error;
1397 		}
1398 		if (QDF_STATUS_SUCCESS !=
1399 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1400 						   curr_nbuf,
1401 						   QDF_DMA_TO_DEVICE,
1402 						   curr_nbuf->len)) {
1403 			dp_tx_err("%s dma map error ", __func__);
1404 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1405 			goto error;
1406 		}
1407 		/* Update the count of mapped nbuf's */
1408 		mapped_buf_num++;
1409 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1410 		seg_info->frags[i].paddr_lo = paddr;
1411 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1412 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1413 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1414 		total_len += qdf_nbuf_len(curr_nbuf);
1415 	}
1416 
1417 	seg_info->frag_cnt = i;
1418 	seg_info->total_len = total_len;
1419 	seg_info->next = NULL;
1420 
1421 	sg_info->curr_seg = seg_info;
1422 
1423 	msdu_info->frm_type = dp_tx_frm_raw;
1424 	msdu_info->num_seg = 1;
1425 
1426 	return nbuf;
1427 
1428 error:
1429 	i = 0;
1430 	while (nbuf) {
1431 		curr_nbuf = nbuf;
1432 		if (i < mapped_buf_num) {
1433 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1434 						     QDF_DMA_TO_DEVICE,
1435 						     curr_nbuf->len);
1436 			i++;
1437 		}
1438 		nbuf = qdf_nbuf_next(nbuf);
1439 		qdf_nbuf_free(curr_nbuf);
1440 	}
1441 	return NULL;
1442 
1443 }
1444 
1445 /**
1446  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1447  * @soc: DP soc handle
1448  * @nbuf: Buffer pointer
1449  *
1450  * unmap the chain of nbufs that belong to this RAW frame.
1451  *
1452  * Return: None
1453  */
1454 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1455 				    qdf_nbuf_t nbuf)
1456 {
1457 	qdf_nbuf_t cur_nbuf = nbuf;
1458 
1459 	do {
1460 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1461 					     QDF_DMA_TO_DEVICE,
1462 					     cur_nbuf->len);
1463 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1464 	} while (cur_nbuf);
1465 }
1466 
1467 #ifdef VDEV_PEER_PROTOCOL_COUNT
1468 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1469 					       qdf_nbuf_t nbuf)
1470 {
1471 	qdf_nbuf_t nbuf_local;
1472 	struct dp_vdev *vdev_local = vdev_hdl;
1473 
1474 	do {
1475 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1476 			break;
1477 		nbuf_local = nbuf;
1478 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1479 			 htt_cmn_pkt_type_raw))
1480 			break;
1481 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1482 			break;
1483 		else if (qdf_nbuf_is_tso((nbuf_local)))
1484 			break;
1485 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1486 						       (nbuf_local),
1487 						       NULL, 1, 0);
1488 	} while (0);
1489 }
1490 #endif
1491 
1492 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1493 void dp_tx_update_stats(struct dp_soc *soc,
1494 			struct dp_tx_desc_s *tx_desc,
1495 			uint8_t ring_id)
1496 {
1497 	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
1498 
1499 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1500 }
1501 
1502 int
1503 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1504 			 struct dp_tx_desc_s *tx_desc,
1505 			 uint8_t tid,
1506 			 struct dp_tx_msdu_info_s *msdu_info,
1507 			 uint8_t ring_id)
1508 {
1509 	struct dp_swlm *swlm = &soc->swlm;
1510 	union swlm_data swlm_query_data;
1511 	struct dp_swlm_tcl_data tcl_data;
1512 	QDF_STATUS status;
1513 	int ret;
1514 
1515 	if (!swlm->is_enabled)
1516 		return msdu_info->skip_hp_update;
1517 
1518 	tcl_data.nbuf = tx_desc->nbuf;
1519 	tcl_data.tid = tid;
1520 	tcl_data.ring_id = ring_id;
1521 	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
1522 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1523 	swlm_query_data.tcl_data = &tcl_data;
1524 
1525 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1526 	if (QDF_IS_STATUS_ERROR(status)) {
1527 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1528 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1529 		return 0;
1530 	}
1531 
1532 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1533 	if (ret) {
1534 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1535 	} else {
1536 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1537 	}
1538 
1539 	return ret;
1540 }
1541 
1542 void
1543 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1544 		      int coalesce)
1545 {
1546 	if (coalesce)
1547 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1548 	else
1549 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1550 }
1551 
1552 static inline void
1553 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1554 {
1555 	if (((i + 1) < msdu_info->num_seg))
1556 		msdu_info->skip_hp_update = 1;
1557 	else
1558 		msdu_info->skip_hp_update = 0;
1559 }
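/*
 * Illustrative behaviour: for a 3-segment MSDU the helper above sets
 * skip_hp_update for segments 0 and 1 and clears it for the final segment,
 * so the TCL head pointer is updated only once per multi-segment frame.
 */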
1560 
1561 static inline void
1562 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1563 {
1564 	hal_ring_handle_t hal_ring_hdl =
1565 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1566 
1567 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1568 		dp_err("Fillmore: SRNG access start failed");
1569 		return;
1570 	}
1571 
1572 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1573 }
1574 
1575 static inline void
1576 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1577 			 QDF_STATUS status,
1578 			 struct dp_tx_msdu_info_s *msdu_info)
1579 {
1580 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1581 		dp_flush_tcp_hp(soc,
1582 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1583 	}
1584 }
1585 #else
1586 static inline void
1587 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1588 {
1589 }
1590 
1591 static inline void
1592 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1593 			 QDF_STATUS status,
1594 			 struct dp_tx_msdu_info_s *msdu_info)
1595 {
1596 }
1597 #endif
1598 
1599 #ifdef FEATURE_RUNTIME_PM
1600 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1601 {
1602 	int ret;
1603 
1604 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1605 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1606 	return ret;
1607 }
1608 void
1609 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1610 			      hal_ring_handle_t hal_ring_hdl,
1611 			      int coalesce)
1612 {
1613 	int ret;
1614 
1615 	/*
1616 	 * Avoid runtime get and put APIs under high throughput scenarios.
1617 	 */
1618 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1619 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1620 		return;
1621 	}
1622 
1623 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1624 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1625 		if (hif_system_pm_state_check(soc->hif_handle)) {
1626 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1627 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1628 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1629 		} else {
1630 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1631 		}
1632 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1633 	} else {
1634 		dp_runtime_get(soc);
1635 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1636 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1637 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1638 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1639 		dp_runtime_put(soc);
1640 	}
1641 }
1642 #else
1643 
1644 #ifdef DP_POWER_SAVE
1645 void
1646 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1647 			      hal_ring_handle_t hal_ring_hdl,
1648 			      int coalesce)
1649 {
1650 	if (hif_system_pm_state_check(soc->hif_handle)) {
1651 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1652 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1653 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1654 	} else {
1655 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1656 	}
1657 }
1658 #endif
1659 
1660 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1661 {
1662 	return 0;
1663 }
1664 #endif
1665 
1666 /**
1667  * dp_tx_get_tid() - Obtain TID to be used for this frame
1668  * @vdev: DP vdev handle
1669  * @nbuf: skb
1670  * @msdu_info: msdu descriptor
1671  *
1672  * Extract the DSCP or PCP information from frame and map into TID value.
1673  *
1674  * Return: void
1675  */
1676 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1677 			  struct dp_tx_msdu_info_s *msdu_info)
1678 {
1679 	uint8_t tos = 0, dscp_tid_override = 0;
1680 	uint8_t *hdr_ptr, *L3datap;
1681 	uint8_t is_mcast = 0;
1682 	qdf_ether_header_t *eh = NULL;
1683 	qdf_ethervlan_header_t *evh = NULL;
1684 	uint16_t   ether_type;
1685 	qdf_llc_t *llcHdr;
1686 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1687 
1688 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1689 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1690 		eh = (qdf_ether_header_t *)nbuf->data;
1691 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1692 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1693 	} else {
1694 		qdf_dot3_qosframe_t *qos_wh =
1695 			(qdf_dot3_qosframe_t *) nbuf->data;
1696 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1697 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1698 		return;
1699 	}
1700 
1701 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1702 	ether_type = eh->ether_type;
1703 
1704 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1705 	/*
1706 	 * Check if packet is dot3 or eth2 type.
1707 	 */
1708 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1709 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1710 				sizeof(*llcHdr));
1711 
1712 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1713 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1714 				sizeof(*llcHdr);
1715 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1716 					+ sizeof(*llcHdr) +
1717 					sizeof(qdf_net_vlanhdr_t));
1718 		} else {
1719 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1720 				sizeof(*llcHdr);
1721 		}
1722 	} else {
1723 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1724 			evh = (qdf_ethervlan_header_t *) eh;
1725 			ether_type = evh->ether_type;
1726 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1727 		}
1728 	}
1729 
1730 	/*
1731 	 * Find priority from IP TOS DSCP field
1732 	 */
1733 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1734 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1735 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1736 			/* Only for unicast frames */
1737 			if (!is_mcast) {
1738 				/* send it on VO queue */
1739 				msdu_info->tid = DP_VO_TID;
1740 			}
1741 		} else {
1742 			/*
1743 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1744 			 * from TOS byte.
1745 			 */
1746 			tos = ip->ip_tos;
1747 			dscp_tid_override = 1;
1748 
1749 		}
1750 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1751 		/* TODO
1752 		 * use flowlabel
1753 		 * igmpmld cases to be handled in phase 2
1754 		 */
1755 		unsigned long ver_pri_flowlabel;
1756 		unsigned long pri;
1757 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1758 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1759 			DP_IPV6_PRIORITY_SHIFT;
1760 		tos = pri;
1761 		dscp_tid_override = 1;
1762 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1763 		msdu_info->tid = DP_VO_TID;
1764 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1765 		/* Only for unicast frames */
1766 		if (!is_mcast) {
1767 			/* send ucast arp on VO queue */
1768 			msdu_info->tid = DP_VO_TID;
1769 		}
1770 	}
1771 
1772 	/*
1773 	 * Assign all MCAST packets to BE
1774 	 */
1775 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1776 		if (is_mcast) {
1777 			tos = 0;
1778 			dscp_tid_override = 1;
1779 		}
1780 	}
1781 
1782 	if (dscp_tid_override == 1) {
1783 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1784 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1785 	}
1786 
1787 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1788 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1789 
1790 	return;
1791 }
1792 
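/*
 * Illustrative helper, not part of the datapath: a minimal, self-contained
 * sketch of the DSCP extraction done at the tail of dp_tx_get_tid(),
 * assuming the conventional values DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f; the map argument stands in for one row of
 * pdev->dscp_tid_map[].
 */
static inline uint8_t dp_tx_example_dscp_to_tid(uint8_t tos,
						const uint8_t *dscp_tid_map)
{
	/* drop the two ECN bits and keep the six DSCP bits */
	uint8_t dscp = (tos >> 2) & 0x3f;

	/*
	 * e.g. tos 0xb8 -> DSCP 46 (EF); the default map typically sends
	 * this to a voice TID
	 */
	return dscp_tid_map[dscp];
}
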
1793 /**
1794  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1795  * @vdev: DP vdev handle
1796  * @nbuf: skb
1797  * @msdu_info: msdu descriptor
1798  *
1799  * Software based TID classification is required when more than 2 DSCP-TID
1800  * mapping tables are needed.
1801  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1802  *
1803  * Return: void
1804  */
1805 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1806 				      struct dp_tx_msdu_info_s *msdu_info)
1807 {
1808 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1809 
1810 	/*
1811 	 * skip_sw_tid_classification flag will be set in the below cases:
1812 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1813 	 * 2. hlos_tid_override enabled for vdev
1814 	 * 3. mesh mode enabled for vdev
1815 	 */
1816 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1817 		/* Update tid in msdu_info from skb priority */
1818 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1819 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1820 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1821 
1822 			if (tid == DP_TX_INVALID_QOS_TAG)
1823 				return;
1824 
1825 			msdu_info->tid = tid;
1826 			return;
1827 		}
1828 		return;
1829 	}
1830 
1831 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1832 }
1833 
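/*
 * Illustrative helper, not an existing caller: with
 * DP_TXRX_HLOS_TID_OVERRIDE_ENABLED set on the vdev,
 * dp_tx_classify_tid() takes the TID straight from the skb priority
 * (unless it is DP_TX_INVALID_QOS_TAG), so tagging the nbuf before
 * transmit is enough to steer the frame to a given TID.
 */
static inline void dp_tx_example_set_hlos_tid(qdf_nbuf_t nbuf, uint8_t tid)
{
	/* picked up above as qdf_nbuf_get_priority(nbuf) */
	qdf_nbuf_set_priority(nbuf, tid);
}
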
1834 #ifdef FEATURE_WLAN_TDLS
1835 /**
1836  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1837  * @soc: datapath SOC
1838  * @vdev: datapath vdev
1839  * @tx_desc: TX descriptor
1840  *
1841  * Return: None
1842  */
1843 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1844 				    struct dp_vdev *vdev,
1845 				    struct dp_tx_desc_s *tx_desc)
1846 {
1847 	if (vdev) {
1848 		if (vdev->is_tdls_frame) {
1849 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1850 			vdev->is_tdls_frame = false;
1851 		}
1852 	}
1853 }
1854 
1855 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1856 {
1857 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1858 
1859 	switch (soc->arch_id) {
1860 	case CDP_ARCH_TYPE_LI:
1861 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1862 		break;
1863 
1864 	case CDP_ARCH_TYPE_BE:
1865 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1866 		break;
1867 
1868 	default:
1869 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1870 		QDF_BUG(0);
1871 	}
1872 
1873 	return tx_status;
1874 }
1875 
1876 /**
1877  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1878  * @soc: dp_soc handle
1879  * @tx_desc: TX descriptor
1880  *
1881  * Return: None
1882  */
1883 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1884 					 struct dp_tx_desc_s *tx_desc)
1885 {
1886 	uint8_t tx_status = 0;
1887 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1888 
1889 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1890 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1891 						     DP_MOD_ID_TDLS);
1892 
1893 	if (qdf_unlikely(!vdev)) {
1894 		dp_err_rl("vdev is null!");
1895 		goto error;
1896 	}
1897 
1898 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1899 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1900 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1901 
1902 	if (vdev->tx_non_std_data_callback.func) {
1903 		qdf_nbuf_set_next(nbuf, NULL);
1904 		vdev->tx_non_std_data_callback.func(
1905 				vdev->tx_non_std_data_callback.ctxt,
1906 				nbuf, tx_status);
1907 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1908 		return;
1909 	} else {
1910 		dp_err_rl("callback func is null");
1911 	}
1912 
1913 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1914 error:
1915 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1916 	qdf_nbuf_free(nbuf);
1917 }
1918 
1919 /**
1920  * dp_tx_msdu_single_map() - do nbuf map
1921  * @vdev: DP vdev handle
1922  * @tx_desc: DP TX descriptor pointer
1923  * @nbuf: skb pointer
1924  *
1925  * For a TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1926  * operation done in the other component.
1927  *
1928  * Return: QDF_STATUS
1929  */
1930 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1931 					       struct dp_tx_desc_s *tx_desc,
1932 					       qdf_nbuf_t nbuf)
1933 {
1934 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1935 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1936 						  nbuf,
1937 						  QDF_DMA_TO_DEVICE,
1938 						  nbuf->len);
1939 	else
1940 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1941 					   QDF_DMA_TO_DEVICE);
1942 }
1943 #else
1944 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1945 					   struct dp_vdev *vdev,
1946 					   struct dp_tx_desc_s *tx_desc)
1947 {
1948 }
1949 
1950 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1951 						struct dp_tx_desc_s *tx_desc)
1952 {
1953 }
1954 
1955 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1956 					       struct dp_tx_desc_s *tx_desc,
1957 					       qdf_nbuf_t nbuf)
1958 {
1959 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1960 					  nbuf,
1961 					  QDF_DMA_TO_DEVICE,
1962 					  nbuf->len);
1963 }
1964 #endif
1965 
1966 static inline
1967 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1968 				      struct dp_tx_desc_s *tx_desc,
1969 				      qdf_nbuf_t nbuf)
1970 {
1971 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1972 
1973 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1974 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1975 		return 0;
1976 
1977 	return qdf_nbuf_mapped_paddr_get(nbuf);
1978 }
1979 
1980 static inline
1981 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1982 {
1983 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1984 					   desc->nbuf,
1985 					   desc->dma_addr,
1986 					   QDF_DMA_TO_DEVICE,
1987 					   desc->length);
1988 }
1989 
1990 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
1991 static inline bool
1992 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
1993 {
1994 	struct net_device *ingress_dev;
1995 	skb_frag_t *frag;
1996 	uint16_t buf_len = 0;
1997 	uint16_t linear_data_len = 0;
1998 	uint8_t *payload_addr = NULL;
1999 
2000 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2001 
2002 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2003 		dev_put(ingress_dev);
2004 		frag = &(skb_shinfo(nbuf)->frags[0]);
2005 		buf_len = skb_frag_size(frag);
2006 		payload_addr = (uint8_t *)skb_frag_address(frag);
2007 		linear_data_len = skb_headlen(nbuf);
2008 
2009 		buf_len += linear_data_len;
2010 		payload_addr = payload_addr - linear_data_len;
2011 		memcpy(payload_addr, nbuf->data, linear_data_len);
2012 
2013 		msdu_info->frm_type = dp_tx_frm_rmnet;
2014 		msdu_info->buf_len = buf_len;
2015 		msdu_info->payload_addr = payload_addr;
2016 
2017 		return true;
2018 	}
2019 	dev_put(ingress_dev);
2020 	return false;
2021 }
2022 
2023 static inline
2024 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2025 				    struct dp_tx_desc_s *tx_desc)
2026 {
2027 	qdf_dma_addr_t paddr;
2028 
2029 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2030 	tx_desc->length  = msdu_info->buf_len;
2031 
2032 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2033 				 (void *)(msdu_info->payload_addr +
2034 					  msdu_info->buf_len));
2035 
2036 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2037 	return paddr;
2038 }
2039 #else
2040 static inline bool
2041 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2042 {
2043 	return false;
2044 }
2045 
2046 static inline
2047 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2048 				    struct dp_tx_desc_s *tx_desc)
2049 {
2050 	return 0;
2051 }
2052 #endif
2053 
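/*
 * Self-contained sketch of the coalescing done in is_nbuf_frm_rmnet():
 * the first frag is assumed to have headroom in front of it
 * (IFF_PHONY_HEADROOM), so the skb linear header is copied to the bytes
 * immediately preceding the frag payload, yielding one contiguous buffer
 * of (linear_len + frag_len) bytes that can be mapped as a single unit.
 * This helper is illustrative only.
 */
static inline uint8_t *dp_tx_example_rmnet_coalesce(uint8_t *frag_addr,
						    uint8_t *linear_data,
						    uint16_t linear_len)
{
	uint8_t *payload = frag_addr - linear_len;

	memcpy(payload, linear_data, linear_len);

	/* becomes msdu_info->payload_addr in the real path */
	return payload;
}
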
2054 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2055 static inline
2056 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2057 			      struct dp_tx_desc_s *tx_desc,
2058 			      qdf_nbuf_t nbuf)
2059 {
2060 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2061 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2062 					 (void *)(nbuf->data + nbuf->len));
2063 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2064 	} else {
2065 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2066 	}
2067 }
2068 
2069 static inline
2070 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2071 		      struct dp_tx_desc_s *desc)
2072 {
2073 	if (qdf_unlikely(!(desc->flags &
2074 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2075 		return dp_tx_nbuf_unmap_regular(soc, desc);
2076 }
2077 #else
2078 static inline
2079 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2080 			      struct dp_tx_desc_s *tx_desc,
2081 			      qdf_nbuf_t nbuf)
2082 {
2083 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2084 }
2085 
2086 static inline
2087 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2088 		      struct dp_tx_desc_s *desc)
2089 {
2090 	return dp_tx_nbuf_unmap_regular(soc, desc);
2091 }
2092 #endif
2093 
2094 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2095 static inline
2096 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2097 {
2098 	dp_tx_nbuf_unmap(soc, desc);
2099 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2100 }
2101 
2102 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2103 {
2104 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2105 		dp_tx_nbuf_unmap(soc, desc);
2106 }
2107 #else
2108 static inline
2109 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2110 {
2111 }
2112 
2113 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2114 {
2115 	dp_tx_nbuf_unmap(soc, desc);
2116 }
2117 #endif
2118 
2119 #ifdef MESH_MODE_SUPPORT
2120 /**
2121  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2122  * @soc: datapath SOC
2123  * @vdev: datapath vdev
2124  * @tx_desc: TX descriptor
2125  *
2126  * Return: None
2127  */
2128 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2129 					   struct dp_vdev *vdev,
2130 					   struct dp_tx_desc_s *tx_desc)
2131 {
2132 	if (qdf_unlikely(vdev->mesh_vdev))
2133 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2134 }
2135 
2136 /**
2137  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2138  * @soc: dp_soc handle
2139  * @tx_desc: TX descriptor
2140  * @delayed_free: delay the nbuf free
2141  *
2142  * Return: nbuf to be freed later by the caller, or NULL
2143  */
2144 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2145 						   struct dp_tx_desc_s *tx_desc,
2146 						   bool delayed_free)
2147 {
2148 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2149 	struct dp_vdev *vdev = NULL;
2150 
2151 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2152 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2153 		if (vdev)
2154 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2155 
2156 		if (delayed_free)
2157 			return nbuf;
2158 
2159 		qdf_nbuf_free(nbuf);
2160 	} else {
2161 		if (vdev && vdev->osif_tx_free_ext) {
2162 			vdev->osif_tx_free_ext((nbuf));
2163 		} else {
2164 			if (delayed_free)
2165 				return nbuf;
2166 
2167 			qdf_nbuf_free(nbuf);
2168 		}
2169 	}
2170 
2171 	if (vdev)
2172 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2173 
2174 	return NULL;
2175 }
2176 #else
2177 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2178 					   struct dp_vdev *vdev,
2179 					   struct dp_tx_desc_s *tx_desc)
2180 {
2181 }
2182 
2183 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2184 						   struct dp_tx_desc_s *tx_desc,
2185 						   bool delayed_free)
2186 {
2187 	return NULL;
2188 }
2189 #endif
2190 
2191 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2192 {
2193 	struct dp_pdev *pdev = NULL;
2194 	struct dp_ast_entry *src_ast_entry = NULL;
2195 	struct dp_ast_entry *dst_ast_entry = NULL;
2196 	struct dp_soc *soc = NULL;
2197 
2198 	qdf_assert(vdev);
2199 	pdev = vdev->pdev;
2200 	qdf_assert(pdev);
2201 	soc = pdev->soc;
2202 
2203 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2204 				(soc, dstmac, vdev->pdev->pdev_id);
2205 
2206 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2207 				(soc, srcmac, vdev->pdev->pdev_id);
2208 	if (dst_ast_entry && src_ast_entry) {
2209 		if (dst_ast_entry->peer_id ==
2210 				src_ast_entry->peer_id)
2211 			return 1;
2212 	}
2213 
2214 	return 0;
2215 }
2216 
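/*
 * Hypothetical usage sketch for dp_tx_frame_is_drop(), not an existing
 * caller in this file: extract the source and destination MAC addresses
 * from an Ethernet frame and check whether both resolve to the same AST
 * peer, in which case the frame would only loop back to its sender.
 */
static inline bool dp_tx_example_is_same_peer_frame(struct dp_vdev *vdev,
						    qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	return dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost);
}
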
2217 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2218 	defined(WLAN_MCAST_MLO)
2219 /* MLO peer id for reinject */
2220 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2221 /* MLO vdev id inc offset */
2222 #define DP_MLO_VDEV_ID_OFFSET 0x80
2223 
2224 #ifdef QCA_SUPPORT_WDS_EXTENDED
2225 static inline bool
2226 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2227 {
2228 	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
2229 		return true;
2230 
2231 	return false;
2232 }
2233 #else
2234 static inline bool
2235 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2236 {
2237 	return false;
2238 }
2239 #endif
2240 
2241 static inline void
2242 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2243 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2244 {
2245 	/* when wds ext is enabled, do not set the TO_FW bit */
2246 	if (dp_tx_wds_ext_check(tx_exc_metadata))
2247 		return;
2248 
2249 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2250 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2251 		qdf_atomic_inc(&soc->num_tx_exception);
2252 	}
2253 }
2254 
2255 static inline void
2256 dp_tx_update_mcast_param(uint16_t peer_id,
2257 			 uint16_t *htt_tcl_metadata,
2258 			 struct dp_vdev *vdev,
2259 			 struct dp_tx_msdu_info_s *msdu_info)
2260 {
2261 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2262 		*htt_tcl_metadata = 0;
2263 		DP_TX_TCL_METADATA_TYPE_SET(
2264 				*htt_tcl_metadata,
2265 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2266 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2267 						    msdu_info->gsn);
2268 
2269 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2270 		if (qdf_unlikely(vdev->nawds_enabled ||
2271 				 dp_vdev_is_wds_ext_enabled(vdev)))
2272 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2273 							*htt_tcl_metadata, 1);
2274 	} else {
2275 		msdu_info->vdev_id = vdev->vdev_id;
2276 	}
2277 }
2278 #else
2279 static inline void
2280 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2281 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2282 {
2283 }
2284 
2285 static inline void
2286 dp_tx_update_mcast_param(uint16_t peer_id,
2287 			 uint16_t *htt_tcl_metadata,
2288 			 struct dp_vdev *vdev,
2289 			 struct dp_tx_msdu_info_s *msdu_info)
2290 {
2291 }
2292 #endif
2293 
2294 #ifdef DP_TX_SW_DROP_STATS_INC
2295 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2296 				 qdf_nbuf_t nbuf,
2297 				 enum cdp_tx_sw_drop drop_code)
2298 {
2299 	/* EAPOL Drop stats */
2300 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2301 		switch (drop_code) {
2302 		case TX_DESC_ERR:
2303 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2304 			break;
2305 		case TX_HAL_RING_ACCESS_ERR:
2306 			DP_STATS_INC(pdev,
2307 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2308 			break;
2309 		case TX_DMA_MAP_ERR:
2310 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2311 			break;
2312 		case TX_HW_ENQUEUE:
2313 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2314 			break;
2315 		case TX_SW_ENQUEUE:
2316 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2317 			break;
2318 		default:
2319 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2320 			break;
2321 		}
2322 	}
2323 }
2324 #else
2325 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2326 				 qdf_nbuf_t nbuf,
2327 				 enum cdp_tx_sw_drop drop_code)
2328 {
2329 }
2330 #endif
2331 
2332 qdf_nbuf_t
2333 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2334 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2335 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2336 {
2337 	struct dp_pdev *pdev = vdev->pdev;
2338 	struct dp_soc *soc = pdev->soc;
2339 	struct dp_tx_desc_s *tx_desc;
2340 	QDF_STATUS status;
2341 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2342 	uint16_t htt_tcl_metadata = 0;
2343 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2344 	uint8_t tid = msdu_info->tid;
2345 	struct cdp_tid_tx_stats *tid_stats = NULL;
2346 	qdf_dma_addr_t paddr;
2347 
2348 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2349 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2350 			msdu_info, tx_exc_metadata);
2351 	if (!tx_desc) {
2352 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2353 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2354 		drop_code = TX_DESC_ERR;
2355 		goto fail_return;
2356 	}
2357 
2358 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2359 
2360 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2361 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2362 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2363 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2364 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2365 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2366 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2367 					       peer_id);
2368 		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
2369 	} else
2370 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2371 
2372 	if (msdu_info->exception_fw)
2373 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2374 
2375 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2376 					 !pdev->enhanced_stats_en);
2377 
2378 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2379 
2380 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
2381 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
2382 	else
2383 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2384 
2385 	if (!paddr) {
2386 		/* Handle failure */
2387 		dp_err("qdf_nbuf_map failed");
2388 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2389 		drop_code = TX_DMA_MAP_ERR;
2390 		goto release_desc;
2391 	}
2392 
2393 	tx_desc->dma_addr = paddr;
2394 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2395 			       tx_desc->id, DP_TX_DESC_MAP);
2396 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2397 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2398 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2399 					     htt_tcl_metadata,
2400 					     tx_exc_metadata, msdu_info);
2401 
2402 	if (status != QDF_STATUS_SUCCESS) {
2403 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2404 			     tx_desc, tx_q->ring_id);
2405 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2406 				       tx_desc->id, DP_TX_DESC_UNMAP);
2407 		dp_tx_nbuf_unmap(soc, tx_desc);
2408 		drop_code = TX_HW_ENQUEUE;
2409 		goto release_desc;
2410 	}
2411 
2412 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2413 	return NULL;
2414 
2415 release_desc:
2416 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2417 
2418 fail_return:
2419 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2420 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2421 	tid_stats = &pdev->stats.tid_stats.
2422 		    tid_tx_stats[tx_q->ring_id][tid];
2423 	tid_stats->swdrop_cnt[drop_code]++;
2424 	return nbuf;
2425 }
2426 
2427 /**
2428  * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
2429  * @soc: Soc handle
2430  * @desc: software Tx descriptor to be processed
2431  *
2432  * Return: 0 if Success
2433  */
2434 #ifdef FEATURE_WLAN_TDLS
2435 static inline int
2436 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2437 {
2438 	/* If it is TDLS mgmt, don't unmap or free the frame */
2439 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2440 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2441 		return 0;
2442 	}
2443 	return 1;
2444 }
2445 #else
2446 static inline int
2447 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2448 {
2449 	return 1;
2450 }
2451 #endif
2452 
2453 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2454 			       bool delayed_free)
2455 {
2456 	qdf_nbuf_t nbuf = desc->nbuf;
2457 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2458 
2459 	/* nbuf already freed in vdev detach path */
2460 	if (!nbuf)
2461 		return NULL;
2462 
2463 	if (!dp_tdls_tx_comp_free_buff(soc, desc))
2464 		return NULL;
2465 
2466 	/* 0 : MSDU buffer, 1 : MLE */
2467 	if (desc->msdu_ext_desc) {
2468 		/* TSO free */
2469 		if (hal_tx_ext_desc_get_tso_enable(
2470 					desc->msdu_ext_desc->vaddr)) {
2471 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2472 					       desc->id, DP_TX_COMP_MSDU_EXT);
2473 			dp_tx_tso_seg_history_add(soc,
2474 						  desc->msdu_ext_desc->tso_desc,
2475 						  desc->nbuf, desc->id, type);
2476 			/* unmap each TSO seg before freeing the nbuf */
2477 			dp_tx_tso_unmap_segment(soc,
2478 						desc->msdu_ext_desc->tso_desc,
2479 						desc->msdu_ext_desc->
2480 						tso_num_desc);
2481 			goto nbuf_free;
2482 		}
2483 
2484 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2485 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2486 			qdf_dma_addr_t iova;
2487 			uint32_t frag_len;
2488 			uint32_t i;
2489 
2490 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2491 						     QDF_DMA_TO_DEVICE,
2492 						     qdf_nbuf_headlen(nbuf));
2493 
2494 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2495 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2496 							      &iova,
2497 							      &frag_len);
2498 				if (!iova || !frag_len)
2499 					break;
2500 
2501 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2502 						   QDF_DMA_TO_DEVICE);
2503 			}
2504 
2505 			goto nbuf_free;
2506 		}
2507 	}
2508 	/* If it's an ME frame, don't unmap the cloned nbufs */
2509 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2510 		goto nbuf_free;
2511 
2512 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2513 	dp_tx_unmap(soc, desc);
2514 
2515 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2516 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2517 
2518 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2519 		return NULL;
2520 
2521 nbuf_free:
2522 	if (delayed_free)
2523 		return nbuf;
2524 
2525 	qdf_nbuf_free(nbuf);
2526 
2527 	return NULL;
2528 }
2529 
2530 /**
2531  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2532  * @soc: DP soc handle
2533  * @nbuf: skb
2534  * @msdu_info: MSDU info
2535  *
2536  * Return: None
2537  */
2538 static inline void
2539 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2540 		   struct dp_tx_msdu_info_s *msdu_info)
2541 {
2542 	uint32_t cur_idx;
2543 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2544 
2545 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2546 				     qdf_nbuf_headlen(nbuf));
2547 
2548 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2549 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2550 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2551 				    seg->frags[cur_idx].paddr_hi) << 32),
2552 				   seg->frags[cur_idx].len,
2553 				   QDF_DMA_TO_DEVICE);
2554 }
2555 
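/*
 * Self-contained sketch of the paddr_lo/paddr_hi convention used by the
 * SG frag descriptors: a DMA address is split into 32-bit halves when the
 * segment is prepared and rebuilt with the same shift when it is unmapped,
 * as in dp_tx_sg_unmap_buf() above. Illustrative helper only.
 */
static inline qdf_dma_addr_t dp_tx_example_frag_paddr(uint32_t paddr_lo,
						      uint32_t paddr_hi)
{
	return (qdf_dma_addr_t)(paddr_lo | ((uint64_t)paddr_hi << 32));
}
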
2556 #if QDF_LOCK_STATS
2557 noinline
2558 #else
2559 #endif
2560 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2561 				    struct dp_tx_msdu_info_s *msdu_info)
2562 {
2563 	uint32_t i;
2564 	struct dp_pdev *pdev = vdev->pdev;
2565 	struct dp_soc *soc = pdev->soc;
2566 	struct dp_tx_desc_s *tx_desc;
2567 	bool is_cce_classified = false;
2568 	QDF_STATUS status;
2569 	uint16_t htt_tcl_metadata = 0;
2570 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2571 	struct cdp_tid_tx_stats *tid_stats = NULL;
2572 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2573 
2574 	if (msdu_info->frm_type == dp_tx_frm_me)
2575 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2576 
2577 	i = 0;
2578 	/* Print statement to track i and num_seg */
2579 	/*
2580 	 * For each segment (maps to 1 MSDU), prepare software and hardware
2581 	 * descriptors using information in msdu_info
2582 	 */
2583 	while (i < msdu_info->num_seg) {
2584 		/*
2585 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2586 		 * descriptor
2587 		 */
2588 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2589 				tx_q->desc_pool_id);
2590 
2591 		if (!tx_desc) {
2592 			if (msdu_info->frm_type == dp_tx_frm_me) {
2593 				prep_desc_fail++;
2594 				dp_tx_me_free_buf(pdev,
2595 					(void *)(msdu_info->u.sg_info
2596 						.curr_seg->frags[0].vaddr));
2597 				if (prep_desc_fail == msdu_info->num_seg) {
2598 					/*
2599 					 * Unmap is needed only if descriptor
2600 					 * preparation failed for all segments.
2601 					 */
2602 					qdf_nbuf_unmap(soc->osdev,
2603 						       msdu_info->u.sg_info.
2604 						       curr_seg->nbuf,
2605 						       QDF_DMA_TO_DEVICE);
2606 				}
2607 				/*
2608 				 * Free the nbuf for the current segment
2609 				 * and make it point to the next in the list.
2610 				 * For ME, there are as many segments as
2611 				 * there are clients.
2612 				 */
2613 				qdf_nbuf_free(msdu_info->u.sg_info
2614 					      .curr_seg->nbuf);
2615 				if (msdu_info->u.sg_info.curr_seg->next) {
2616 					msdu_info->u.sg_info.curr_seg =
2617 						msdu_info->u.sg_info
2618 						.curr_seg->next;
2619 					nbuf = msdu_info->u.sg_info
2620 					       .curr_seg->nbuf;
2621 				}
2622 				i++;
2623 				continue;
2624 			}
2625 
2626 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2627 				dp_tx_tso_seg_history_add(
2628 						soc,
2629 						msdu_info->u.tso_info.curr_seg,
2630 						nbuf, 0, DP_TX_DESC_UNMAP);
2631 				dp_tx_tso_unmap_segment(soc,
2632 							msdu_info->u.tso_info.
2633 							curr_seg,
2634 							msdu_info->u.tso_info.
2635 							tso_num_seg_list);
2636 
2637 				if (msdu_info->u.tso_info.curr_seg->next) {
2638 					msdu_info->u.tso_info.curr_seg =
2639 					msdu_info->u.tso_info.curr_seg->next;
2640 					i++;
2641 					continue;
2642 				}
2643 			}
2644 
2645 			if (msdu_info->frm_type == dp_tx_frm_sg)
2646 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2647 
2648 			goto done;
2649 		}
2650 
2651 		if (msdu_info->frm_type == dp_tx_frm_me) {
2652 			tx_desc->msdu_ext_desc->me_buffer =
2653 				(struct dp_tx_me_buf_t *)msdu_info->
2654 				u.sg_info.curr_seg->frags[0].vaddr;
2655 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2656 		}
2657 
2658 		if (is_cce_classified)
2659 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2660 
2661 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2662 		if (msdu_info->exception_fw) {
2663 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2664 		}
2665 
2666 		dp_tx_is_hp_update_required(i, msdu_info);
2667 
2668 		/*
2669 		 * For frames with multiple segments (TSO, ME), jump to next
2670 		 * segment.
2671 		 */
2672 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2673 			if (msdu_info->u.tso_info.curr_seg->next) {
2674 				msdu_info->u.tso_info.curr_seg =
2675 					msdu_info->u.tso_info.curr_seg->next;
2676 
2677 				/*
2678 				 * If this is a jumbo nbuf, then increment the
2679 				 * number of nbuf users for each additional
2680 				 * segment of the msdu. This will ensure that
2681 				 * the skb is freed only after receiving tx
2682 				 * completion for all segments of an nbuf
2683 				 */
2684 				qdf_nbuf_inc_users(nbuf);
2685 
2686 				/* Check with MCL if this is needed */
2687 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2688 				 */
2689 			}
2690 		}
2691 
2692 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2693 					 &htt_tcl_metadata,
2694 					 vdev,
2695 					 msdu_info);
2696 		/*
2697 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2698 		 */
2699 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2700 						     htt_tcl_metadata,
2701 						     NULL, msdu_info);
2702 
2703 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2704 
2705 		if (status != QDF_STATUS_SUCCESS) {
2706 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2707 				   tx_desc, tx_q->ring_id);
2708 
2709 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2710 			tid_stats = &pdev->stats.tid_stats.
2711 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2712 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2713 
2714 			if (msdu_info->frm_type == dp_tx_frm_me) {
2715 				hw_enq_fail++;
2716 				if (hw_enq_fail == msdu_info->num_seg) {
2717 					/*
2718 					 * Unmap is needed only if enqueue
2719 					 * failed for all segments.
2720 					 */
2721 					qdf_nbuf_unmap(soc->osdev,
2722 						       msdu_info->u.sg_info.
2723 						       curr_seg->nbuf,
2724 						       QDF_DMA_TO_DEVICE);
2725 				}
2726 				/*
2727 				 * Free the nbuf for the current segment
2728 				 * and make it point to the next in the list.
2729 				 * For ME, there are as many segments as
2730 				 * there are clients.
2731 				 */
2732 				qdf_nbuf_free(msdu_info->u.sg_info
2733 					      .curr_seg->nbuf);
2734 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2735 				if (msdu_info->u.sg_info.curr_seg->next) {
2736 					msdu_info->u.sg_info.curr_seg =
2737 						msdu_info->u.sg_info
2738 						.curr_seg->next;
2739 					nbuf = msdu_info->u.sg_info
2740 					       .curr_seg->nbuf;
2741 				} else
2742 					break;
2743 				i++;
2744 				continue;
2745 			}
2746 
2747 			/*
2748 			 * For TSO frames, the nbuf users increment done for
2749 			 * the current segment has to be reverted, since the
2750 			 * hw enqueue for this segment failed
2751 			 */
2752 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2753 			    msdu_info->u.tso_info.curr_seg) {
2754 				/*
2755 				 * unmap and free current,
2756 				 * retransmit remaining segments
2757 				 */
2758 				dp_tx_comp_free_buf(soc, tx_desc, false);
2759 				i++;
2760 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2761 				continue;
2762 			}
2763 
2764 			if (msdu_info->frm_type == dp_tx_frm_sg)
2765 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2766 
2767 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2768 			goto done;
2769 		}
2770 
2771 		/*
2772 		 * TODO
2773 		 * if tso_info structure can be modified to have curr_seg
2774 		 * as first element, following 2 blocks of code (for TSO and SG)
2775 		 * can be combined into 1
2776 		 */
2777 
2778 		/*
2779 		 * For Multicast-Unicast converted packets,
2780 		 * each converted frame (for a client) is represented as
2781 		 * 1 segment
2782 		 */
2783 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2784 				(msdu_info->frm_type == dp_tx_frm_me)) {
2785 			if (msdu_info->u.sg_info.curr_seg->next) {
2786 				msdu_info->u.sg_info.curr_seg =
2787 					msdu_info->u.sg_info.curr_seg->next;
2788 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2789 			} else
2790 				break;
2791 		}
2792 		i++;
2793 	}
2794 
2795 	nbuf = NULL;
2796 
2797 done:
2798 	return nbuf;
2799 }
2800 
2801 /**
2802  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2803  *                     for SG frames
2804  * @vdev: DP vdev handle
2805  * @nbuf: skb
2806  * @seg_info: Pointer to Segment info Descriptor to be prepared
2807  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2808  *
2809  * Return: nbuf on success,
2810  *         NULL when preparation fails
2811  */
2812 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2813 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2814 {
2815 	uint32_t cur_frag, nr_frags, i;
2816 	qdf_dma_addr_t paddr;
2817 	struct dp_tx_sg_info_s *sg_info;
2818 
2819 	sg_info = &msdu_info->u.sg_info;
2820 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2821 
2822 	if (QDF_STATUS_SUCCESS !=
2823 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2824 					   QDF_DMA_TO_DEVICE,
2825 					   qdf_nbuf_headlen(nbuf))) {
2826 		dp_tx_err("dma map error");
2827 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2828 		qdf_nbuf_free(nbuf);
2829 		return NULL;
2830 	}
2831 
2832 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2833 	seg_info->frags[0].paddr_lo = paddr;
2834 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2835 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2836 	seg_info->frags[0].vaddr = (void *) nbuf;
2837 
2838 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2839 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2840 							    nbuf, 0,
2841 							    QDF_DMA_TO_DEVICE,
2842 							    cur_frag)) {
2843 			dp_tx_err("frag dma map error");
2844 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2845 			goto map_err;
2846 		}
2847 
2848 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2849 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2850 		seg_info->frags[cur_frag + 1].paddr_hi =
2851 			((uint64_t) paddr) >> 32;
2852 		seg_info->frags[cur_frag + 1].len =
2853 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2854 	}
2855 
2856 	seg_info->frag_cnt = (cur_frag + 1);
2857 	seg_info->total_len = qdf_nbuf_len(nbuf);
2858 	seg_info->next = NULL;
2859 
2860 	sg_info->curr_seg = seg_info;
2861 
2862 	msdu_info->frm_type = dp_tx_frm_sg;
2863 	msdu_info->num_seg = 1;
2864 
2865 	return nbuf;
2866 map_err:
2867 	/* restore paddr into nbuf before calling unmap */
2868 	qdf_nbuf_mapped_paddr_set(nbuf,
2869 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2870 				  ((uint64_t)
2871 				  seg_info->frags[0].paddr_hi) << 32));
2872 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2873 				     QDF_DMA_TO_DEVICE,
2874 				     seg_info->frags[0].len);
2875 	for (i = 1; i <= cur_frag; i++) {
2876 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2877 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2878 				   seg_info->frags[i].paddr_hi) << 32),
2879 				   seg_info->frags[i].len,
2880 				   QDF_DMA_TO_DEVICE);
2881 	}
2882 	qdf_nbuf_free(nbuf);
2883 	return NULL;
2884 }
2885 
2886 /**
2887  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2888  * @vdev: DP vdev handle
2889  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2890  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2891  *
2892  * Return: None
2894  */
2895 static
2896 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2897 				    struct dp_tx_msdu_info_s *msdu_info,
2898 				    uint16_t ppdu_cookie)
2899 {
2900 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2901 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2902 
2903 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2904 
2905 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2906 				(msdu_info->meta_data[5], 1);
2907 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2908 				(msdu_info->meta_data[5], 1);
2909 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2910 				(msdu_info->meta_data[6], ppdu_cookie);
2911 
2912 	msdu_info->exception_fw = 1;
2913 	msdu_info->is_tx_sniffer = 1;
2914 }
2915 
2916 #ifdef MESH_MODE_SUPPORT
2917 
2918 /**
2919  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2920  *				and prepare msdu_info for mesh frames.
2921  * @vdev: DP vdev handle
2922  * @nbuf: skb
2923  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2924  *
2925  * Return: NULL on failure,
2926  *         nbuf when extracted successfully
2927  */
2928 static
2929 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2930 				struct dp_tx_msdu_info_s *msdu_info)
2931 {
2932 	struct meta_hdr_s *mhdr;
2933 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2934 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2935 
2936 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2937 
2938 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2939 		msdu_info->exception_fw = 0;
2940 		goto remove_meta_hdr;
2941 	}
2942 
2943 	msdu_info->exception_fw = 1;
2944 
2945 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2946 
2947 	meta_data->host_tx_desc_pool = 1;
2948 	meta_data->update_peer_cache = 1;
2949 	meta_data->learning_frame = 1;
2950 
2951 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2952 		meta_data->power = mhdr->power;
2953 
2954 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2955 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2956 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2957 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2958 
2959 		meta_data->dyn_bw = 1;
2960 
2961 		meta_data->valid_pwr = 1;
2962 		meta_data->valid_mcs_mask = 1;
2963 		meta_data->valid_nss_mask = 1;
2964 		meta_data->valid_preamble_type  = 1;
2965 		meta_data->valid_retries = 1;
2966 		meta_data->valid_bw_info = 1;
2967 	}
2968 
2969 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2970 		meta_data->encrypt_type = 0;
2971 		meta_data->valid_encrypt_type = 1;
2972 		meta_data->learning_frame = 0;
2973 	}
2974 
2975 	meta_data->valid_key_flags = 1;
2976 	meta_data->key_flags = (mhdr->keyix & 0x3);
2977 
2978 remove_meta_hdr:
2979 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2980 		dp_tx_err("qdf_nbuf_pull_head failed");
2981 		qdf_nbuf_free(nbuf);
2982 		return NULL;
2983 	}
2984 
2985 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2986 
2987 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2988 		   " tid %d to_fw %d",
2989 		   msdu_info->meta_data[0],
2990 		   msdu_info->meta_data[1],
2991 		   msdu_info->meta_data[2],
2992 		   msdu_info->meta_data[3],
2993 		   msdu_info->meta_data[4],
2994 		   msdu_info->meta_data[5],
2995 		   msdu_info->tid, msdu_info->exception_fw);
2996 
2997 	return nbuf;
2998 }
2999 #else
3000 static
3001 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3002 				struct dp_tx_msdu_info_s *msdu_info)
3003 {
3004 	return nbuf;
3005 }
3006 
3007 #endif
3008 
3009 /**
3010  * dp_check_exc_metadata() - Checks if parameters are valid
3011  * @tx_exc: holds all exception path parameters
3012  *
3013  * Return: true when all the parameters are valid else false
3014  *
3015  */
3016 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3017 {
3018 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3019 			    HTT_INVALID_TID);
3020 	bool invalid_encap_type =
3021 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3022 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3023 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3024 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3025 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3026 			       tx_exc->ppdu_cookie == 0);
3027 
3028 	if (tx_exc->is_intrabss_fwd)
3029 		return true;
3030 
3031 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3032 	    invalid_cookie) {
3033 		return false;
3034 	}
3035 
3036 	return true;
3037 }
3038 
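/*
 * Minimal sketch, field values are assumptions and not taken from an
 * existing caller: an exception-path metadata block that passes the checks
 * in dp_check_exc_metadata(). Every field either carries a real value or
 * the corresponding "invalid/ignore" constant tested above.
 */
static inline void
dp_tx_example_fill_exc_metadata(struct cdp_tx_exception_metadata *exc,
				uint16_t peer_id, uint8_t tid)
{
	qdf_mem_zero(exc, sizeof(*exc));
	exc->peer_id = peer_id;			/* or CDP_INVALID_PEER */
	exc->tid = tid;				/* or HTT_INVALID_TID */
	exc->tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
	exc->sec_type = CDP_INVALID_SEC_TYPE;
	exc->is_tx_sniffer = 0;	/* ppdu_cookie must be set when this is 1 */
}
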
3039 #ifdef ATH_SUPPORT_IQUE
3040 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3041 {
3042 	qdf_ether_header_t *eh;
3043 
3044 	/* Mcast to Ucast Conversion */
3045 	if (qdf_likely(!vdev->mcast_enhancement_en))
3046 		return true;
3047 
3048 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3049 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3050 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3051 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3052 		qdf_nbuf_set_next(nbuf, NULL);
3053 
3054 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
3055 				 qdf_nbuf_len(nbuf));
3056 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3057 				QDF_STATUS_SUCCESS) {
3058 			return false;
3059 		}
3060 
3061 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3062 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3063 					QDF_STATUS_SUCCESS) {
3064 				return false;
3065 			}
3066 		}
3067 	}
3068 
3069 	return true;
3070 }
3071 #else
3072 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3073 {
3074 	return true;
3075 }
3076 #endif
3077 
3078 #ifdef QCA_SUPPORT_WDS_EXTENDED
3079 /**
3080  * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
3081  * @vdev: vdev handle
3082  * @nbuf: skb
3083  *
3084  * Return: true if frame is dropped, false otherwise
3085  */
3086 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3087 {
3088 	/* Check drop_tx_mcast along with the WDS Extended feature */
3089 	if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
3090 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3091 						qdf_nbuf_data(nbuf);
3092 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3093 			DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
3094 			return true;
3095 		}
3096 	}
3097 
3098 	return false;
3099 }
3100 #else
3101 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3102 {
3103 	return false;
3104 }
3105 #endif
3106 /**
3107  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3108  * @nbuf: qdf_nbuf_t
3109  * @vdev: struct dp_vdev *
3110  *
3111  * Allow the packet for processing only if it is destined to a peer
3112  * client connected to the same vap. Drop the packet if the client is
3113  * connected to a different vap.
3114  *
3115  * Return: QDF_STATUS
3116  */
3117 static inline QDF_STATUS
3118 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3119 {
3120 	struct dp_ast_entry *dst_ast_entry = NULL;
3121 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3122 
3123 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3124 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3125 		return QDF_STATUS_SUCCESS;
3126 
3127 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3128 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3129 							eh->ether_dhost,
3130 							vdev->vdev_id);
3131 
3132 	/* If there is no ast entry, return failure */
3133 	if (qdf_unlikely(!dst_ast_entry)) {
3134 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3135 		return QDF_STATUS_E_FAILURE;
3136 	}
3137 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3138 
3139 	return QDF_STATUS_SUCCESS;
3140 }
3141 
3142 /**
3143  * dp_tx_nawds_handler() - NAWDS handler
3144  *
3145  * @soc: DP soc handle
3146  * @vdev: DP vdev handle
3147  * @msdu_info: msdu_info required to create HTT metadata
3148  * @nbuf: skb
3149  * @sa_peer_id: peer id of the frame's source peer
3150  *
3151  * This API transmits the multicast frame, with the peer id set, to
3152  * each NAWDS enabled peer.
3153  *
3154  * Return: none
3155  */
3156 
3157 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3158 			 struct dp_tx_msdu_info_s *msdu_info,
3159 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3160 {
3161 	struct dp_peer *peer = NULL;
3162 	qdf_nbuf_t nbuf_clone = NULL;
3163 	uint16_t peer_id = DP_INVALID_PEER;
3164 	struct dp_txrx_peer *txrx_peer;
3165 	uint8_t link_id = 0;
3166 
3167 	/* This check avoids forwarding a packet whose source is entered
3168 	 * in the ast table but still doesn't have a valid peer id.
3169 	 */
3170 	if (sa_peer_id == HTT_INVALID_PEER)
3171 		return;
3172 
3173 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3174 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3175 		txrx_peer = dp_get_txrx_peer(peer);
3176 		if (!txrx_peer)
3177 			continue;
3178 
3179 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3180 			peer_id = peer->peer_id;
3181 
3182 			if (!dp_peer_is_primary_link_peer(peer))
3183 				continue;
3184 
3185 			/* In the case of a wds ext peer, mcast traffic will
3186 			 * be sent as part of the VLAN interface
3187 			 */
3188 			if (dp_peer_is_wds_ext_peer(txrx_peer))
3189 				continue;
3190 
3191 			/* Multicast packets need to be
3192 			 * dropped in case of intra-bss forwarding
3193 			 */
3194 			if (sa_peer_id == txrx_peer->peer_id) {
3195 				dp_tx_debug("multicast packet");
3196 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3197 							  tx.nawds_mcast_drop,
3198 							  1, link_id);
3199 				continue;
3200 			}
3201 
3202 			nbuf_clone = qdf_nbuf_clone(nbuf);
3203 
3204 			if (!nbuf_clone) {
3205 				QDF_TRACE(QDF_MODULE_ID_DP,
3206 					  QDF_TRACE_LEVEL_ERROR,
3207 					  FL("nbuf clone failed"));
3208 				break;
3209 			}
3210 
3211 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3212 							    msdu_info, peer_id,
3213 							    NULL);
3214 
3215 			if (nbuf_clone) {
3216 				dp_tx_debug("pkt send failed");
3217 				qdf_nbuf_free(nbuf_clone);
3218 			} else {
3219 				if (peer_id != DP_INVALID_PEER)
3220 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3221 								      tx.nawds_mcast,
3222 								      1, qdf_nbuf_len(nbuf), link_id);
3223 			}
3224 		}
3225 	}
3226 
3227 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3228 }
3229 
3230 qdf_nbuf_t
3231 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3232 		     qdf_nbuf_t nbuf,
3233 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3234 {
3235 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3236 	struct dp_tx_msdu_info_s msdu_info;
3237 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3238 						     DP_MOD_ID_TX_EXCEPTION);
3239 
3240 	if (qdf_unlikely(!vdev))
3241 		goto fail;
3242 
3243 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3244 
3245 	if (!tx_exc_metadata)
3246 		goto fail;
3247 
3248 	msdu_info.tid = tx_exc_metadata->tid;
3249 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3250 			 QDF_MAC_ADDR_REF(nbuf->data));
3251 
3252 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3253 
3254 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3255 		dp_tx_err("Invalid parameters in exception path");
3256 		goto fail;
3257 	}
3258 
3259 	/* for peer based metadata, check if the peer is valid */
3260 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3261 		struct dp_peer *peer = NULL;
3262 
3263 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3264 					      tx_exc_metadata->peer_id,
3265 					      DP_MOD_ID_TX_EXCEPTION);
3266 		if (qdf_unlikely(!peer)) {
3267 			DP_STATS_INC(vdev,
3268 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3269 				     1);
3270 			goto fail;
3271 		}
3272 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3273 	}
3274 	/* Basic sanity checks for unsupported packets */
3275 
3276 	/* MESH mode */
3277 	if (qdf_unlikely(vdev->mesh_vdev)) {
3278 		dp_tx_err("Mesh mode is not supported in exception path");
3279 		goto fail;
3280 	}
3281 
3282 	/*
3283 	 * Classify the frame and call corresponding
3284 	 * "prepare" function which extracts the segment (TSO)
3285 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3286 	 * into MSDU_INFO structure which is later used to fill
3287 	 * SW and HW descriptors.
3288 	 */
3289 	if (qdf_nbuf_is_tso(nbuf)) {
3290 		dp_verbose_debug("TSO frame %pK", vdev);
3291 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3292 				 qdf_nbuf_len(nbuf));
3293 
3294 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3295 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3296 					 qdf_nbuf_len(nbuf));
3297 			goto fail;
3298 		}
3299 
3300 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3301 
3302 		goto send_multiple;
3303 	}
3304 
3305 	/* SG */
3306 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3307 		struct dp_tx_seg_info_s seg_info = {0};
3308 
3309 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3310 		if (!nbuf)
3311 			goto fail;
3312 
3313 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3314 
3315 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3316 				 qdf_nbuf_len(nbuf));
3317 
3318 		goto send_multiple;
3319 	}
3320 
3321 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3322 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3323 				 qdf_nbuf_len(nbuf));
3324 
3325 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3326 					       tx_exc_metadata->ppdu_cookie);
3327 	}
3328 
3329 	/*
3330 	 * Get HW Queue to use for this frame.
3331 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3332 	 * dedicated for data and 1 for command.
3333 	 * "queue_id" maps to one hardware ring.
3334 	 *  With each ring, we also associate a unique Tx descriptor pool
3335 	 *  to minimize lock contention for these resources.
3336 	 */
3337 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3338 
3339 	/*
3340 	 * If the packet is a mcast packet, send it through the mlo_mcast
3341 	 * handler for all prnt_vdevs
3342 	 */
3343 
3344 	if (soc->arch_ops.dp_tx_mlo_mcast_send) {
3345 		nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
3346 							  nbuf,
3347 							  tx_exc_metadata);
3348 		if (!nbuf)
3349 			goto fail;
3350 	}
3351 
3352 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3353 		if (qdf_unlikely(vdev->nawds_enabled)) {
3354 			/*
3355 			 * This is a multicast packet
3356 			 */
3357 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3358 					    tx_exc_metadata->peer_id);
3359 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3360 					 1, qdf_nbuf_len(nbuf));
3361 		}
3362 
3363 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3364 					      DP_INVALID_PEER, NULL);
3365 	} else {
3366 		/*
3367 		 * Check exception descriptors
3368 		 */
3369 		if (dp_tx_exception_limit_check(vdev))
3370 			goto fail;
3371 
3372 		/*  Single linear frame */
3373 		/*
3374 		 * If nbuf is a simple linear frame, use send_single function to
3375 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3376 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3377 		 */
3378 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3379 					      tx_exc_metadata->peer_id,
3380 					      tx_exc_metadata);
3381 	}
3382 
3383 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3384 	return nbuf;
3385 
3386 send_multiple:
3387 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3388 
3389 fail:
3390 	if (vdev)
3391 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3392 	dp_verbose_debug("pkt send failed");
3393 	return nbuf;
3394 }
3395 
3396 qdf_nbuf_t
3397 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3398 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3399 				   struct cdp_tx_exception_metadata *tx_exc_metadata)
3400 {
3401 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3402 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3403 						     DP_MOD_ID_TX_EXCEPTION);
3404 
3405 	if (qdf_unlikely(!vdev))
3406 		goto fail;
3407 
3408 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3409 			== QDF_STATUS_E_FAILURE)) {
3410 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3411 		goto fail;
3412 	}
3413 
3414 	/* Release the reference; it is taken again inside dp_tx_send_exception() */
3415 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3416 
3417 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3418 
3419 fail:
3420 	if (vdev)
3421 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3422 	dp_verbose_debug("pkt send failed");
3423 	return nbuf;
3424 }
3425 
3426 #ifdef MESH_MODE_SUPPORT
3427 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3428 			   qdf_nbuf_t nbuf)
3429 {
3430 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3431 	struct meta_hdr_s *mhdr;
3432 	qdf_nbuf_t nbuf_mesh = NULL;
3433 	qdf_nbuf_t nbuf_clone = NULL;
3434 	struct dp_vdev *vdev;
3435 	uint8_t no_enc_frame = 0;
3436 
3437 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3438 	if (!nbuf_mesh) {
3439 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3440 				"qdf_nbuf_unshare failed");
3441 		return nbuf;
3442 	}
3443 
3444 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3445 	if (!vdev) {
3446 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3447 				"vdev is NULL for vdev_id %d", vdev_id);
3448 		return nbuf;
3449 	}
3450 
3451 	nbuf = nbuf_mesh;
3452 
3453 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3454 
3455 	if ((vdev->sec_type != cdp_sec_type_none) &&
3456 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3457 		no_enc_frame = 1;
3458 
3459 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3460 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3461 
3462 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3463 		       !no_enc_frame) {
3464 		nbuf_clone = qdf_nbuf_clone(nbuf);
3465 		if (!nbuf_clone) {
3466 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3467 				"qdf_nbuf_clone failed");
3468 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3469 			return nbuf;
3470 		}
3471 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3472 	}
3473 
3474 	if (nbuf_clone) {
3475 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3476 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3477 		} else {
3478 			qdf_nbuf_free(nbuf_clone);
3479 		}
3480 	}
3481 
3482 	if (no_enc_frame)
3483 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3484 	else
3485 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3486 
3487 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3488 	if ((!nbuf) && no_enc_frame) {
3489 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3490 	}
3491 
3492 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3493 	return nbuf;
3494 }
3495 
3496 #else
3497 
3498 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3499 			   qdf_nbuf_t nbuf)
3500 {
3501 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3502 }
3503 
3504 #endif
3505 
3506 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3507 static inline
3508 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3509 {
3510 	if (nbuf) {
3511 		qdf_prefetch(&nbuf->len);
3512 		qdf_prefetch(&nbuf->data);
3513 	}
3514 }
3515 #else
3516 static inline
3517 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3518 {
3519 }
3520 #endif
3521 
3522 #ifdef DP_UMAC_HW_RESET_SUPPORT
3523 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3524 		      qdf_nbuf_t nbuf)
3525 {
3526 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3527 	struct dp_vdev *vdev = NULL;
3528 
3529 	vdev = soc->vdev_id_map[vdev_id];
3530 	if (qdf_unlikely(!vdev))
3531 		return nbuf;
3532 
3533 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3534 	return nbuf;
3535 }
3536 
3537 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3538 			  qdf_nbuf_t nbuf,
3539 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3540 {
3541 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3542 }
3543 #endif
3544 
3545 #ifdef FEATURE_DIRECT_LINK
3546 /**
3547  * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
3548  * @nbuf: skb
3549  * @vdev: DP vdev handle
3550  *
3551  * Return: None
3552  */
3553 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3554 {
3555 	if (qdf_unlikely(vdev->to_fw))
3556 		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
3557 }
3558 #else
3559 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3560 {
3561 }
3562 #endif
3563 
3564 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3565 		      qdf_nbuf_t nbuf)
3566 {
3567 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3568 	uint16_t peer_id = HTT_INVALID_PEER;
3569 	/*
3570 	 * a memzero here causes additional function call overhead, so
3571 	 * static stack initialization is used to clear the structure
3572 	 */
3573 	struct dp_tx_msdu_info_s msdu_info = {0};
3574 	struct dp_vdev *vdev = NULL;
3575 	qdf_nbuf_t end_nbuf = NULL;
3576 
3577 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3578 		return nbuf;
3579 
3580 	/*
3581 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3582 	 * it in the per-packet path.
3583 	 *
3584 	 * In this path the vdev memory is already protected by the
3585 	 * netdev tx lock.
3586 	 */
3587 	vdev = soc->vdev_id_map[vdev_id];
3588 	if (qdf_unlikely(!vdev))
3589 		return nbuf;
3590 
3591 	dp_vdev_tx_mark_to_fw(nbuf, vdev);
3592 
3593 	/*
3594 	 * Set Default Host TID value to invalid TID
3595 	 * (TID override disabled)
3596 	 */
3597 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3598 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3599 
3600 	if (qdf_unlikely(vdev->mesh_vdev)) {
3601 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3602 								&msdu_info);
3603 		if (!nbuf_mesh) {
3604 			dp_verbose_debug("Extracting mesh metadata failed");
3605 			return nbuf;
3606 		}
3607 		nbuf = nbuf_mesh;
3608 	}
3609 
3610 	/*
3611 	 * Get HW Queue to use for this frame.
3612 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3613 	 * dedicated for data and 1 for command.
3614 	 * "queue_id" maps to one hardware ring.
3615 	 *  With each ring, we also associate a unique Tx descriptor pool
3616 	 *  to minimize lock contention for these resources.
3617 	 */
3618 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3619 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3620 		     1);
3621 
3622 	/*
3623 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3624 	 *  Table 1 - Default DSCP-TID mapping table
3625 	 *  Table 2 - 1 DSCP-TID override table
3626 	 *
3627 	 * If we need a different DSCP-TID mapping for this vap,
3628 	 * call tid_classify to extract DSCP/ToS from frame and
3629 	 * map to a TID and store in msdu_info. This is later used
3630 	 * to fill in TCL Input descriptor (per-packet TID override).
3631 	 */
3632 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3633 
3634 	/*
3635 	 * Classify the frame and call corresponding
3636 	 * "prepare" function which extracts the segment (TSO)
3637 	 * and fragmentation information (for TSO, SG, ME, or Raw)
3638 	 * into MSDU_INFO structure which is later used to fill
3639 	 * SW and HW descriptors.
3640 	 */
3641 	if (qdf_nbuf_is_tso(nbuf)) {
3642 		dp_verbose_debug("TSO frame %pK", vdev);
3643 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3644 				 qdf_nbuf_len(nbuf));
3645 
3646 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3647 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3648 					 qdf_nbuf_len(nbuf));
3649 			return nbuf;
3650 		}
3651 
3652 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3653 
3654 		goto send_multiple;
3655 	}
3656 
3657 	/* SG */
3658 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3659 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3660 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3661 				return nbuf;
3662 		} else {
3663 			struct dp_tx_seg_info_s seg_info = {0};
3664 
3665 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
3666 				goto send_single;
3667 
3668 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3669 						&msdu_info);
3670 			if (!nbuf)
3671 				return NULL;
3672 
3673 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3674 
3675 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3676 					 qdf_nbuf_len(nbuf));
3677 
3678 			goto send_multiple;
3679 		}
3680 	}
3681 
3682 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3683 		return NULL;
3684 
3685 	if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
3686 		return nbuf;
3687 
3688 	/* RAW */
3689 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3690 		struct dp_tx_seg_info_s seg_info = {0};
3691 
3692 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3693 		if (!nbuf)
3694 			return NULL;
3695 
3696 		dp_verbose_debug("Raw frame %pK", vdev);
3697 
3698 		goto send_multiple;
3699 
3700 	}
3701 
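	/*
	 * NAWDS: for multicast frames, resolve the source peer through its
	 * AST entry (unless AST handling is offloaded to the target) and
	 * hand the frame to the NAWDS handler for NAWDS peer delivery; the
	 * original frame then continues on the regular send path below.
	 */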
3702 	if (qdf_unlikely(vdev->nawds_enabled)) {
3703 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3704 					  qdf_nbuf_data(nbuf);
3705 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3706 			uint16_t sa_peer_id = DP_INVALID_PEER;
3707 
3708 			if (!soc->ast_offload_support) {
3709 				struct dp_ast_entry *ast_entry = NULL;
3710 
3711 				qdf_spin_lock_bh(&soc->ast_lock);
3712 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3713 					(soc,
3714 					 (uint8_t *)(eh->ether_shost),
3715 					 vdev->pdev->pdev_id);
3716 				if (ast_entry)
3717 					sa_peer_id = ast_entry->peer_id;
3718 				qdf_spin_unlock_bh(&soc->ast_lock);
3719 			}
3720 
3721 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3722 					    sa_peer_id);
3723 		}
3724 		peer_id = DP_INVALID_PEER;
3725 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3726 				 1, qdf_nbuf_len(nbuf));
3727 	}
3728 
3729 send_single:
3730 	/*  Single linear frame */
3731 	/*
3732 	 * If nbuf is a simple linear frame, use send_single function to
3733 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3734 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3735 	 */
3736 	dp_tx_prefetch_nbuf_data(nbuf);
3737 
3738 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3739 					      peer_id, end_nbuf);
3740 	return nbuf;
3741 
3742 send_multiple:
3743 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3744 
3745 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3746 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3747 
3748 	return nbuf;
3749 }
3750 
3751 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3752 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3753 {
3754 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3755 	struct dp_vdev *vdev = NULL;
3756 
3757 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3758 		return nbuf;
3759 
3760 	/*
3761 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3762 	 * it in the per-packet path.
3763 	 *
3764 	 * In this path the vdev memory is already protected by the
3765 	 * netdev tx lock
3766 	 */
3767 	vdev = soc->vdev_id_map[vdev_id];
3768 	if (qdf_unlikely(!vdev))
3769 		return nbuf;
3770 
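	/*
	 * Per-packet vdev id check: if the frame no longer belongs to this
	 * vdev, count it under fail_per_pkt_vdev_id_check and hand it back
	 * to the caller for freeing instead of transmitting it.
	 */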
3771 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3772 			== QDF_STATUS_E_FAILURE)) {
3773 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3774 		return nbuf;
3775 	}
3776 
3777 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3778 }
3779 
3780 #ifdef UMAC_SUPPORT_PROXY_ARP
3781 /**
3782  * dp_tx_proxy_arp() - Tx proxy arp handler
3783  * @vdev: datapath vdev handle
3784  * @nbuf: sk buffer
3785  *
3786  * Return: status
3787  */
3788 static inline
3789 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3790 {
3791 	if (vdev->osif_proxy_arp)
3792 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3793 
3794 	/*
3795 	 * When UMAC_SUPPORT_PROXY_ARP is defined, osif_proxy_arp is
3796 	 * expected to have a valid function pointer assigned
3797 	 * to it
3798 	 */
3799 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3800 
3801 	return QDF_STATUS_NOT_INITIALIZED;
3802 }
3803 #else
3804 static inline
3805 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3806 {
3807 	return QDF_STATUS_SUCCESS;
3808 }
3809 #endif
3810 
3811 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
3812 	!defined(CONFIG_MLO_SINGLE_DEV)
3813 #ifdef WLAN_MCAST_MLO
3814 static bool
3815 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3816 		       struct dp_tx_desc_s *tx_desc,
3817 		       qdf_nbuf_t nbuf,
3818 		       uint8_t reinject_reason)
3819 {
3820 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3821 		if (soc->arch_ops.dp_tx_mcast_handler)
3822 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3823 
3824 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3825 		return true;
3826 	}
3827 
3828 	return false;
3829 }
3830 #else /* WLAN_MCAST_MLO */
3831 static inline bool
3832 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3833 		       struct dp_tx_desc_s *tx_desc,
3834 		       qdf_nbuf_t nbuf,
3835 		       uint8_t reinject_reason)
3836 {
3837 	return false;
3838 }
3839 #endif /* WLAN_MCAST_MLO */
3840 #else
3841 static inline bool
3842 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3843 		       struct dp_tx_desc_s *tx_desc,
3844 		       qdf_nbuf_t nbuf,
3845 		       uint8_t reinject_reason)
3846 {
3847 	return false;
3848 }
3849 #endif
3850 
3851 void dp_tx_reinject_handler(struct dp_soc *soc,
3852 			    struct dp_vdev *vdev,
3853 			    struct dp_tx_desc_s *tx_desc,
3854 			    uint8_t *status,
3855 			    uint8_t reinject_reason)
3856 {
3857 	struct dp_peer *peer = NULL;
3858 	uint32_t peer_id = HTT_INVALID_PEER;
3859 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3860 	qdf_nbuf_t nbuf_copy = NULL;
3861 	struct dp_tx_msdu_info_s msdu_info;
3862 #ifdef WDS_VENDOR_EXTENSION
3863 	int is_mcast = 0, is_ucast = 0;
3864 	int num_peers_3addr = 0;
3865 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3866 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3867 #endif
3868 	struct dp_txrx_peer *txrx_peer;
3869 
3870 	qdf_assert(vdev);
3871 
3872 	dp_tx_debug("Tx reinject path");
3873 
3874 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3875 			qdf_nbuf_len(tx_desc->nbuf));
3876 
3877 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3878 		return;
3879 
3880 #ifdef WDS_VENDOR_EXTENSION
3881 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3882 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3883 	} else {
3884 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3885 	}
3886 	is_ucast = !is_mcast;
3887 
3888 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3889 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3890 		txrx_peer = dp_get_txrx_peer(peer);
3891 
3892 		if (!txrx_peer || txrx_peer->bss_peer)
3893 			continue;
3894 
3895 		/* Detect wds peers that use 3-addr framing for mcast.
3896 		 * If there are any, the bss_peer is used to send the
3897 		 * mcast frame using 3-addr format. All wds enabled
3898 		 * peers that use 4-addr framing for mcast frames will
3899 		 * be duplicated and sent as 4-addr frames below.
3900 		 */
3901 		if (!txrx_peer->wds_enabled ||
3902 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3903 			num_peers_3addr = 1;
3904 			break;
3905 		}
3906 	}
3907 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3908 #endif
3909 
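	/*
	 * For non-mesh vdevs, replicate the reinjected frame to every
	 * eligible peer on this vdev (the bss peer for 3-addr stations, or
	 * WDS peers that accept 4-addr mcast/ucast when the vendor
	 * extension is enabled) and transmit each copy as a single MSDU;
	 * the original nbuf is unmapped and freed afterwards.
	 */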
3910 	if (qdf_unlikely(vdev->mesh_vdev)) {
3911 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3912 	} else {
3913 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3914 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3915 			txrx_peer = dp_get_txrx_peer(peer);
3916 			if (!txrx_peer)
3917 				continue;
3918 
3919 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3920 #ifdef WDS_VENDOR_EXTENSION
3921 			/*
3922 			 * . if 3-addr STA, then send on BSS Peer
3923 			 * . if Peer WDS enabled and accept 4-addr mcast,
3924 			 * send mcast on that peer only
3925 			 * . if Peer WDS enabled and accept 4-addr ucast,
3926 			 * send ucast on that peer only
3927 			 */
3928 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3929 			 (txrx_peer->wds_enabled &&
3930 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3931 			 (is_ucast &&
3932 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3933 #else
3934 			(txrx_peer->bss_peer &&
3935 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3936 #endif
3937 				peer_id = DP_INVALID_PEER;
3938 
3939 				nbuf_copy = qdf_nbuf_copy(nbuf);
3940 
3941 				if (!nbuf_copy) {
3942 					dp_tx_debug("nbuf copy failed");
3943 					break;
3944 				}
3945 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3946 				dp_tx_get_queue(vdev, nbuf,
3947 						&msdu_info.tx_queue);
3948 
3949 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3950 						nbuf_copy,
3951 						&msdu_info,
3952 						peer_id,
3953 						NULL);
3954 
3955 				if (nbuf_copy) {
3956 					dp_tx_debug("pkt send failed");
3957 					qdf_nbuf_free(nbuf_copy);
3958 				}
3959 			}
3960 		}
3961 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3962 
3963 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
3964 					     QDF_DMA_TO_DEVICE, nbuf->len);
3965 		qdf_nbuf_free(nbuf);
3966 	}
3967 
3968 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3969 }
3970 
3971 void dp_tx_inspect_handler(struct dp_soc *soc,
3972 			   struct dp_vdev *vdev,
3973 			   struct dp_tx_desc_s *tx_desc,
3974 			   uint8_t *status)
3975 {
3976 
3977 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3978 			"%s Tx inspect path",
3979 			__func__);
3980 
3981 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3982 			 qdf_nbuf_len(tx_desc->nbuf));
3983 
3984 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3985 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3986 }
3987 
3988 #ifdef MESH_MODE_SUPPORT
3989 /**
3990  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3991  *                                         in mesh meta header
3992  * @tx_desc: software descriptor head pointer
3993  * @ts: pointer to tx completion stats
3994  * Return: none
3995  */
3996 static
3997 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3998 		struct hal_tx_completion_status *ts)
3999 {
4000 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4001 
4002 	if (!tx_desc->msdu_ext_desc) {
4003 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4004 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4005 				"netbuf %pK offset %d",
4006 				netbuf, tx_desc->pkt_offset);
4007 			return;
4008 		}
4009 	}
4010 }
4011 
4012 #else
4013 static
4014 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4015 		struct hal_tx_completion_status *ts)
4016 {
4017 }
4018 
4019 #endif
4020 
4021 #ifdef CONFIG_SAWF
4022 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4023 					 struct dp_vdev *vdev,
4024 					 struct dp_txrx_peer *txrx_peer,
4025 					 struct dp_tx_desc_s *tx_desc,
4026 					 struct hal_tx_completion_status *ts,
4027 					 uint8_t tid)
4028 {
4029 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4030 					   ts, tid);
4031 }
4032 
4033 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4034 				    uint32_t nw_delay,
4035 				    uint32_t sw_delay,
4036 				    uint32_t hw_delay)
4037 {
4038 	dp_peer_tid_delay_avg(tx_delay,
4039 			      nw_delay,
4040 			      sw_delay,
4041 			      hw_delay);
4042 }
4043 #else
4044 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4045 					 struct dp_vdev *vdev,
4046 					 struct dp_txrx_peer *txrx_peer,
4047 					 struct dp_tx_desc_s *tx_desc,
4048 					 struct hal_tx_completion_status *ts,
4049 					 uint8_t tid)
4050 {
4051 }
4052 
4053 static inline void
4054 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4055 			uint32_t nw_delay, uint32_t sw_delay,
4056 			uint32_t hw_delay)
4057 {
4058 }
4059 #endif
4060 
4061 #ifdef QCA_PEER_EXT_STATS
4062 #ifdef WLAN_CONFIG_TX_DELAY
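/**
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 * @ts: Tx completion status
 * @vdev: vdev
 *
 * Compute the software enqueue delay from the nbuf ingress and HW enqueue
 * timestamps, and the HW transmit delay via the arch specific
 * dp_tx_compute_hw_delay op; update the corresponding histograms and the
 * running delay averages.
 *
 * Return: void
 */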
4063 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4064 				    struct dp_tx_desc_s *tx_desc,
4065 				    struct hal_tx_completion_status *ts,
4066 				    struct dp_vdev *vdev)
4067 {
4068 	struct dp_soc *soc = vdev->pdev->soc;
4069 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4070 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4071 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4072 
4073 	if (!ts->valid)
4074 		return;
4075 
4076 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4077 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4078 
4079 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4080 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4081 
4082 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4083 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4084 							  &fwhw_transmit_delay))
4085 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4086 					     fwhw_transmit_delay);
4087 
4088 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4089 				fwhw_transmit_delay);
4090 }
4091 #else
4092 /**
4093  * dp_tx_compute_tid_delay() - Compute per TID delay
4094  * @stats: Per TID delay stats
4095  * @tx_desc: Software Tx descriptor
4096  * @ts: Tx completion status
4097  * @vdev: vdev
4098  *
4099  * Compute the software enqueue and hw enqueue delays and
4100  * update the respective histograms
4101  *
4102  * Return: void
4103  */
4104 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4105 				    struct dp_tx_desc_s *tx_desc,
4106 				    struct hal_tx_completion_status *ts,
4107 				    struct dp_vdev *vdev)
4108 {
4109 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4110 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4111 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4112 
4113 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4114 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4115 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4116 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4117 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4118 					 timestamp_hw_enqueue);
4119 
4120 	/*
4121 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
4122 	 */
4123 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4124 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4125 }
4126 #endif
4127 
4128 /**
4129  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4130  * @txrx_peer: DP peer context
4131  * @tx_desc: Tx software descriptor
4132  * @ts: Tx completion status
4133  * @ring_id: Rx CPU context ID/CPU_ID
4134  *
4135  * Update the peer extended stats. These are enhanced delay stats
4136  * tracked at per-MSDU level.
4137  *
4138  * Return: void
4139  */
4140 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4141 					  struct dp_tx_desc_s *tx_desc,
4142 					  struct hal_tx_completion_status *ts,
4143 					  uint8_t ring_id)
4144 {
4145 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4146 	struct dp_soc *soc = NULL;
4147 	struct dp_peer_delay_stats *delay_stats = NULL;
4148 	uint8_t tid;
4149 
4150 	soc = pdev->soc;
4151 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4152 		return;
4153 
4154 	if (!txrx_peer->delay_stats)
4155 		return;
4156 
4157 	tid = ts->tid;
4158 	delay_stats = txrx_peer->delay_stats;
4159 
4160 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4161 
4162 	/*
4163 	 * For packets without a valid data TID, use the highest data TID
4164 	 */
4165 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4166 		tid = CDP_MAX_DATA_TIDS - 1;
4167 
4168 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4169 				tx_desc, ts, txrx_peer->vdev);
4170 }
4171 #else
4172 static inline
4173 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4174 				   struct dp_tx_desc_s *tx_desc,
4175 				   struct hal_tx_completion_status *ts,
4176 				   uint8_t ring_id)
4177 {
4178 }
4179 #endif
4180 
4181 #ifdef WLAN_PEER_JITTER
4182 /**
4183  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4184  * @curr_delay: Current delay
4185  * @prev_delay: Previous delay
4186  * @avg_jitter: Average Jitter
4187  * Return: Newly Computed Average Jitter
4188  */
4189 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4190 					    uint32_t prev_delay,
4191 					    uint32_t avg_jitter)
4192 {
4193 	uint32_t curr_jitter;
4194 	int32_t jitter_diff;
4195 
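	/*
	 * avg_jitter is an exponential moving average: each new sample
	 * moves it towards curr_jitter by
	 * |curr_jitter - avg_jitter| >> DP_AVG_JITTER_WEIGHT_DENOM.
	 */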
4196 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4197 	if (!avg_jitter)
4198 		return curr_jitter;
4199 
4200 	jitter_diff = curr_jitter - avg_jitter;
4201 	if (jitter_diff < 0)
4202 		avg_jitter = avg_jitter -
4203 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4204 	else
4205 		avg_jitter = avg_jitter +
4206 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4207 
4208 	return avg_jitter;
4209 }
4210 
4211 /**
4212  * dp_tx_jitter_get_avg_delay() - compute the average delay
4213  * @curr_delay: Current delay
4214  * @avg_delay: Average delay
4215  * Return: Newly Computed Average Delay
4216  */
4217 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4218 					   uint32_t avg_delay)
4219 {
4220 	int32_t delay_diff;
4221 
4222 	if (!avg_delay)
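	/*
	 * avg_delay is an exponential moving average: each new sample
	 * moves it towards curr_delay by
	 * |curr_delay - avg_delay| >> DP_AVG_DELAY_WEIGHT_DENOM.
	 */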
4223 		return curr_delay;
4224 
4225 	delay_diff = curr_delay - avg_delay;
4226 	if (delay_diff < 0)
4227 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4228 					DP_AVG_DELAY_WEIGHT_DENOM);
4229 	else
4230 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4231 					DP_AVG_DELAY_WEIGHT_DENOM);
4232 
4233 	return avg_delay;
4234 }
4235 
4236 #ifdef WLAN_CONFIG_TX_DELAY
4237 /**
4238  * dp_tx_compute_cur_delay() - get the current delay
4239  * @soc: soc handle
4240  * @vdev: vdev structure for data path state
4241  * @ts: Tx completion status
4242  * @curr_delay: current delay
4243  * @tx_desc: tx descriptor
4244  * Return: void
4245  */
4246 static
4247 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4248 				   struct dp_vdev *vdev,
4249 				   struct hal_tx_completion_status *ts,
4250 				   uint32_t *curr_delay,
4251 				   struct dp_tx_desc_s *tx_desc)
4252 {
4253 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4254 
4255 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4256 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4257 							      curr_delay);
4258 	return status;
4259 }
4260 #else
4261 static
4262 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4263 				   struct dp_vdev *vdev,
4264 				   struct hal_tx_completion_status *ts,
4265 				   uint32_t *curr_delay,
4266 				   struct dp_tx_desc_s *tx_desc)
4267 {
4268 	int64_t current_timestamp, timestamp_hw_enqueue;
4269 
4270 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4271 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4272 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4273 
4274 	return QDF_STATUS_SUCCESS;
4275 }
4276 #endif
4277 
4278 /**
4279  * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
4280  * @jitter: per tid per ring jitter stats
4281  * @ts: Tx completion status
4282  * @vdev: vdev structure for data path state
4283  * @tx_desc: tx descriptor
4284  * Return: void
4285  */
4286 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
4287 				     struct hal_tx_completion_status *ts,
4288 				     struct dp_vdev *vdev,
4289 				     struct dp_tx_desc_s *tx_desc)
4290 {
4291 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
4292 	struct dp_soc *soc = vdev->pdev->soc;
4293 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4294 
4295 	if (ts->status !=  HAL_TX_TQM_RR_FRAME_ACKED) {
4296 		jitter->tx_drop += 1;
4297 		return;
4298 	}
4299 
4300 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
4301 					 tx_desc);
4302 
4303 	if (QDF_IS_STATUS_SUCCESS(status)) {
4304 		avg_delay = jitter->tx_avg_delay;
4305 		avg_jitter = jitter->tx_avg_jitter;
4306 		prev_delay = jitter->tx_prev_delay;
4307 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
4308 							 prev_delay,
4309 							 avg_jitter);
4310 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
4311 		jitter->tx_avg_delay = avg_delay;
4312 		jitter->tx_avg_jitter = avg_jitter;
4313 		jitter->tx_prev_delay = curr_delay;
4314 		jitter->tx_total_success += 1;
4315 	} else if (status == QDF_STATUS_E_FAILURE) {
4316 		jitter->tx_avg_err += 1;
4317 	}
4318 }
4319 
4320 /**
 * dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
4321  * @txrx_peer: DP peer context
4322  * @tx_desc: Tx software descriptor
4323  * @ts: Tx completion status
4324  * @ring_id: Rx CPU context ID/CPU_ID
4325  * Return: void
4326  */
4327 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4328 					   struct dp_tx_desc_s *tx_desc,
4329 					   struct hal_tx_completion_status *ts,
4330 					   uint8_t ring_id)
4331 {
4332 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4333 	struct dp_soc *soc = pdev->soc;
4334 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4335 	uint8_t tid;
4336 	struct cdp_peer_tid_stats *rx_tid = NULL;
4337 
4338 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
4339 		return;
4340 
4341 	tid = ts->tid;
4342 	jitter_stats = txrx_peer->jitter_stats;
4343 	qdf_assert_always(jitter_stats);
4344 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4345 	/*
4346 	 * For packets without a valid data TID, use the highest data TID
4347 	 */
4348 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4349 		tid = CDP_MAX_DATA_TIDS - 1;
4350 
4351 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
4352 	dp_tx_compute_tid_jitter(rx_tid,
4353 				 ts, txrx_peer->vdev, tx_desc);
4354 }
4355 #else
4356 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4357 					   struct dp_tx_desc_s *tx_desc,
4358 					   struct hal_tx_completion_status *ts,
4359 					   uint8_t ring_id)
4360 {
4361 }
4362 #endif
4363 
4364 #ifdef HW_TX_DELAY_STATS_ENABLE
4365 /**
4366  * dp_update_tx_delay_stats() - update the delay stats
4367  * @vdev: vdev handle
4368  * @delay: delay in ms or us based on the flag delay_in_us
4369  * @tid: tid value
4370  * @mode: type of tx delay mode
4371  * @ring_id: ring number
4372  * @delay_in_us: flag to indicate whether the delay is in ms or us
4373  *
4374  * Return: none
4375  */
4376 static inline
4377 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4378 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4379 {
4380 	struct cdp_tid_tx_stats *tstats =
4381 		&vdev->stats.tid_tx_stats[ring_id][tid];
4382 
4383 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4384 			      delay_in_us);
4385 }
4386 #else
4387 static inline
4388 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4389 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4390 {
4391 	struct cdp_tid_tx_stats *tstats =
4392 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4393 
4394 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4395 			      delay_in_us);
4396 }
4397 #endif
4398 
4399 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4400 			 uint8_t tid, uint8_t ring_id)
4401 {
4402 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4403 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4404 	uint32_t fwhw_transmit_delay_us;
4405 
4406 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4407 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4408 		return;
4409 
4410 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4411 		fwhw_transmit_delay_us =
4412 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4413 			qdf_ktime_to_us(tx_desc->timestamp);
4414 
4415 		/*
4416 		 * Delay between packet enqueued to HW and Tx completion in us
4417 		 */
4418 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4419 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4420 					 ring_id, true);
4421 		/*
4422 		 * For MCL, only enqueue to completion delay is required
4423 		 * so return if the vdev flag is enabled.
4424 		 */
4425 		return;
4426 	}
4427 
4428 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4429 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4430 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4431 					 timestamp_hw_enqueue);
4432 
4433 	if (!timestamp_hw_enqueue)
4434 		return;
4435 	/*
4436 	 * Delay between packet enqueued to HW and Tx completion in ms
4437 	 */
4438 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4439 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4440 				 false);
4441 
4442 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4443 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4444 	interframe_delay = (uint32_t)(timestamp_ingress -
4445 				      vdev->prev_tx_enq_tstamp);
4446 
4447 	/*
4448 	 * Delay in software enqueue
4449 	 */
4450 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4451 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4452 				 false);
4453 
4454 	/*
4455 	 * Update interframe delay stats calculated at hardstart receive point.
4456 	 * vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so the
4457 	 * interframe delay will not be calculated correctly for it.
4458 	 * On the other hand, this avoids an extra per-packet check of
4459 	 * !vdev->prev_tx_enq_tstamp.
4460 	 */
4461 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4462 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4463 				 false);
4464 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4465 }
4466 
4467 #ifdef DISABLE_DP_STATS
4468 static
4469 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4470 				   struct dp_txrx_peer *txrx_peer,
4471 				   uint8_t link_id)
4472 {
4473 }
4474 #else
4475 static inline void
4476 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
4477 		       uint8_t link_id)
4478 {
4479 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4480 
4481 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4482 	if (subtype != QDF_PROTO_INVALID)
4483 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4484 					  1, link_id);
4485 }
4486 #endif
4487 
4488 #ifndef QCA_ENHANCED_STATS_SUPPORT
4489 #ifdef DP_PEER_EXTENDED_API
4490 static inline uint8_t
4491 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4492 {
4493 	return txrx_peer->mpdu_retry_threshold;
4494 }
4495 #else
4496 static inline uint8_t
4497 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4498 {
4499 	return 0;
4500 }
4501 #endif
4502 
4503 /**
4504  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4505  *
4506  * @ts: Tx completion status
4507  * @txrx_peer: datapath txrx_peer handle
4508  * @link_id: Link id
4509  *
4510  * Return: void
4511  */
4512 static inline void
4513 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4514 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
4515 {
4516 	uint8_t mcs, pkt_type, dst_mcs_idx;
4517 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4518 
4519 	mcs = ts->mcs;
4520 	pkt_type = ts->pkt_type;
4521 	/* do HW to SW pkt type conversion */
4522 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4523 		    hal_2_dp_pkt_type_map[pkt_type]);
4524 
4525 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4526 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4527 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4528 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4529 				       1, link_id);
4530 
4531 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
4532 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
4533 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
4534 			       link_id);
4535 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4536 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
4537 			       link_id);
4538 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
4539 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
4540 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
4541 				link_id);
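	/*
	 * MPDU level retry counters are updated only on the first MSDU of
	 * an MPDU; mpdu_success_with_retries is incremented by
	 * transmit_cnt / retry_threshold once the transmit count exceeds
	 * the configured retry threshold.
	 */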
4542 	if (ts->first_msdu) {
4543 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4544 					ts->transmit_cnt > 1, link_id);
4545 
4546 		if (!retry_threshold)
4547 			return;
4548 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4549 					qdf_do_div(ts->transmit_cnt,
4550 						   retry_threshold),
4551 					ts->transmit_cnt > retry_threshold,
4552 					link_id);
4553 	}
4554 }
4555 #else
4556 static inline void
4557 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4558 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
4559 {
4560 }
4561 #endif
4562 
4563 #ifdef WLAN_FEATURE_11BE_MLO
4564 static inline int
4565 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
4566 			       struct hal_tx_completion_status *ts,
4567 			       struct dp_txrx_peer *txrx_peer,
4568 			       struct dp_vdev *vdev)
4569 {
4570 	uint8_t hw_link_id = 0;
4571 	uint32_t ppdu_id;
4572 	uint8_t link_id_offset, link_id_bits;
4573 
4574 	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
4575 		return 0;
4576 
4577 	link_id_offset = soc->link_id_offset;
4578 	link_id_bits = soc->link_id_bits;
4579 	ppdu_id = ts->ppdu_id;
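	/*
	 * The hw link id is carried inside the PPDU id; extract it using
	 * the SoC-configured bit offset/width and return it 1-based so
	 * that 0 can be used to mean "link unknown".
	 */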
4580 	hw_link_id = DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
4581 						   link_id_bits);
4582 
4583 	return (hw_link_id + 1);
4584 }
4585 #else
4586 static inline int
4587 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
4588 			       struct hal_tx_completion_status *ts,
4589 			       struct dp_txrx_peer *txrx_peer,
4590 			       struct dp_vdev *vdev)
4591 {
4592 	return 0;
4593 }
4594 #endif
4595 
4596 /**
4597  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4598  *				per wbm ring
4599  *
4600  * @tx_desc: software descriptor head pointer
4601  * @ts: Tx completion status
4602  * @txrx_peer: peer handle
4603  * @ring_id: ring number
4604  * @link_id: Link id
4605  *
4606  * Return: None
4607  */
4608 static inline void
4609 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4610 			struct hal_tx_completion_status *ts,
4611 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
4612 			uint8_t link_id)
4613 {
4614 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4615 	uint8_t tid = ts->tid;
4616 	uint32_t length;
4617 	struct cdp_tid_tx_stats *tid_stats;
4618 
4619 	if (!pdev)
4620 		return;
4621 
4622 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4623 		tid = CDP_MAX_DATA_TIDS - 1;
4624 
4625 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4626 
4627 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4628 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4629 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
4630 					  link_id);
4631 		return;
4632 	}
4633 
4634 	length = qdf_nbuf_len(tx_desc->nbuf);
4635 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4636 
4637 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4638 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4639 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4640 
4641 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4642 		tid_stats->tqm_status_cnt[ts->status]++;
4643 	}
4644 
4645 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4646 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4647 					   ts->transmit_cnt > 1, link_id);
4648 
4649 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4650 					   1, ts->transmit_cnt > 2, link_id);
4651 
4652 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
4653 					   link_id);
4654 
4655 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4656 					   ts->msdu_part_of_amsdu, link_id);
4657 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4658 					   !ts->msdu_part_of_amsdu, link_id);
4659 
4660 		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
4661 							qdf_system_ticks();
4662 
4663 		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
4664 
4665 		return;
4666 	}
4667 
4668 	/*
4669 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4670 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
4671 	 * hw limitation there are no completions for failed cases.
4672 	 * Hence updating tx_failed from data path. Please note that
4673 	 * if tx_failed is fixed to be from ppdu, then this has to be
4674 	 * removed
4675 	 */
4676 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4677 
4678 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4679 				   ts->transmit_cnt > DP_RETRY_COUNT,
4680 				   link_id);
4681 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
4682 
4683 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4684 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
4685 					  link_id);
4686 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4687 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4688 					      length, link_id);
4689 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4690 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
4691 					  link_id);
4692 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4693 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
4694 					  link_id);
4695 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4696 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
4697 					  link_id);
4698 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4699 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
4700 					  link_id);
4701 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4702 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
4703 					  link_id);
4704 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4705 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4706 					  tx.dropped.fw_rem_queue_disable, 1,
4707 					  link_id);
4708 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4709 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4710 					  tx.dropped.fw_rem_no_match, 1,
4711 					  link_id);
4712 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4713 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4714 					  tx.dropped.drop_threshold, 1,
4715 					  link_id);
4716 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4717 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4718 					  tx.dropped.drop_link_desc_na, 1,
4719 					  link_id);
4720 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4721 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4722 					  tx.dropped.invalid_drop, 1,
4723 					  link_id);
4724 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4725 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4726 					  tx.dropped.mcast_vdev_drop, 1,
4727 					  link_id);
4728 	} else {
4729 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
4730 					  link_id);
4731 	}
4732 }
4733 
4734 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4735 /**
4736  * dp_tx_flow_pool_lock() - take flow pool lock
4737  * @soc: core txrx main context
4738  * @tx_desc: tx desc
4739  *
4740  * Return: None
4741  */
4742 static inline
4743 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4744 			  struct dp_tx_desc_s *tx_desc)
4745 {
4746 	struct dp_tx_desc_pool_s *pool;
4747 	uint8_t desc_pool_id;
4748 
4749 	desc_pool_id = tx_desc->pool_id;
4750 	pool = &soc->tx_desc[desc_pool_id];
4751 
4752 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4753 }
4754 
4755 /**
4756  * dp_tx_flow_pool_unlock() - release flow pool lock
4757  * @soc: core txrx main context
4758  * @tx_desc: tx desc
4759  *
4760  * Return: None
4761  */
4762 static inline
4763 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4764 			    struct dp_tx_desc_s *tx_desc)
4765 {
4766 	struct dp_tx_desc_pool_s *pool;
4767 	uint8_t desc_pool_id;
4768 
4769 	desc_pool_id = tx_desc->pool_id;
4770 	pool = &soc->tx_desc[desc_pool_id];
4771 
4772 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4773 }
4774 #else
4775 static inline
4776 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4777 {
4778 }
4779 
4780 static inline
4781 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4782 {
4783 }
4784 #endif
4785 
4786 /**
4787  * dp_tx_notify_completion() - Notify tx completion for this desc
4788  * @soc: core txrx main context
4789  * @vdev: datapath vdev handle
4790  * @tx_desc: tx desc
4791  * @netbuf:  buffer
4792  * @status: tx status
4793  *
4794  * Return: none
4795  */
4796 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4797 					   struct dp_vdev *vdev,
4798 					   struct dp_tx_desc_s *tx_desc,
4799 					   qdf_nbuf_t netbuf,
4800 					   uint8_t status)
4801 {
4802 	void *osif_dev;
4803 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4804 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4805 
4806 	qdf_assert(tx_desc);
4807 
4808 	if (!vdev ||
4809 	    !vdev->osif_vdev) {
4810 		return;
4811 	}
4812 
4813 	osif_dev = vdev->osif_vdev;
4814 	tx_compl_cbk = vdev->tx_comp;
4815 
4816 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4817 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4818 
4819 	if (tx_compl_cbk)
4820 		tx_compl_cbk(netbuf, osif_dev, flag);
4821 }
4822 
4823 /**
4824  * dp_tx_sojourn_stats_process() - Collect sojourn stats
4825  * @pdev: pdev handle
4826  * @txrx_peer: DP peer context
4827  * @tid: tid value
4828  * @txdesc_ts: timestamp from txdesc
4829  * @ppdu_id: ppdu id
4830  * @link_id: link id
4831  *
4832  * Return: none
4833  */
4834 #ifdef FEATURE_PERPKT_INFO
4835 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4836 					       struct dp_txrx_peer *txrx_peer,
4837 					       uint8_t tid,
4838 					       uint64_t txdesc_ts,
4839 					       uint32_t ppdu_id,
4840 					       uint8_t link_id)
4841 {
4842 	uint64_t delta_ms;
4843 	struct cdp_tx_sojourn_stats *sojourn_stats;
4844 	struct dp_peer *primary_link_peer = NULL;
4845 	struct dp_soc *link_peer_soc = NULL;
4846 
4847 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4848 		return;
4849 
4850 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4851 			 tid >= CDP_DATA_TID_MAX))
4852 		return;
4853 
4854 	if (qdf_unlikely(!pdev->sojourn_buf))
4855 		return;
4856 
4857 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4858 							   txrx_peer->peer_id,
4859 							   DP_MOD_ID_TX_COMP);
4860 
4861 	if (qdf_unlikely(!primary_link_peer))
4862 		return;
4863 
4864 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4865 		qdf_nbuf_data(pdev->sojourn_buf);
4866 
4867 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4868 	sojourn_stats->cookie = (void *)
4869 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4870 							  primary_link_peer);
4871 
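	/*
	 * Sojourn time is wall-clock now minus the descriptor timestamp
	 * passed in; fold it into the per-TID EWMA and publish a
	 * single-MSDU snapshot through the WDI sojourn event, then clear
	 * the scratch buffer for the next report.
	 */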
4872 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4873 				txdesc_ts;
4874 	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
4875 			    delta_ms);
4876 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4877 	sojourn_stats->num_msdus[tid] = 1;
4878 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4879 		txrx_peer->stats[link_id].
4880 			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4881 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4882 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4883 			     WDI_NO_VAL, pdev->pdev_id);
4884 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4885 	sojourn_stats->num_msdus[tid] = 0;
4886 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4887 
4888 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4889 }
4890 #else
4891 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4892 					       struct dp_txrx_peer *txrx_peer,
4893 					       uint8_t tid,
4894 					       uint64_t txdesc_ts,
4895 					       uint32_t ppdu_id,
					       uint8_t link_id)
4896 {
4897 }
4898 #endif
4899 
4900 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4901 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4902 				       struct dp_tx_desc_s *desc,
4903 				       struct hal_tx_completion_status *ts)
4904 {
4905 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4906 			     desc, ts->peer_id,
4907 			     WDI_NO_VAL, desc->pdev->pdev_id);
4908 }
4909 #endif
4910 
4911 void
4912 dp_tx_comp_process_desc(struct dp_soc *soc,
4913 			struct dp_tx_desc_s *desc,
4914 			struct hal_tx_completion_status *ts,
4915 			struct dp_txrx_peer *txrx_peer)
4916 {
4917 	uint64_t time_latency = 0;
4918 	uint16_t peer_id = DP_INVALID_PEER_ID;
4919 
4920 	/*
4921 	 * m_copy/tx_capture modes are not supported for
4922 	 * scatter gather packets
4923 	 */
4924 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4925 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4926 				qdf_ktime_to_ms(desc->timestamp));
4927 	}
4928 
4929 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4930 
4931 	if (dp_tx_pkt_tracepoints_enabled())
4932 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4933 				    desc->msdu_ext_desc ?
4934 				    desc->msdu_ext_desc->tso_desc : NULL,
4935 				    qdf_ktime_to_ms(desc->timestamp));
4936 
4937 	if (!(desc->msdu_ext_desc)) {
4938 		dp_tx_enh_unmap(soc, desc);
4939 		if (txrx_peer)
4940 			peer_id = txrx_peer->peer_id;
4941 
4942 		if (QDF_STATUS_SUCCESS ==
4943 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4944 			return;
4945 		}
4946 
4947 		if (QDF_STATUS_SUCCESS ==
4948 		    dp_get_completion_indication_for_stack(soc,
4949 							   desc->pdev,
4950 							   txrx_peer, ts,
4951 							   desc->nbuf,
4952 							   time_latency)) {
4953 			dp_send_completion_to_stack(soc,
4954 						    desc->pdev,
4955 						    ts->peer_id,
4956 						    ts->ppdu_id,
4957 						    desc->nbuf);
4958 			return;
4959 		}
4960 	}
4961 
4962 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4963 	dp_tx_comp_free_buf(soc, desc, false);
4964 }
4965 
4966 #ifdef DISABLE_DP_STATS
4967 /**
4968  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4969  * @soc: core txrx main context
4970  * @vdev: virtual device instance
4971  * @tx_desc: tx desc
4972  * @status: tx status
4973  *
4974  * Return: none
4975  */
4976 static inline
4977 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4978 				     struct dp_vdev *vdev,
4979 				     struct dp_tx_desc_s *tx_desc,
4980 				     uint8_t status)
4981 {
4982 }
4983 #else
4984 static inline
4985 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4986 				     struct dp_vdev *vdev,
4987 				     struct dp_tx_desc_s *tx_desc,
4988 				     uint8_t status)
4989 {
4990 	void *osif_dev;
4991 	ol_txrx_stats_rx_fp stats_cbk;
4992 	uint8_t pkt_type;
4993 
4994 	qdf_assert(tx_desc);
4995 
4996 	if (!vdev ||
4997 	    !vdev->osif_vdev ||
4998 	    !vdev->stats_cb)
4999 		return;
5000 
5001 	osif_dev = vdev->osif_vdev;
5002 	stats_cbk = vdev->stats_cb;
5003 
5004 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5005 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5006 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5007 			  &pkt_type);
5008 }
5009 #endif
5010 
5011 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
5012 /* Mask for bit29 ~ bit31 */
5013 #define DP_TX_TS_BIT29_31_MASK 0xE0000000
5014 /* Timestamp value (unit us) if bit29 is set */
5015 #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
5016 /**
5017  * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
5018  * @ack_ts: OTA ack timestamp, unit us.
5019  * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
5020  * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
5021  *
5022  * This function restores the bit29 ~ bit31 value of the buffer_timestamp
5023  * in the wbm2sw ring entry. Currently buffer_timestamp can only hold up
5024  * to 0x7FFF * 1024 us (29 bits); if the timestamp exceeds that,
5025  * bit29 ~ bit31 are lost.
5026  *
5027  * Return: the adjusted buffer_timestamp value
5028  */
5029 static inline
5030 uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
5031 					uint32_t enqueue_ts,
5032 					uint32_t base_delta_ts)
5033 {
5034 	uint32_t ack_buffer_ts;
5035 	uint32_t ack_buffer_ts_bit29_31;
5036 	uint32_t adjusted_enqueue_ts;
5037 
5038 	/* corresponding buffer_timestamp value when receive OTA Ack */
5039 	ack_buffer_ts = ack_ts - base_delta_ts;
5040 	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
5041 
5042 	/* restore the bit29 ~ bit31 value */
5043 	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
5044 
5045 	/*
5046 	 * If the actual enqueue_ts occupies only 29 bits and enqueue_ts plus
5047 	 * the real UL delay overflows 29 bits, then the 30th bit (bit-29)
5048 	 * must not be set, otherwise an extra 0x20000000 us is added to
5049 	 * enqueue_ts.
5050 	 */
5051 	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
5052 		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
5053 
5054 	return adjusted_enqueue_ts;
5055 }
5056 
5057 QDF_STATUS
5058 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5059 			  uint32_t delta_tsf,
5060 			  uint32_t *delay_us)
5061 {
5062 	uint32_t buffer_ts;
5063 	uint32_t delay;
5064 
5065 	if (!delay_us)
5066 		return QDF_STATUS_E_INVAL;
5067 
5068 	/* If tx_rate_stats_info_valid is 0, the tsf is not valid */
5069 	if (!ts->valid)
5070 		return QDF_STATUS_E_INVAL;
5071 
5072 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5073 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5074 	 * valid up to 29 bits.
5075 	 */
5076 	buffer_ts = ts->buffer_timestamp << 10;
5077 	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
5078 						   buffer_ts, delta_tsf);
5079 
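	/*
	 * HW transmit (uplink) delay is the gap between the adjusted TQM
	 * enqueue timestamp (buffer_ts) and the OTA ack tsf, after
	 * removing the configured base offset (delta_tsf) between the two
	 * timestamp domains.
	 */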
5080 	delay = ts->tsf - buffer_ts - delta_tsf;
5081 
5082 	if (qdf_unlikely(delay & 0x80000000)) {
5083 		dp_err_rl("delay = 0x%x (-ve)\n"
5084 			  "release_src = %d\n"
5085 			  "ppdu_id = 0x%x\n"
5086 			  "peer_id = 0x%x\n"
5087 			  "tid = 0x%x\n"
5088 			  "release_reason = %d\n"
5089 			  "tsf = %u (0x%x)\n"
5090 			  "buffer_timestamp = %u (0x%x)\n"
5091 			  "delta_tsf = %u (0x%x)\n",
5092 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5093 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5094 			  ts->buffer_timestamp, ts->buffer_timestamp,
5095 			  delta_tsf, delta_tsf);
5096 
5097 		delay = 0;
5098 		goto end;
5099 	}
5100 
5101 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
5102 	if (delay > 0x1000000) {
5103 		dp_info_rl("----------------------\n"
5104 			   "Tx completion status:\n"
5105 			   "----------------------\n"
5106 			   "release_src = %d\n"
5107 			   "ppdu_id = 0x%x\n"
5108 			   "release_reason = %d\n"
5109 			   "tsf = %u (0x%x)\n"
5110 			   "buffer_timestamp = %u (0x%x)\n"
5111 			   "delta_tsf = %u (0x%x)\n",
5112 			   ts->release_src, ts->ppdu_id, ts->status,
5113 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5114 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5115 		return QDF_STATUS_E_FAILURE;
5116 	}
5117 
5118 
5119 end:
5120 	*delay_us = delay;
5121 
5122 	return QDF_STATUS_SUCCESS;
5123 }
5124 
5125 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5126 		      uint32_t delta_tsf)
5127 {
5128 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5129 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5130 						     DP_MOD_ID_CDP);
5131 
5132 	if (!vdev) {
5133 		dp_err_rl("vdev %d does not exist", vdev_id);
5134 		return;
5135 	}
5136 
5137 	vdev->delta_tsf = delta_tsf;
5138 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5139 
5140 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5141 }
5142 #endif
5143 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5144 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5145 				      uint8_t vdev_id, bool enable)
5146 {
5147 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5148 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5149 						     DP_MOD_ID_CDP);
5150 
5151 	if (!vdev) {
5152 		dp_err_rl("vdev %d does not exist", vdev_id);
5153 		return QDF_STATUS_E_FAILURE;
5154 	}
5155 
5156 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5157 
5158 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5159 
5160 	return QDF_STATUS_SUCCESS;
5161 }
5162 
5163 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5164 			       uint32_t *val)
5165 {
5166 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5167 	struct dp_vdev *vdev;
5168 	uint32_t delay_accum;
5169 	uint32_t pkts_accum;
5170 
5171 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5172 	if (!vdev) {
5173 		dp_err_rl("vdev %d does not exist", vdev_id);
5174 		return QDF_STATUS_E_FAILURE;
5175 	}
5176 
5177 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5178 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5179 		return QDF_STATUS_E_FAILURE;
5180 	}
5181 
5182 	/* Average uplink delay based on current accumulated values */
5183 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5184 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5185 
5186 	*val = delay_accum / pkts_accum;
5187 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5188 		 delay_accum, pkts_accum);
5189 
5190 	/* Reset accumulated values to 0 */
5191 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5192 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5193 
5194 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5195 
5196 	return QDF_STATUS_SUCCESS;
5197 }
5198 
5199 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5200 				      struct hal_tx_completion_status *ts)
5201 {
5202 	uint32_t ul_delay;
5203 
5204 	if (qdf_unlikely(!vdev)) {
5205 		dp_info_rl("vdev is null or delete in progress");
5206 		return;
5207 	}
5208 
5209 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5210 		return;
5211 
5212 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5213 							  vdev->delta_tsf,
5214 							  &ul_delay)))
5215 		return;
5216 
5217 	ul_delay /= 1000; /* in unit of ms */
5218 
5219 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5220 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5221 }
5222 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5223 static inline
5224 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5225 			       struct hal_tx_completion_status *ts)
5226 {
5227 }
5228 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5229 
5230 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
5231 				  struct dp_tx_desc_s *tx_desc,
5232 				  struct hal_tx_completion_status *ts,
5233 				  struct dp_txrx_peer *txrx_peer,
5234 				  uint8_t ring_id)
5235 {
5236 	uint32_t length;
5237 	qdf_ether_header_t *eh;
5238 	struct dp_vdev *vdev = NULL;
5239 	qdf_nbuf_t nbuf = tx_desc->nbuf;
5240 	enum qdf_dp_tx_rx_status dp_status;
5241 	uint8_t link_id = 0;
5242 
5243 	if (!nbuf) {
5244 		dp_info_rl("invalid tx descriptor. nbuf NULL");
5245 		goto out;
5246 	}
5247 
5248 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
5249 	length = dp_tx_get_pkt_len(tx_desc);
5250 
5251 	dp_status = dp_tx_hw_to_qdf(ts->status);
5252 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
5253 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
5254 				 QDF_TRACE_DEFAULT_PDEV_ID,
5255 				 qdf_nbuf_data_addr(nbuf),
5256 				 sizeof(qdf_nbuf_data(nbuf)),
5257 				 tx_desc->id, ts->status, dp_status));
5258 
5259 	dp_tx_comp_debug("-------------------- \n"
5260 			 "Tx Completion Stats: \n"
5261 			 "-------------------- \n"
5262 			 "ack_frame_rssi = %d \n"
5263 			 "first_msdu = %d \n"
5264 			 "last_msdu = %d \n"
5265 			 "msdu_part_of_amsdu = %d \n"
5266 			 "rate_stats valid = %d \n"
5267 			 "bw = %d \n"
5268 			 "pkt_type = %d \n"
5269 			 "stbc = %d \n"
5270 			 "ldpc = %d \n"
5271 			 "sgi = %d \n"
5272 			 "mcs = %d \n"
5273 			 "ofdma = %d \n"
5274 			 "tones_in_ru = %d \n"
5275 			 "tsf = %d \n"
5276 			 "ppdu_id = %d \n"
5277 			 "transmit_cnt = %d \n"
5278 			 "tid = %d \n"
5279 			 "peer_id = %d\n"
5280 			 "tx_status = %d\n",
5281 			 ts->ack_frame_rssi, ts->first_msdu,
5282 			 ts->last_msdu, ts->msdu_part_of_amsdu,
5283 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
5284 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
5285 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
5286 			 ts->transmit_cnt, ts->tid, ts->peer_id,
5287 			 ts->status);
5288 
5289 	/* Update SoC level stats */
5290 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
5291 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
5292 
5293 	if (!txrx_peer) {
5294 		dp_info_rl("peer is null or deletion in progress");
5295 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
5296 		goto out;
5297 	}
5298 	vdev = txrx_peer->vdev;
5299 
5300 #ifdef DP_MLO_LINK_STATS_SUPPORT
5301 	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
5302 	if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
5303 		link_id = 0;
5304 #endif
5305 
5306 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
5307 	dp_tx_update_uplink_delay(soc, vdev, ts);
5308 
5309 	/* check tx complete notification */
5310 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
5311 		dp_tx_notify_completion(soc, vdev, tx_desc,
5312 					nbuf, ts->status);
5313 
5314 	/* Update per-packet stats for mesh mode */
5315 	if (qdf_unlikely(vdev->mesh_vdev) &&
5316 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
5317 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
5318 
5319 	/* Update peer level stats */
5320 	if (qdf_unlikely(txrx_peer->bss_peer &&
5321 			 vdev->opmode == wlan_op_mode_ap)) {
5322 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
5323 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5324 						      length, link_id);
5325 
5326 			if (txrx_peer->vdev->tx_encap_type ==
5327 				htt_cmn_pkt_type_ethernet &&
5328 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5329 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5330 							      tx.bcast, 1,
5331 							      length, link_id);
5332 			}
5333 		}
5334 	} else {
5335 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
5336 					      link_id);
5337 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5338 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5339 						      1, length, link_id);
5340 			if (qdf_unlikely(txrx_peer->in_twt)) {
5341 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5342 							      tx.tx_success_twt,
5343 							      1, length,
5344 							      link_id);
5345 			}
5346 		}
5347 	}
5348 
5349 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
5350 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5351 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
5352 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5353 				     ts, ts->tid);
5354 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5355 
5356 #ifdef QCA_SUPPORT_RDK_STATS
5357 	if (soc->peerstats_enabled)
5358 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5359 					    qdf_ktime_to_ms(tx_desc->timestamp),
5360 					    ts->ppdu_id, link_id);
5361 #endif
5362 
5363 out:
5364 	return;
5365 }
5366 
5367 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5368 	defined(QCA_ENHANCED_STATS_SUPPORT)
5369 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5370 				   uint32_t length, uint8_t tx_status,
5371 				   bool update)
5372 {
5373 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5374 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5375 
5376 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5377 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5378 	}
5379 }
5380 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5381 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5382 				   uint32_t length, uint8_t tx_status,
5383 				   bool update)
5384 {
5385 	if (!txrx_peer->hw_txrx_stats_en) {
5386 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5387 
5388 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5389 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5390 	}
5391 }
5392 
5393 #else
5394 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5395 				   uint32_t length, uint8_t tx_status,
5396 				   bool update)
5397 {
5398 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5399 
5400 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5401 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5402 }
5403 #endif
5404 
5405 /**
5406  * dp_tx_prefetch_next_nbuf_data() - Prefetch nbuf and nbuf data
5407  * @next: descriptor of the next buffer
5408  *
5409  * Return: none
5410  */
5411 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5412 static inline
5413 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5414 {
5415 	qdf_nbuf_t nbuf = NULL;
5416 
5417 	if (next)
5418 		nbuf = next->nbuf;
5419 	if (nbuf)
5420 		qdf_prefetch(nbuf);
5421 }
5422 #else
5423 static inline
5424 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5425 {
5426 }
5427 #endif
5428 
5429 /**
5430  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5431  * @soc: core txrx main context
5432  * @desc: software descriptor
5433  *
5434  * Return: true when packet is reinjected
5435  */
5436 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5437 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
5438 static inline bool
5439 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5440 {
5441 	struct dp_vdev *vdev = NULL;
5442 
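	/*
	 * When TQM reports a multicast drop, reinject the frame through the
	 * arch-specific MLO mcast handler, but only on the vdev that is the
	 * multicast primary; the original descriptor is then released.
	 */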
5443 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5444 		if (!soc->arch_ops.dp_tx_mcast_handler ||
5445 		    !soc->arch_ops.dp_tx_is_mcast_primary)
5446 			return false;
5447 
5448 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5449 					     DP_MOD_ID_REINJECT);
5450 
5451 		if (qdf_unlikely(!vdev)) {
5452 			dp_tx_comp_info_rl("Unable to get vdev ref %d",
5453 					   desc->id);
5454 			return false;
5455 		}
5456 
5457 		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
5458 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5459 			return false;
5460 		}
5461 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5462 				 qdf_nbuf_len(desc->nbuf));
5463 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5464 		dp_tx_desc_release(desc, desc->pool_id);
5465 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5466 		return true;
5467 	}
5468 
5469 	return false;
5470 }
5471 #else
5472 static inline bool
5473 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5474 {
5475 	return false;
5476 }
5477 #endif
5478 
5479 #ifdef QCA_DP_TX_NBUF_LIST_FREE
5480 static inline void
5481 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5482 {
5483 	qdf_nbuf_queue_head_init(nbuf_queue_head);
5484 }
5485 
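/*
 * Queue nbufs from FAST descriptors on the local list so they can be
 * released in one batch via dp_tx_nbuf_dev_kfree_list(); all other nbufs
 * are freed immediately.
 */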
5486 static inline void
5487 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5488 			  struct dp_tx_desc_s *desc)
5489 {
5490 	qdf_nbuf_t nbuf = NULL;
5491 
5492 	nbuf = desc->nbuf;
5493 	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
5494 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
5495 	else
5496 		qdf_nbuf_free(nbuf);
5497 }
5498 
5499 static inline void
5500 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
5501 				  qdf_nbuf_t nbuf)
5502 {
5503 	if (!nbuf)
5504 		return;
5505 
5506 	if (nbuf->is_from_recycler)
5507 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
5508 	else
5509 		qdf_nbuf_free(nbuf);
5510 }
5511 
5512 static inline void
5513 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5514 {
5515 	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
5516 }
5517 #else
5518 static inline void
5519 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5520 {
5521 }
5522 
5523 static inline void
5524 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5525 			  struct dp_tx_desc_s *desc)
5526 {
5527 	qdf_nbuf_free(desc->nbuf);
5528 }
5529 
5530 static inline void
5531 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
5532 				  qdf_nbuf_t nbuf)
5533 {
5534 	qdf_nbuf_free(nbuf);
5535 }
5536 
5537 static inline void
5538 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5539 {
5540 }
5541 #endif
5542 
5543 void
5544 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5545 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5546 {
5547 	struct dp_tx_desc_s *desc;
5548 	struct dp_tx_desc_s *next;
5549 	struct hal_tx_completion_status ts;
5550 	struct dp_txrx_peer *txrx_peer = NULL;
5551 	uint16_t peer_id = DP_INVALID_PEER;
5552 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5553 	qdf_nbuf_queue_head_t h;
5554 
5555 	desc = comp_head;
5556 
5557 	dp_tx_nbuf_queue_head_init(&h);
5558 
5559 	while (desc) {
5560 		next = desc->next;
5561 		dp_tx_prefetch_next_nbuf_data(next);
5562 
5563 		if (peer_id != desc->peer_id) {
5564 			if (txrx_peer)
5565 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5566 							  DP_MOD_ID_TX_COMP);
5567 			peer_id = desc->peer_id;
5568 			txrx_peer =
5569 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5570 							   &txrx_ref_handle,
5571 							   DP_MOD_ID_TX_COMP);
5572 		}
5573 
5574 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5575 			desc = next;
5576 			continue;
5577 		}
5578 
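		/*
		 * PPE-DS descriptors take a short path: update basic peer
		 * stats, return the descriptor via dp_ppeds_tx_desc_free()
		 * and queue the nbuf for batched free.
		 */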
5579 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
5580 			qdf_nbuf_t nbuf;
5581 
5582 			if (qdf_likely(txrx_peer))
5583 				dp_tx_update_peer_basic_stats(txrx_peer,
5584 							      desc->length,
5585 							      desc->tx_status,
5586 							      false);
5587 			nbuf = dp_ppeds_tx_desc_free(soc, desc);
5588 			dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
5589 			desc = next;
5590 			continue;
5591 		}
5592 
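		/*
		 * Fast (SIMPLE) completions skip the full Tx status
		 * processing: unmap the nbuf, queue it for batched free and
		 * return the descriptor directly to its pool.
		 */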
5593 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5594 			struct dp_pdev *pdev = desc->pdev;
5595 
5596 			if (qdf_likely(txrx_peer))
5597 				dp_tx_update_peer_basic_stats(txrx_peer,
5598 							      desc->length,
5599 							      desc->tx_status,
5600 							      false);
5601 			qdf_assert(pdev);
5602 			dp_tx_outstanding_dec(pdev);
5603 
5604 			/*
5605 			 * Calling a QDF wrapper here has a significant
5606 			 * performance impact, so the wrapper call is avoided.
5607 			 */
5608 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5609 					       desc->id, DP_TX_COMP_UNMAP);
5610 			dp_tx_nbuf_unmap(soc, desc);
5611 			dp_tx_nbuf_dev_queue_free(&h, desc);
5612 			dp_tx_desc_free(soc, desc, desc->pool_id);
5613 			desc = next;
5614 			continue;
5615 		}
5616 
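		/*
		 * Regular path: parse the HAL completion status and run the
		 * full per-packet stats and completion processing.
		 */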
5617 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5618 
5619 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5620 					     ring_id);
5621 
5622 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5623 
5624 		dp_tx_desc_release(desc, desc->pool_id);
5625 		desc = next;
5626 	}
5627 	dp_tx_nbuf_dev_kfree_list(&h);
5628 	if (txrx_peer)
5629 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5630 }
5631 
5632 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5633 static inline
5634 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5635 				   int max_reap_limit)
5636 {
5637 	bool limit_hit = false;
5638 
5639 	limit_hit =
5640 		(num_reaped >= max_reap_limit) ? true : false;
5641 
5642 	if (limit_hit)
5643 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5644 
5645 	return limit_hit;
5646 }
5647 
5648 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5649 {
5650 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5651 }
5652 
5653 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5654 {
5655 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5656 
5657 	return cfg->tx_comp_loop_pkt_limit;
5658 }
5659 #else
5660 static inline
5661 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5662 				   int max_reap_limit)
5663 {
5664 	return false;
5665 }
5666 
5667 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5668 {
5669 	return false;
5670 }
5671 
5672 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5673 {
5674 	return 0;
5675 }
5676 #endif
5677 
5678 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5679 static inline int
5680 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5681 				  int *max_reap_limit)
5682 {
5683 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5684 							       max_reap_limit);
5685 }
5686 #else
5687 static inline int
5688 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5689 				  int *max_reap_limit)
5690 {
5691 	return 0;
5692 }
5693 #endif
5694 
5695 #ifdef DP_TX_TRACKING
5696 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5697 {
5698 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5699 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5700 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5701 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5702 	}
5703 }
5704 #endif
5705 
5706 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5707 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5708 			    uint32_t quota)
5709 {
5710 	void *tx_comp_hal_desc;
5711 	void *last_prefetched_hw_desc = NULL;
5712 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5713 	hal_soc_handle_t hal_soc;
5714 	uint8_t buffer_src;
5715 	struct dp_tx_desc_s *tx_desc = NULL;
5716 	struct dp_tx_desc_s *head_desc = NULL;
5717 	struct dp_tx_desc_s *tail_desc = NULL;
5718 	uint32_t num_processed = 0;
5719 	uint32_t count;
5720 	uint32_t num_avail_for_reap = 0;
5721 	bool force_break = false;
5722 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5723 	int max_reap_limit, ring_near_full;
5724 	uint32_t num_entries;
5725 
5726 	DP_HIST_INIT();
5727 
5728 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
5729 
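	/*
	 * Reap loop entry point; re-entered when the ring is near full or
	 * the end-of-loop checks find more completions pending.
	 */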
5730 more_data:
5731 
5732 	hal_soc = soc->hal_soc;
5733 	/* Re-initialize local variables to be re-used */
5734 	head_desc = NULL;
5735 	tail_desc = NULL;
5736 	count = 0;
5737 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5738 
5739 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5740 							   &max_reap_limit);
5741 
5742 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5743 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5744 		return 0;
5745 	}
5746 
5747 	if (!num_avail_for_reap)
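	/*
	 * Determine how many completion entries can be reaped in this pass,
	 * capped to the remaining quota, and prefetch the first descriptors.
	 */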
5748 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
5749 							    hal_ring_hdl, 0);
5750 
5751 	if (num_avail_for_reap >= quota)
5752 		num_avail_for_reap = quota;
5753 
5754 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5755 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5756 							    hal_ring_hdl,
5757 							    num_avail_for_reap);
5758 
5759 	/* Reap completions from the ring and build the SW descriptor list */
5760 	while (qdf_likely(num_avail_for_reap--)) {
5761 
5762 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5763 		if (qdf_unlikely(!tx_comp_hal_desc))
5764 			break;
5765 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5766 							   tx_comp_hal_desc);
5767 
5768 		/* If this buffer was not released by TQM or FW, it is not a
5769 		 * Tx completion indication; log the error and skip it */
5770 		if (qdf_unlikely(buffer_src !=
5771 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5772 				 (qdf_unlikely(buffer_src !=
5773 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5774 			uint8_t wbm_internal_error;
5775 
5776 			dp_err_rl(
5777 				"Tx comp release_src != TQM | FW but from %d",
5778 				buffer_src);
5779 			hal_dump_comp_desc(tx_comp_hal_desc);
5780 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5781 
5782 			/* When WBM sees a NULL buffer_addr_info in any of
5783 			 * its ingress rings it sends an error indication,
5784 			 * with wbm_internal_error=1, to a specific ring.
5785 			 * The WBM2SW ring used to indicate these errors is
5786 			 * fixed in HW, and that ring is being used as the Tx
5787 			 * completion ring. These errors are not related to
5788 			 * Tx completions and should just be ignored.
5789 			 */
5790 			wbm_internal_error = hal_get_wbm_internal_error(
5791 							hal_soc,
5792 							tx_comp_hal_desc);
5793 
5794 			if (wbm_internal_error) {
5795 				dp_err_rl("Tx comp wbm_internal_error!!");
5796 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5797 
5798 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5799 								buffer_src)
5800 					dp_handle_wbm_internal_error(
5801 						soc,
5802 						tx_comp_hal_desc,
5803 						hal_tx_comp_get_buffer_type(
5804 							tx_comp_hal_desc));
5805 
5806 			} else {
5807 				dp_err_rl("Tx comp wbm_internal_error false");
5808 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5809 			}
5810 			continue;
5811 		}
5812 
5813 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5814 							       tx_comp_hal_desc,
5815 							       &tx_desc);
5816 		if (qdf_unlikely(!tx_desc)) {
5817 			dp_err("unable to retrieve tx_desc!");
5818 			hal_dump_comp_desc(tx_comp_hal_desc);
5819 			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
5820 			QDF_BUG(0);
5821 			continue;
5822 		}
5823 		tx_desc->buffer_src = buffer_src;
5824 
5825 		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
5826 			goto add_to_pool2;
5827 
5828 		/*
5829 		 * If the release source is FW, process the HTT status
5830 		 */
5831 		if (qdf_unlikely(buffer_src ==
5832 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5833 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5834 
5835 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5836 					htt_tx_status);
5837 			/* Collect hw completion contents */
5838 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5839 					      &tx_desc->comp, 1);
5840 			soc->arch_ops.dp_tx_process_htt_completion(
5841 							soc,
5842 							tx_desc,
5843 							htt_tx_status,
5844 							ring_id);
5845 		} else {
5846 			tx_desc->tx_status =
5847 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5848 			tx_desc->buffer_src = buffer_src;
5849 			/*
5850 			 * If fast completion mode is enabled, the extended
5851 			 * metadata from the descriptor is not copied.
5852 			 */
5853 			if (qdf_likely(tx_desc->flags &
5854 						DP_TX_DESC_FLAG_SIMPLE))
5855 				goto add_to_pool;
5856 
5857 			/*
5858 			 * If the descriptor was already freed in vdev_detach,
5859 			 * continue to the next descriptor.
5860 			 */
5861 			if (qdf_unlikely
5862 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5863 				 !tx_desc->flags)) {
5864 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5865 						   tx_desc->id);
5866 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5867 				dp_tx_desc_check_corruption(tx_desc);
5868 				continue;
5869 			}
5870 
5871 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5872 				dp_tx_comp_info_rl("pdev in down state %d",
5873 						   tx_desc->id);
5874 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5875 				dp_tx_comp_free_buf(soc, tx_desc, false);
5876 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5877 				goto next_desc;
5878 			}
5879 
5880 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5881 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5882 				dp_tx_comp_alert("Txdesc invalid, flgs = %x, id = %d",
5883 						 tx_desc->flags, tx_desc->id);
5884 				qdf_assert_always(0);
5885 			}
5886 
5887 			/* Collect hw completion contents */
5888 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5889 					      &tx_desc->comp, 1);
5890 add_to_pool:
5891 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5892 
5893 add_to_pool2:
5894 			/* First ring descriptor on the cycle */
5895 			if (!head_desc) {
5896 				head_desc = tx_desc;
5897 				tail_desc = tx_desc;
5898 			}
5899 
5900 			tail_desc->next = tx_desc;
5901 			tx_desc->next = NULL;
5902 			tail_desc = tx_desc;
5903 		}
5904 next_desc:
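		/*
		 * Consume one unit of the quota for every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) descriptors reaped, so Tx
		 * completions are charged against the budget at a reduced
		 * rate.
		 */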
5905 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5906 
5907 		/*
5908 		 * Stop processing once the processed packet count exceeds
5909 		 * the given quota.
5910 		 */
5911 
5912 		count++;
5913 
5914 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5915 					       num_avail_for_reap,
5916 					       hal_ring_hdl,
5917 					       &last_prefetched_hw_desc,
5918 					       &last_prefetched_sw_desc);
5919 
5920 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5921 			break;
5922 	}
5923 
5924 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5925 
5926 	/* Process the reaped descriptors */
5927 	if (head_desc)
5928 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5929 
5930 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5931 
5932 	/*
5933 	 * If we are processing in the near-full condition, there are three
5934 	 * scenarios:
5935 	 * 1) Ring entries have reached the critical state
5936 	 * 2) Ring entries are still near the high threshold
5937 	 * 3) Ring entries are below the safe level
5938 	 * One more loop will move the state to normal processing and yield.
5939 	 */
5940 	if (ring_near_full)
5941 		goto more_data;
5942 
5943 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5944 
5945 		if (num_processed >= quota)
5946 			force_break = true;
5947 
5948 		if (!force_break &&
5949 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5950 						  hal_ring_hdl)) {
5951 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5952 			if (!hif_exec_should_yield(soc->hif_handle,
5953 						   int_ctx->dp_intr_id))
5954 				goto more_data;
5955 
5956 			num_avail_for_reap =
5957 				hal_srng_dst_num_valid_locked(soc->hal_soc,
5958 							      hal_ring_hdl,
5959 							      true);
5960 			if (qdf_unlikely(num_entries &&
5961 					 (num_avail_for_reap >=
5962 					  num_entries >> 1))) {
5963 				DP_STATS_INC(soc, tx.near_full, 1);
5964 				goto more_data;
5965 			}
5966 		}
5967 	}
5968 	DP_TX_HIST_STATS_PER_PDEV();
5969 
5970 	return num_processed;
5971 }
5972 
5973 #ifdef FEATURE_WLAN_TDLS
5974 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5975 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5976 {
5977 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5978 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5979 						     DP_MOD_ID_TDLS);
5980 
5981 	if (!vdev) {
5982 		dp_err("vdev handle for id %d is NULL", vdev_id);
5983 		return NULL;
5984 	}
5985 
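	/*
	 * Mark the vdev as carrying a TDLS frame when the caller requests
	 * that the buffer not be freed, then use the regular send path.
	 */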
5986 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5987 		vdev->is_tdls_frame = true;
5988 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5989 
5990 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5991 }
5992 #endif
5993 
5994 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5995 {
5996 	int pdev_id;
5997 	/*
5998 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5999 	 */
6000 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
6001 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
6002 
6003 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
6004 				       vdev->vdev_id);
6005 
6006 	pdev_id =
6007 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
6008 						       vdev->pdev->pdev_id);
6009 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
6010 
6011 	/*
6012 	 * Set HTT Extension Valid bit to 0 by default
6013 	 */
6014 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
6015 
6016 	dp_tx_vdev_update_search_flags(vdev);
6017 
6018 	return QDF_STATUS_SUCCESS;
6019 }
6020 
6021 #ifndef FEATURE_WDS
6022 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
6023 {
6024 	return false;
6025 }
6026 #endif
6027 
6028 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
6029 {
6030 	struct dp_soc *soc = vdev->pdev->soc;
6031 
6032 	/*
6033 	 * Enable both AddrY (SA-based search) and AddrX (DA-based search)
6034 	 * for TDLS links.
6035 	 *
6036 	 * Enable AddrY (SA-based search) only for non-WDS STA and
6037 	 * ProxySTA VAP (in HKv1) modes.
6038 	 *
6039 	 * In all other VAP modes, only DA-based search should be
6040 	 * enabled.
6041 	 */
6042 	if (vdev->opmode == wlan_op_mode_sta &&
6043 	    vdev->tdls_link_connected)
6044 		vdev->hal_desc_addr_search_flags =
6045 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
6046 	else if ((vdev->opmode == wlan_op_mode_sta) &&
6047 		 !dp_tx_da_search_override(vdev))
6048 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
6049 	else
6050 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
6051 
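	/*
	 * Non-TDLS STA vdevs follow the SoC-level STA search policy; all
	 * other vdevs use the default address search type.
	 */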
6052 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
6053 		vdev->search_type = soc->sta_mode_search_policy;
6054 	else
6055 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
6056 }
6057 
6058 static inline bool
6059 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
6060 			  struct dp_vdev *vdev,
6061 			  struct dp_tx_desc_s *tx_desc)
6062 {
6063 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
6064 		return false;
6065 
6066 	/*
6067 	 * If a vdev is given, only check whether the descriptor's vdev
6068 	 * matches. If vdev is NULL, check whether the descriptor's pdev
6069 	 * matches.
6070 	 */
6071 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
6072 		(tx_desc->pdev == pdev);
6073 }
6074 
6075 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6076 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6077 		      bool force_free)
6078 {
6079 	uint8_t i;
6080 	uint32_t j;
6081 	uint32_t num_desc, page_id, offset;
6082 	uint16_t num_desc_per_page;
6083 	struct dp_soc *soc = pdev->soc;
6084 	struct dp_tx_desc_s *tx_desc = NULL;
6085 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6086 
6087 	if (!vdev && !force_free) {
6088 		dp_err("Reset TX desc vdev, Vdev param is required!");
6089 		return;
6090 	}
6091 
6092 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
6093 		tx_desc_pool = &soc->tx_desc[i];
6094 		if (!(tx_desc_pool->pool_size) ||
6095 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
6096 		    !(tx_desc_pool->desc_pages.cacheable_pages))
6097 			continue;
6098 
6099 		/*
6100 		 * Take the flow pool lock in case the pool gets freed when
6101 		 * all tx_desc are recycled during TX completion handling.
6102 		 * This is not necessary for a force flush because:
6103 		 * a. a double lock would occur if dp_tx_desc_release is
6104 		 *    also trying to acquire it.
6105 		 * b. dp interrupts are disabled before the force TX desc
6106 		 *    flush in dp_pdev_deinit().
6107 		 */
6108 		if (!force_free)
6109 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
6110 		num_desc = tx_desc_pool->pool_size;
6111 		num_desc_per_page =
6112 			tx_desc_pool->desc_pages.num_element_per_page;
6113 		for (j = 0; j < num_desc; j++) {
6114 			page_id = j / num_desc_per_page;
6115 			offset = j % num_desc_per_page;
6116 
6117 			if (qdf_unlikely(!(tx_desc_pool->
6118 					 desc_pages.cacheable_pages)))
6119 				break;
6120 
6121 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6122 
6123 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6124 				/*
6125 				 * Free the TX desc if a force free is
6126 				 * required; otherwise only reset the
6127 				 * vdev ID in this TX desc.
6128 				 */
6129 				if (force_free) {
6130 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6131 					dp_tx_comp_free_buf(soc, tx_desc,
6132 							    false);
6133 					dp_tx_desc_release(tx_desc, i);
6134 				} else {
6135 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6136 				}
6137 			}
6138 		}
6139 		if (!force_free)
6140 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
6141 	}
6142 }
6143 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6144 /**
6145  * dp_tx_desc_reset_vdev() - reset the vdev ID to invalid in a TX desc
6146  *
6147  * @soc: Handle to DP soc structure
6148  * @tx_desc: pointer to one TX desc
6149  * @desc_pool_id: TX Desc pool id
6150  */
6151 static inline void
6152 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
6153 		      uint8_t desc_pool_id)
6154 {
6155 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
6156 
6157 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6158 
6159 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
6160 }
6161 
6162 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6163 		      bool force_free)
6164 {
6165 	uint8_t i, num_pool;
6166 	uint32_t j;
6167 	uint32_t num_desc, page_id, offset;
6168 	uint16_t num_desc_per_page;
6169 	struct dp_soc *soc = pdev->soc;
6170 	struct dp_tx_desc_s *tx_desc = NULL;
6171 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6172 
6173 	if (!vdev && !force_free) {
6174 		dp_err("Reset TX desc vdev, Vdev param is required!");
6175 		return;
6176 	}
6177 
6178 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6179 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6180 
6181 	for (i = 0; i < num_pool; i++) {
6182 		tx_desc_pool = &soc->tx_desc[i];
6183 		if (!tx_desc_pool->desc_pages.cacheable_pages)
6184 			continue;
6185 
6186 		num_desc_per_page =
6187 			tx_desc_pool->desc_pages.num_element_per_page;
6188 		for (j = 0; j < num_desc; j++) {
6189 			page_id = j / num_desc_per_page;
6190 			offset = j % num_desc_per_page;
6191 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6192 
6193 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6194 				if (force_free) {
6195 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6196 					dp_tx_comp_free_buf(soc, tx_desc,
6197 							    false);
6198 					dp_tx_desc_release(tx_desc, i);
6199 				} else {
6200 					dp_tx_desc_reset_vdev(soc, tx_desc,
6201 							      i);
6202 				}
6203 			}
6204 		}
6205 	}
6206 }
6207 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6208 
6209 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
6210 {
6211 	struct dp_pdev *pdev = vdev->pdev;
6212 
6213 	/* Reset the vdev ID in TX descs associated with this vdev */
6214 	dp_tx_desc_flush(pdev, vdev, false);
6215 
6216 	return QDF_STATUS_SUCCESS;
6217 }
6218 
6219 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6220 /* Pools will be allocated dynamically */
6221 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6222 					   int num_desc)
6223 {
6224 	uint8_t i;
6225 
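	/*
	 * With flow control V2 the descriptor pools are created on demand;
	 * only the flow pool locks are created here and each pool is marked
	 * inactive.
	 */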
6226 	for (i = 0; i < num_pool; i++) {
6227 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
6228 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
6229 	}
6230 
6231 	return QDF_STATUS_SUCCESS;
6232 }
6233 
6234 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6235 					  uint32_t num_desc)
6236 {
6237 	return QDF_STATUS_SUCCESS;
6238 }
6239 
6240 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6241 {
6242 }
6243 
6244 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6245 {
6246 	uint8_t i;
6247 
6248 	for (i = 0; i < num_pool; i++)
6249 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
6250 }
6251 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6252 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6253 					   uint32_t num_desc)
6254 {
6255 	uint8_t i, count;
6256 
6257 	/* Allocate software Tx descriptor pools */
6258 	for (i = 0; i < num_pool; i++) {
6259 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
6260 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6261 				  FL("Tx Desc Pool alloc %d failed %pK"),
6262 				  i, soc);
6263 			goto fail;
6264 		}
6265 	}
6266 	return QDF_STATUS_SUCCESS;
6267 
6268 fail:
6269 	for (count = 0; count < i; count++)
6270 		dp_tx_desc_pool_free(soc, count);
6271 
6272 	return QDF_STATUS_E_NOMEM;
6273 }
6274 
6275 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6276 					  uint32_t num_desc)
6277 {
6278 	uint8_t i;
6279 	for (i = 0; i < num_pool; i++) {
6280 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
6281 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6282 				  FL("Tx Desc Pool init %d failed %pK"),
6283 				  i, soc);
6284 			return QDF_STATUS_E_NOMEM;
6285 		}
6286 	}
6287 	return QDF_STATUS_SUCCESS;
6288 }
6289 
6290 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6291 {
6292 	uint8_t i;
6293 
6294 	for (i = 0; i < num_pool; i++)
6295 		dp_tx_desc_pool_deinit(soc, i);
6296 }
6297 
6298 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6299 {
6300 	uint8_t i;
6301 
6302 	for (i = 0; i < num_pool; i++)
6303 		dp_tx_desc_pool_free(soc, i);
6304 }
6305 
6306 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6307 
6308 /**
6309  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
6310  * @soc: core txrx main context
6311  * @num_pool: number of pools
6312  *
6313  * Return: none
6314 static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
6315 {
6316 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
6317 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
6318 }
6319 
6320 /**
6321  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
6322  * @soc: core txrx main context
6323  * @num_pool: number of pools
6324  * Return: none
6325  */
6326 static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
6327 {
6328 	dp_tx_tso_desc_pool_free(soc, num_pool);
6329 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
6330 }
6331 
6332 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6333 {
6334 	uint8_t num_pool;
6335 
6336 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6337 
6338 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6339 	dp_tx_ext_desc_pool_free(soc, num_pool);
6340 	dp_tx_delete_static_pools(soc, num_pool);
6341 }
6342 
6343 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6344 {
6345 	uint8_t num_pool;
6346 
6347 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6348 
6349 	dp_tx_flow_control_deinit(soc);
6350 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6351 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6352 	dp_tx_deinit_static_pools(soc, num_pool);
6353 }
6354 
6355 /**
6356  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
6357  * @soc: DP soc handle
6358  * @num_pool: Number of pools
6359  * @num_desc: Number of descriptors
6360  *
6361  * Reserve TSO descriptor buffers
6362  *
6363  * Return: QDF_STATUS_E_FAILURE on failure or
6364  *         QDF_STATUS_SUCCESS on success
6365  */
6366 static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6367 						uint8_t num_pool,
6368 						uint32_t num_desc)
6369 {
6370 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6371 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6372 		return QDF_STATUS_E_FAILURE;
6373 	}
6374 
6375 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6376 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6377 		       num_pool, soc);
6378 		return QDF_STATUS_E_FAILURE;
6379 	}
6380 	return QDF_STATUS_SUCCESS;
6381 }
6382 
6383 /**
6384  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6385  * @soc: DP soc handle
6386  * @num_pool: Number of pools
6387  * @num_desc: Number of descriptors
6388  *
6389  * Initialize TSO descriptor pools
6390  *
6391  * Return: QDF_STATUS_E_FAILURE on failure or
6392  *         QDF_STATUS_SUCCESS on success
6393  */
6394 
6395 static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6396 					       uint8_t num_pool,
6397 					       uint32_t num_desc)
6398 {
6399 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6400 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
6401 		return QDF_STATUS_E_FAILURE;
6402 	}
6403 
6404 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6405 		dp_err("TSO Num of seg Pool init %d failed %pK",
6406 		       num_pool, soc);
6407 		return QDF_STATUS_E_FAILURE;
6408 	}
6409 	return QDF_STATUS_SUCCESS;
6410 }
6411 
6412 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6413 {
6414 	uint8_t num_pool;
6415 	uint32_t num_desc;
6416 	uint32_t num_ext_desc;
6417 
6418 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6419 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6420 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6421 
6422 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6423 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6424 		  __func__, num_pool, num_desc);
6425 
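	/*
	 * Allocation order: SW Tx descriptor pools, then Tx extension
	 * descriptor pools, then TSO pools (skipped when TSO descriptor
	 * attach is deferred).
	 */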
6426 	if ((num_pool > MAX_TXDESC_POOLS) ||
6427 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6428 		goto fail1;
6429 
6430 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6431 		goto fail1;
6432 
6433 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
6434 		goto fail2;
6435 
6436 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6437 		return QDF_STATUS_SUCCESS;
6438 
6439 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6440 		goto fail3;
6441 
6442 	return QDF_STATUS_SUCCESS;
6443 
6444 fail3:
6445 	dp_tx_ext_desc_pool_free(soc, num_pool);
6446 fail2:
6447 	dp_tx_delete_static_pools(soc, num_pool);
6448 fail1:
6449 	return QDF_STATUS_E_RESOURCES;
6450 }
6451 
6452 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6453 {
6454 	uint8_t num_pool;
6455 	uint32_t num_desc;
6456 	uint32_t num_ext_desc;
6457 
6458 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6459 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6460 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6461 
6462 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6463 		goto fail1;
6464 
6465 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6466 		goto fail2;
6467 
6468 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6469 		return QDF_STATUS_SUCCESS;
6470 
6471 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6472 		goto fail3;
6473 
6474 	dp_tx_flow_control_init(soc);
6475 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6476 	return QDF_STATUS_SUCCESS;
6477 
6478 fail3:
6479 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6480 fail2:
6481 	dp_tx_deinit_static_pools(soc, num_pool);
6482 fail1:
6483 	return QDF_STATUS_E_RESOURCES;
6484 }
6485 
6486 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6487 {
6488 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6489 	uint8_t num_pool;
6490 	uint32_t num_ext_desc;
6491 
6492 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6493 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6494 
6495 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6496 		return QDF_STATUS_E_FAILURE;
6497 
6498 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6499 		return QDF_STATUS_E_FAILURE;
6500 
6501 	return QDF_STATUS_SUCCESS;
6502 }
6503 
6504 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6505 {
6506 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6507 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6508 
6509 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6510 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6511 
6512 	return QDF_STATUS_SUCCESS;
6513 }
6514 
6515 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6516 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6517 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6518 			  qdf_nbuf_t nbuf)
6519 {
6520 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6521 		uint64_t tsf_time;
6522 
6523 		if (vdev->get_tsf_time) {
6524 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6525 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6526 		}
6527 	}
6528 }
6529 
6530 void dp_pkt_get_timestamp(uint64_t *time)
6531 {
6532 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6533 		*time = qdf_get_log_timestamp();
6534 }
6535 #endif
6536 
6537