xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject */
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 
72 #ifdef QCA_DP_TX_FW_METADATA_V2
73 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
74 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
75 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
76 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
77 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
78 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
82 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
85 #define DP_TCL_METADATA_TYPE_PEER_BASED \
86 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
87 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
88 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
89 #else
90 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
91 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
92 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
93 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
94 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
95 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
102 #define DP_TCL_METADATA_TYPE_PEER_BASED \
103 	HTT_TCL_METADATA_TYPE_PEER_BASED
104 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
105 	HTT_TCL_METADATA_TYPE_VDEV_BASED
106 #endif
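/*
 * Illustrative usage sketch (not part of the driver logic): callers are
 * expected to build the TCL metadata word through the wrappers above so the
 * same code works for both the legacy and the V2 HTT layouts. The local
 * variable below is hypothetical.
 *
 *	uint16_t htt_tcl_metadata = 0;
 *
 *	DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
 *				    DP_TCL_METADATA_TYPE_VDEV_BASED);
 *	DP_TX_TCL_METADATA_VDEV_ID_SET(htt_tcl_metadata, vdev->vdev_id);
 */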
107 
108 /* mapping between hal encrypt type and cdp_sec_type */
109 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
110 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
111 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
112 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
113 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
114 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
115 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
116 					  HAL_TX_ENCRYPT_TYPE_WAPI,
117 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
118 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
119 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
121 qdf_export_symbol(sec_type_map);
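/*
 * Illustrative lookup, assuming the cdp_sec_type enum ordering mirrors the
 * table above (which is how this map is meant to be indexed):
 *
 *	sec_type_map[cdp_sec_type_tkip] -> HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC
 *	sec_type_map[cdp_sec_type_wapi] -> HAL_TX_ENCRYPT_TYPE_WAPI
 */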
122 
123 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
124 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
125 {
126 	enum dp_tx_event_type type;
127 
128 	if (flags & DP_TX_DESC_FLAG_FLUSH)
129 		type = DP_TX_DESC_FLUSH;
130 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
131 		type = DP_TX_COMP_UNMAP_ERR;
132 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
133 		type = DP_TX_COMP_UNMAP;
134 	else
135 		type = DP_TX_DESC_UNMAP;
136 
137 	return type;
138 }
139 
140 static inline void
141 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
142 		       qdf_nbuf_t skb, uint32_t sw_cookie,
143 		       enum dp_tx_event_type type)
144 {
145 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
146 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
147 	struct dp_tx_desc_event *entry;
148 	uint32_t idx;
149 	uint16_t slot;
150 
151 	switch (type) {
152 	case DP_TX_COMP_UNMAP:
153 	case DP_TX_COMP_UNMAP_ERR:
154 	case DP_TX_COMP_MSDU_EXT:
155 		if (qdf_unlikely(!tx_comp_history->allocated))
156 			return;
157 
158 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
159 						 &slot,
160 						 DP_TX_COMP_HIST_SLOT_SHIFT,
161 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
162 						 DP_TX_COMP_HISTORY_SIZE);
163 		entry = &tx_comp_history->entry[slot][idx];
164 		break;
165 	case DP_TX_DESC_MAP:
166 	case DP_TX_DESC_UNMAP:
167 	case DP_TX_DESC_COOKIE:
168 	case DP_TX_DESC_FLUSH:
169 		if (qdf_unlikely(!tx_tcl_history->allocated))
170 			return;
171 
172 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
173 						 &slot,
174 						 DP_TX_TCL_HIST_SLOT_SHIFT,
175 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
176 						 DP_TX_TCL_HISTORY_SIZE);
177 		entry = &tx_tcl_history->entry[slot][idx];
178 		break;
179 	default:
180 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
181 		return;
182 	}
183 
184 	entry->skb = skb;
185 	entry->paddr = paddr;
186 	entry->sw_cookie = sw_cookie;
187 	entry->type = type;
188 	entry->ts = qdf_get_log_timestamp();
189 }
190 
191 static inline void
192 dp_tx_tso_seg_history_add(struct dp_soc *soc,
193 			  struct qdf_tso_seg_elem_t *tso_seg,
194 			  qdf_nbuf_t skb, uint32_t sw_cookie,
195 			  enum dp_tx_event_type type)
196 {
197 	int i;
198 
199 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
200 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
201 				       skb, sw_cookie, type);
202 	}
203 
204 	if (!tso_seg->next)
205 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
206 				       skb, 0xFFFFFFFF, type);
207 }
208 
209 static inline void
210 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
211 		      qdf_nbuf_t skb, uint32_t sw_cookie,
212 		      enum dp_tx_event_type type)
213 {
214 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
215 	uint32_t num_segs = tso_info.num_segs;
216 
217 	while (num_segs) {
218 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
219 		curr_seg = curr_seg->next;
220 		num_segs--;
221 	}
222 }
223 
224 #else
225 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
226 {
227 	return DP_TX_DESC_INVAL_EVT;
228 }
229 
230 static inline void
231 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
232 		       qdf_nbuf_t skb, uint32_t sw_cookie,
233 		       enum dp_tx_event_type type)
234 {
235 }
236 
237 static inline void
238 dp_tx_tso_seg_history_add(struct dp_soc *soc,
239 			  struct qdf_tso_seg_elem_t *tso_seg,
240 			  qdf_nbuf_t skb, uint32_t sw_cookie,
241 			  enum dp_tx_event_type type)
242 {
243 }
244 
245 static inline void
246 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
247 		      qdf_nbuf_t skb, uint32_t sw_cookie,
248 		      enum dp_tx_event_type type)
249 {
250 }
251 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
252 
253 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
254 
255 /**
256  * dp_is_tput_high() - Check if throughput is high
257  *
258  * @soc - core txrx main context
259  *
260  * The current function is based on the RTPM tput policy variable, where RTPM is
261  * avoided based on throughput.
262  */
263 static inline int dp_is_tput_high(struct dp_soc *soc)
264 {
265 	return dp_get_rtpm_tput_policy_requirement(soc);
266 }
267 
268 #if defined(FEATURE_TSO)
269 /**
270  * dp_tx_tso_unmap_segment() - Unmap TSO segment
271  *
272  * @soc - core txrx main context
273  * @seg_desc - tso segment descriptor
274  * @num_seg_desc - tso number segment descriptor
275  */
276 static void dp_tx_tso_unmap_segment(
277 		struct dp_soc *soc,
278 		struct qdf_tso_seg_elem_t *seg_desc,
279 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
280 {
281 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
282 	if (qdf_unlikely(!seg_desc)) {
283 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
284 			 __func__, __LINE__);
285 		qdf_assert(0);
286 	} else if (qdf_unlikely(!num_seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else {
291 		bool is_last_seg;
292 		/* no tso segment left to do dma unmap */
293 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
294 			return;
295 
296 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
297 					true : false;
298 		qdf_nbuf_unmap_tso_segment(soc->osdev,
299 					   seg_desc, is_last_seg);
300 		num_seg_desc->num_seg.tso_cmn_num_seg--;
301 	}
302 }
303 
304 /**
305  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
306  *                            back to the freelist
307  *
308  * @soc - soc device handle
309  * @tx_desc - Tx software descriptor
310  */
311 static void dp_tx_tso_desc_release(struct dp_soc *soc,
312 				   struct dp_tx_desc_s *tx_desc)
313 {
314 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
315 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
316 		dp_tx_err("TSO desc is NULL!");
317 		qdf_assert(0);
318 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
319 		dp_tx_err("TSO num desc is NULL!");
320 		qdf_assert(0);
321 	} else {
322 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
323 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
324 				msdu_ext_desc->tso_num_desc;
325 
326 		/* Add the tso num segment into the free list */
327 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
328 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
329 					    tx_desc->msdu_ext_desc->
330 					    tso_num_desc);
331 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
332 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
333 		}
334 
335 		/* Add the tso segment into the free list */
336 		dp_tx_tso_desc_free(soc,
337 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
338 				    tso_desc);
339 		tx_desc->msdu_ext_desc->tso_desc = NULL;
340 	}
341 }
342 #else
343 static void dp_tx_tso_unmap_segment(
344 		struct dp_soc *soc,
345 		struct qdf_tso_seg_elem_t *seg_desc,
346 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
347 
348 {
349 }
350 
351 static void dp_tx_tso_desc_release(struct dp_soc *soc,
352 				   struct dp_tx_desc_s *tx_desc)
353 {
354 }
355 #endif
356 
357 /**
358  * dp_tx_desc_release() - Release Tx Descriptor
359  * @tx_desc: Tx Descriptor
360  * @desc_pool_id: Descriptor Pool ID
361  *
362  * Deallocate all resources attached to Tx descriptor and free the Tx
363  * descriptor.
364  *
365  * Return:
366  */
367 void
368 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
369 {
370 	struct dp_pdev *pdev = tx_desc->pdev;
371 	struct dp_soc *soc;
372 	uint8_t comp_status = 0;
373 
374 	qdf_assert(pdev);
375 
376 	soc = pdev->soc;
377 
378 	dp_tx_outstanding_dec(pdev);
379 
380 	if (tx_desc->msdu_ext_desc) {
381 		if (tx_desc->frm_type == dp_tx_frm_tso)
382 			dp_tx_tso_desc_release(soc, tx_desc);
383 
384 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
385 			dp_tx_me_free_buf(tx_desc->pdev,
386 					  tx_desc->msdu_ext_desc->me_buffer);
387 
388 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
389 	}
390 
391 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
392 		qdf_atomic_dec(&soc->num_tx_exception);
393 
394 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
395 				tx_desc->buffer_src)
396 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
397 							     soc->hal_soc);
398 	else
399 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
400 
401 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
402 		    tx_desc->id, comp_status,
403 		    qdf_atomic_read(&pdev->num_tx_outstanding));
404 
405 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
406 	return;
407 }
408 
409 /**
410  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
411  * @vdev: DP vdev Handle
412  * @nbuf: skb
413  * @msdu_info: msdu_info required to create HTT metadata
414  *
415  * Prepares and fills HTT metadata in the frame pre-header for special frames
416  * that should be transmitted using varying transmit parameters.
417  * There are 2 VDEV modes that currently need this special metadata -
418  *  1) Mesh Mode
419  *  2) DSRC Mode
420  *
421  * Return: HTT metadata size
422  *
423  */
424 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
425 					  struct dp_tx_msdu_info_s *msdu_info)
426 {
427 	uint32_t *meta_data = msdu_info->meta_data;
428 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
429 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
430 
431 	uint8_t htt_desc_size;
432 
433 	/* Size rounded off to a multiple of 8 bytes */
434 	uint8_t htt_desc_size_aligned;
435 
436 	uint8_t *hdr = NULL;
437 
438 	/*
439 	 * Metadata - HTT MSDU Extension header
440 	 */
441 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
442 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
443 
444 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
445 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
446 							   meta_data[0]) ||
447 	    msdu_info->exception_fw) {
448 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
449 				 htt_desc_size_aligned)) {
450 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
451 							 htt_desc_size_aligned);
452 			if (!nbuf) {
453 				/*
454 				 * qdf_nbuf_realloc_headroom won't do skb_clone
455 				 * as skb_realloc_headroom does. so, no free is
456 				 * needed here.
457 				 */
458 				DP_STATS_INC(vdev,
459 					     tx_i.dropped.headroom_insufficient,
460 					     1);
461 				qdf_print(" %s[%d] skb_realloc_headroom failed",
462 					  __func__, __LINE__);
463 				return 0;
464 			}
465 		}
466 		/* Fill and add HTT metaheader */
467 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
468 		if (!hdr) {
469 			dp_tx_err("Error in filling HTT metadata");
470 
471 			return 0;
472 		}
473 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
474 
475 	} else if (vdev->opmode == wlan_op_mode_ocb) {
476 		/* Todo - Add support for DSRC */
477 	}
478 
479 	return htt_desc_size_aligned;
480 }
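/*
 * Worked example for the 8-byte round-up above (size value assumed): a
 * descriptor of, say, 36 bytes is padded to (36 + 7) & ~0x7 = 40 bytes, so
 * the metadata pushed in front of the payload keeps the buffer start
 * 8-byte aligned.
 */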
481 
482 /**
483  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
484  * @tso_seg: TSO segment to process
485  * @ext_desc: Pointer to MSDU extension descriptor
486  *
487  * Return: void
488  */
489 #if defined(FEATURE_TSO)
490 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
491 		void *ext_desc)
492 {
493 	uint8_t num_frag;
494 	uint32_t tso_flags;
495 
496 	/*
497 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
498 	 * tcp_flag_mask
499 	 *
500 	 * Checksum enable flags are set in TCL descriptor and not in Extension
501 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
502 	 */
503 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
504 
505 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
506 
507 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
508 		tso_seg->tso_flags.ip_len);
509 
510 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
511 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
512 
513 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
514 		uint32_t lo = 0;
515 		uint32_t hi = 0;
516 
517 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
518 				  (tso_seg->tso_frags[num_frag].length));
519 
520 		qdf_dmaaddr_to_32s(
521 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
522 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
523 			tso_seg->tso_frags[num_frag].length);
524 	}
525 
526 	return;
527 }
528 #else
529 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
530 		void *ext_desc)
531 {
532 	return;
533 }
534 #endif
535 
536 #if defined(FEATURE_TSO)
537 /**
538  * dp_tx_free_tso_seg_list() - Loop through the tso segments
539  *                             allocated and free them
540  *
541  * @soc: soc handle
542  * @free_seg: list of tso segments
543  * @msdu_info: msdu descriptor
544  *
545  * Return - void
546  */
547 static void dp_tx_free_tso_seg_list(
548 		struct dp_soc *soc,
549 		struct qdf_tso_seg_elem_t *free_seg,
550 		struct dp_tx_msdu_info_s *msdu_info)
551 {
552 	struct qdf_tso_seg_elem_t *next_seg;
553 
554 	while (free_seg) {
555 		next_seg = free_seg->next;
556 		dp_tx_tso_desc_free(soc,
557 				    msdu_info->tx_queue.desc_pool_id,
558 				    free_seg);
559 		free_seg = next_seg;
560 	}
561 }
562 
563 /**
564  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
565  *                                 allocated and free them
566  *
567  * @soc:  soc handle
568  * @free_num_seg: list of tso number segments
569  * @msdu_info: msdu descriptor
570  * Return - void
571  */
572 static void dp_tx_free_tso_num_seg_list(
573 		struct dp_soc *soc,
574 		struct qdf_tso_num_seg_elem_t *free_num_seg,
575 		struct dp_tx_msdu_info_s *msdu_info)
576 {
577 	struct qdf_tso_num_seg_elem_t *next_num_seg;
578 
579 	while (free_num_seg) {
580 		next_num_seg = free_num_seg->next;
581 		dp_tso_num_seg_free(soc,
582 				    msdu_info->tx_queue.desc_pool_id,
583 				    free_num_seg);
584 		free_num_seg = next_num_seg;
585 	}
586 }
587 
588 /**
589  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
590  *                              do dma unmap for each segment
591  *
592  * @soc: soc handle
593  * @free_seg: list of tso segments
594  * @num_seg_desc: tso number segment descriptor
595  *
596  * Return - void
597  */
598 static void dp_tx_unmap_tso_seg_list(
599 		struct dp_soc *soc,
600 		struct qdf_tso_seg_elem_t *free_seg,
601 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
602 {
603 	struct qdf_tso_seg_elem_t *next_seg;
604 
605 	if (qdf_unlikely(!num_seg_desc)) {
606 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
607 		return;
608 	}
609 
610 	while (free_seg) {
611 		next_seg = free_seg->next;
612 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
613 		free_seg = next_seg;
614 	}
615 }
616 
617 #ifdef FEATURE_TSO_STATS
618 /**
619  * dp_tso_get_stats_idx() - Retrieve the tso packet id
620  * @pdev - pdev handle
621  *
622  * Return: id
623  */
624 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
625 {
626 	uint32_t stats_idx;
627 
628 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
629 						% CDP_MAX_TSO_PACKETS);
630 	return stats_idx;
631 }
632 #else
633 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
634 {
635 	return 0;
636 }
637 #endif /* FEATURE_TSO_STATS */
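/*
 * Illustrative behaviour: dp_tso_get_stats_idx() wraps an atomically
 * incremented counter, so with CDP_MAX_TSO_PACKETS == N the returned ids
 * cycle 1, 2, ..., N - 1, 0, 1, ... and index the per-pdev TSO stats entries.
 */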
638 
639 /**
640  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
641  *				     free the tso segments descriptor and
642  *				     tso num segments descriptor
643  *
644  * @soc:  soc handle
645  * @msdu_info: msdu descriptor
646  * @tso_seg_unmap: flag to show if dma unmap is necessary
647  *
648  * Return - void
649  */
650 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
651 					  struct dp_tx_msdu_info_s *msdu_info,
652 					  bool tso_seg_unmap)
653 {
654 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
655 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
656 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
657 					tso_info->tso_num_seg_list;
658 
659 	/* do dma unmap for each segment */
660 	if (tso_seg_unmap)
661 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
662 
663 	/* free all tso number segment descriptors (typically there is only one) */
664 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
665 
666 	/* free all tso segment descriptor */
667 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
668 }
669 
670 /**
671  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
672  * @vdev: virtual device handle
673  * @msdu: network buffer
674  * @msdu_info: meta data associated with the msdu
675  *
676  * Return: QDF_STATUS_SUCCESS success
677  */
678 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
679 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
680 {
681 	struct qdf_tso_seg_elem_t *tso_seg;
682 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
683 	struct dp_soc *soc = vdev->pdev->soc;
684 	struct dp_pdev *pdev = vdev->pdev;
685 	struct qdf_tso_info_t *tso_info;
686 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
687 	tso_info = &msdu_info->u.tso_info;
688 	tso_info->curr_seg = NULL;
689 	tso_info->tso_seg_list = NULL;
690 	tso_info->num_segs = num_seg;
691 	msdu_info->frm_type = dp_tx_frm_tso;
692 	tso_info->tso_num_seg_list = NULL;
693 
694 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
695 
696 	while (num_seg) {
697 		tso_seg = dp_tx_tso_desc_alloc(
698 				soc, msdu_info->tx_queue.desc_pool_id);
699 		if (tso_seg) {
700 			tso_seg->next = tso_info->tso_seg_list;
701 			tso_info->tso_seg_list = tso_seg;
702 			num_seg--;
703 		} else {
704 			dp_err_rl("Failed to alloc tso seg desc");
705 			DP_STATS_INC_PKT(vdev->pdev,
706 					 tso_stats.tso_no_mem_dropped, 1,
707 					 qdf_nbuf_len(msdu));
708 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
709 
710 			return QDF_STATUS_E_NOMEM;
711 		}
712 	}
713 
714 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
715 
716 	tso_num_seg = dp_tso_num_seg_alloc(soc,
717 			msdu_info->tx_queue.desc_pool_id);
718 
719 	if (tso_num_seg) {
720 		tso_num_seg->next = tso_info->tso_num_seg_list;
721 		tso_info->tso_num_seg_list = tso_num_seg;
722 	} else {
723 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
724 			 __func__);
725 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
726 
727 		return QDF_STATUS_E_NOMEM;
728 	}
729 
730 	msdu_info->num_seg =
731 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
732 
733 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
734 			msdu_info->num_seg);
735 
736 	if (!(msdu_info->num_seg)) {
737 		/*
738 		 * Free allocated TSO seg desc and number seg desc,
739 		 * do unmap for segments if dma map has been done.
740 		 */
741 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
742 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
743 
744 		return QDF_STATUS_E_INVAL;
745 	}
746 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
747 			      msdu, 0, DP_TX_DESC_MAP);
748 
749 	tso_info->curr_seg = tso_info->tso_seg_list;
750 
751 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
752 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
753 			     msdu, msdu_info->num_seg);
754 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
755 				    tso_info->msdu_stats_idx);
756 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
757 	return QDF_STATUS_SUCCESS;
758 }
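/*
 * Illustrative example (numbers assumed): a TSO skb carrying ~9000 bytes of
 * TCP payload with an MSS of 1500 is reported by qdf_nbuf_get_tso_num_seg()
 * as six segments, so six qdf_tso_seg_elem_t descriptors plus one
 * qdf_tso_num_seg_elem_t are taken from the pools before
 * qdf_nbuf_get_tso_info() fills them and maps the fragments for DMA.
 */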
759 #else
760 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
761 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
762 {
763 	return QDF_STATUS_E_NOMEM;
764 }
765 #endif
766 
767 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
768 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
769 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
770 
771 /**
772  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
773  * @vdev: DP Vdev handle
774  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
775  * @desc_pool_id: Descriptor Pool ID
776  *
777  * Return:
778  */
779 static
780 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
781 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
782 {
783 	uint8_t i;
784 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
785 	struct dp_tx_seg_info_s *seg_info;
786 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
787 	struct dp_soc *soc = vdev->pdev->soc;
788 
789 	/* Allocate an extension descriptor */
790 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
791 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
792 
793 	if (!msdu_ext_desc) {
794 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
795 		return NULL;
796 	}
797 
798 	if (msdu_info->exception_fw &&
799 			qdf_unlikely(vdev->mesh_vdev)) {
800 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
801 				&msdu_info->meta_data[0],
802 				sizeof(struct htt_tx_msdu_desc_ext2_t));
803 		qdf_atomic_inc(&soc->num_tx_exception);
804 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
805 	}
806 
807 	switch (msdu_info->frm_type) {
808 	case dp_tx_frm_sg:
809 	case dp_tx_frm_me:
810 	case dp_tx_frm_raw:
811 		seg_info = msdu_info->u.sg_info.curr_seg;
812 		/* Update the buffer pointers in MSDU Extension Descriptor */
813 		for (i = 0; i < seg_info->frag_cnt; i++) {
814 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
815 				seg_info->frags[i].paddr_lo,
816 				seg_info->frags[i].paddr_hi,
817 				seg_info->frags[i].len);
818 		}
819 
820 		break;
821 
822 	case dp_tx_frm_tso:
823 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
824 				&cached_ext_desc[0]);
825 		break;
826 
827 
828 	default:
829 		break;
830 	}
831 
832 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
833 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
834 
835 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
836 			msdu_ext_desc->vaddr);
837 
838 	return msdu_ext_desc;
839 }
840 
841 /**
842  * dp_tx_trace_pkt() - Trace TX packet at DP layer
843  * @soc: DP soc handle
844  * @skb: skb to be traced
845  * @msdu_id: msdu_id of the packet
846  * @vdev_id: vdev_id of the packet
847  *
848  * Return: None
849  */
850 #ifdef DP_DISABLE_TX_PKT_TRACE
851 static void dp_tx_trace_pkt(struct dp_soc *soc,
852 			    qdf_nbuf_t skb, uint16_t msdu_id,
853 			    uint8_t vdev_id)
854 {
855 }
856 #else
857 static void dp_tx_trace_pkt(struct dp_soc *soc,
858 			    qdf_nbuf_t skb, uint16_t msdu_id,
859 			    uint8_t vdev_id)
860 {
861 	if (dp_is_tput_high(soc))
862 		return;
863 
864 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
865 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
866 	DPTRACE(qdf_dp_trace_ptr(skb,
867 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
868 				 QDF_TRACE_DEFAULT_PDEV_ID,
869 				 qdf_nbuf_data_addr(skb),
870 				 sizeof(qdf_nbuf_data(skb)),
871 				 msdu_id, vdev_id, 0));
872 
873 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
874 
875 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
876 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
877 				      msdu_id, QDF_TX));
878 }
879 #endif
880 
881 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
882 /**
883  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
884  *				      exception by the upper layer (OS_IF)
885  * @soc: DP soc handle
886  * @nbuf: packet to be transmitted
887  *
888  * Returns: 1 if the packet is marked as exception,
889  *	    0, if the packet is not marked as exception.
890  */
891 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
892 						 qdf_nbuf_t nbuf)
893 {
894 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
895 }
896 #else
897 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
898 						 qdf_nbuf_t nbuf)
899 {
900 	return 0;
901 }
902 #endif
903 
904 #ifdef DP_TRAFFIC_END_INDICATION
905 /**
906  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
907  *                                          as indication to fw to inform that
908  *                                          data stream has ended
909  * @vdev: DP vdev handle
910  * @nbuf: original buffer from network stack
911  *
912  * Return: NULL on failure,
913  *         nbuf on success
914  */
915 static inline qdf_nbuf_t
916 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
917 				     qdf_nbuf_t nbuf)
918 {
919 	/* Packet length should be enough to copy up to the L3 header */
920 	uint8_t end_nbuf_len = 64;
921 	uint8_t htt_desc_size_aligned;
922 	uint8_t htt_desc_size;
923 	qdf_nbuf_t end_nbuf;
924 
925 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
926 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
927 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
928 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
929 
930 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
931 		if (!end_nbuf) {
932 			end_nbuf = qdf_nbuf_alloc(NULL,
933 						  (htt_desc_size_aligned +
934 						  end_nbuf_len),
935 						  htt_desc_size_aligned,
936 						  8, false);
937 			if (!end_nbuf) {
938 				dp_err("Packet allocation failed");
939 				goto out;
940 			}
941 		} else {
942 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
943 		}
944 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
945 			     end_nbuf_len);
946 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
947 
948 		return end_nbuf;
949 	}
950 out:
951 	return NULL;
952 }
953 
954 /**
955  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
956  *                                           via exception path.
957  * @vdev: DP vdev handle
958  * @end_nbuf: skb to send as indication
959  * @msdu_info: msdu_info of original nbuf
960  * @peer_id: peer id
961  *
962  * Return: None
963  */
964 static inline void
965 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
966 				      qdf_nbuf_t end_nbuf,
967 				      struct dp_tx_msdu_info_s *msdu_info,
968 				      uint16_t peer_id)
969 {
970 	struct dp_tx_msdu_info_s e_msdu_info = {0};
971 	qdf_nbuf_t nbuf;
972 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
973 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
974 	e_msdu_info.tx_queue = msdu_info->tx_queue;
975 	e_msdu_info.tid = msdu_info->tid;
976 	e_msdu_info.exception_fw = 1;
977 	desc_ext->host_tx_desc_pool = 1;
978 	desc_ext->traffic_end_indication = 1;
979 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
980 				      peer_id, NULL);
981 	if (nbuf) {
982 		dp_err("Traffic end indication packet tx failed");
983 		qdf_nbuf_free(nbuf);
984 	}
985 }
986 
987 /**
988  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
989  *                                                mark it as a traffic end indication
990  *                                                packet.
991  * @tx_desc: Tx descriptor pointer
992  * @msdu_info: msdu_info structure pointer
993  *
994  * Return: None
995  */
996 static inline void
997 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
998 					   struct dp_tx_msdu_info_s *msdu_info)
999 {
1000 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1001 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1002 
1003 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1004 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1005 }
1006 
1007 /**
1008  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
1009  *                                              freeing it, when it is associated
1010  *                                              with a traffic end indication
1011  *                                              flagged descriptor.
1012  * @soc: dp soc handle
1013  * @desc: Tx descriptor pointer
1014  * @nbuf: buffer pointer
1015  *
1016  * Return: True if packet gets enqueued else false
1017  */
1018 static bool
1019 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1020 					 struct dp_tx_desc_s *desc,
1021 					 qdf_nbuf_t nbuf)
1022 {
1023 	struct dp_vdev *vdev = NULL;
1024 
1025 	if (qdf_unlikely((desc->flags &
1026 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1027 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1028 					     DP_MOD_ID_TX_COMP);
1029 		if (vdev) {
1030 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1031 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1032 			return true;
1033 		}
1034 	}
1035 	return false;
1036 }
1037 
1038 /**
1039  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1040  *                                             enable/disable status
1041  * @vdev: dp vdev handle
1042  *
1043  * Return: True if the feature is enabled, else false
1044  */
1045 static inline bool
1046 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1047 {
1048 	return qdf_unlikely(vdev->traffic_end_ind_en);
1049 }
1050 
1051 static inline qdf_nbuf_t
1052 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1053 			       struct dp_tx_msdu_info_s *msdu_info,
1054 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1055 {
1056 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1057 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1058 
1059 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1060 
1061 	if (qdf_unlikely(end_nbuf))
1062 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1063 						      msdu_info, peer_id);
1064 	return nbuf;
1065 }
1066 #else
1067 static inline qdf_nbuf_t
1068 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1069 				     qdf_nbuf_t nbuf)
1070 {
1071 	return NULL;
1072 }
1073 
1074 static inline void
1075 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1076 				      qdf_nbuf_t end_nbuf,
1077 				      struct dp_tx_msdu_info_s *msdu_info,
1078 				      uint16_t peer_id)
1079 {}
1080 
1081 static inline void
1082 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1083 					   struct dp_tx_msdu_info_s *msdu_info)
1084 {}
1085 
1086 static inline bool
1087 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1088 					 struct dp_tx_desc_s *desc,
1089 					 qdf_nbuf_t nbuf)
1090 {
1091 	return false;
1092 }
1093 
1094 static inline bool
1095 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1096 {
1097 	return false;
1098 }
1099 
1100 static inline qdf_nbuf_t
1101 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1102 			       struct dp_tx_msdu_info_s *msdu_info,
1103 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1104 {
1105 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1106 }
1107 #endif
1108 
1109 /**
1110  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1111  * @vdev: DP vdev handle
1112  * @nbuf: skb
1113  * @desc_pool_id: Descriptor pool ID
1114  * @msdu_info: MSDU info carrying metadata for the fw
1115  * @tx_exc_metadata: Handle that holds exception path metadata
1116  * Allocate and prepare Tx descriptor with msdu information.
1117  *
1118  * Return: Pointer to Tx Descriptor on success,
1119  *         NULL on failure
1120  */
1121 static
1122 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1123 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1124 		struct dp_tx_msdu_info_s *msdu_info,
1125 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1126 {
1127 	uint8_t align_pad;
1128 	uint8_t is_exception = 0;
1129 	uint8_t htt_hdr_size;
1130 	struct dp_tx_desc_s *tx_desc;
1131 	struct dp_pdev *pdev = vdev->pdev;
1132 	struct dp_soc *soc = pdev->soc;
1133 
1134 	if (dp_tx_limit_check(vdev))
1135 		return NULL;
1136 
1137 	/* Allocate software Tx descriptor */
1138 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1139 
1140 	if (qdf_unlikely(!tx_desc)) {
1141 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1142 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1143 		return NULL;
1144 	}
1145 
1146 	dp_tx_outstanding_inc(pdev);
1147 
1148 	/* Initialize the SW tx descriptor */
1149 	tx_desc->nbuf = nbuf;
1150 	tx_desc->frm_type = dp_tx_frm_std;
1151 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1152 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1153 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1154 	tx_desc->vdev_id = vdev->vdev_id;
1155 	tx_desc->pdev = pdev;
1156 	tx_desc->msdu_ext_desc = NULL;
1157 	tx_desc->pkt_offset = 0;
1158 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1159 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1160 
1161 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1162 
1163 	if (qdf_unlikely(vdev->multipass_en)) {
1164 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1165 			goto failure;
1166 	}
1167 
1168 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1169 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1170 		is_exception = 1;
1171 	/*
1172 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1173 	 * transmitted using varying transmit parameters (tx spec) which include
1174 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
1175 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1176 	 * These frames are sent as exception packets to firmware.
1177 	 *
1178 	 * HW requirement is that metadata should always point to a
1179 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1180 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1181 	 *  to get 8-byte aligned start address along with align_pad added
1182 	 *
1183 	 *  |-----------------------------|
1184 	 *  |                             |
1185 	 *  |-----------------------------| <-----Buffer Pointer Address given
1186 	 *  |                             |  ^    in HW descriptor (aligned)
1187 	 *  |       HTT Metadata          |  |
1188 	 *  |                             |  |
1189 	 *  |                             |  | Packet Offset given in descriptor
1190 	 *  |                             |  |
1191 	 *  |-----------------------------|  |
1192 	 *  |       Alignment Pad         |  v
1193 	 *  |-----------------------------| <----- Actual buffer start address
1194 	 *  |        SKB Data             |           (Unaligned)
1195 	 *  |                             |
1196 	 *  |                             |
1197 	 *  |                             |
1198 	 *  |                             |
1199 	 *  |                             |
1200 	 *  |-----------------------------|
1201 	 */
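	/*
	 * Worked example (pointer value assumed): if qdf_nbuf_data(nbuf)
	 * ends in 0x...b, align_pad = 0xb & 0x7 = 3. Pushing 3 pad bytes plus
	 * a 40-byte aligned HTT header moves the buffer start to an 8-byte
	 * aligned address and pkt_offset becomes 3 + 40 = 43.
	 */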
1202 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1203 				(vdev->opmode == wlan_op_mode_ocb) ||
1204 				(tx_exc_metadata &&
1205 				tx_exc_metadata->is_tx_sniffer)) {
1206 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1207 
1208 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1209 			DP_STATS_INC(vdev,
1210 				     tx_i.dropped.headroom_insufficient, 1);
1211 			goto failure;
1212 		}
1213 
1214 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1215 			dp_tx_err("qdf_nbuf_push_head failed");
1216 			goto failure;
1217 		}
1218 
1219 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1220 				msdu_info);
1221 		if (htt_hdr_size == 0)
1222 			goto failure;
1223 
1224 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1225 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1226 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1227 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1228 							   msdu_info);
1229 		is_exception = 1;
1230 		tx_desc->length -= tx_desc->pkt_offset;
1231 	}
1232 
1233 #if !TQM_BYPASS_WAR
1234 	if (is_exception || tx_exc_metadata)
1235 #endif
1236 	{
1237 		/* Temporary WAR due to TQM VP issues */
1238 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1239 		qdf_atomic_inc(&soc->num_tx_exception);
1240 	}
1241 
1242 	return tx_desc;
1243 
1244 failure:
1245 	dp_tx_desc_release(tx_desc, desc_pool_id);
1246 	return NULL;
1247 }
1248 
1249 /**
1250  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1251  * @vdev: DP vdev handle
1252  * @nbuf: skb
1253  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1254  * @desc_pool_id : Descriptor Pool ID
1255  *
1256  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1257  * information. For frames with fragments, allocate and prepare
1258  * an MSDU extension descriptor
1259  *
1260  * Return: Pointer to Tx Descriptor on success,
1261  *         NULL on failure
1262  */
1263 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1264 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1265 		uint8_t desc_pool_id)
1266 {
1267 	struct dp_tx_desc_s *tx_desc;
1268 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1269 	struct dp_pdev *pdev = vdev->pdev;
1270 	struct dp_soc *soc = pdev->soc;
1271 
1272 	if (dp_tx_limit_check(vdev))
1273 		return NULL;
1274 
1275 	/* Allocate software Tx descriptor */
1276 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1277 	if (!tx_desc) {
1278 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1279 		return NULL;
1280 	}
1281 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1282 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1283 
1284 	dp_tx_outstanding_inc(pdev);
1285 
1286 	/* Initialize the SW tx descriptor */
1287 	tx_desc->nbuf = nbuf;
1288 	tx_desc->frm_type = msdu_info->frm_type;
1289 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1290 	tx_desc->vdev_id = vdev->vdev_id;
1291 	tx_desc->pdev = pdev;
1292 	tx_desc->pkt_offset = 0;
1293 
1294 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1295 
1296 	/* Handle scattered frames - TSO/SG/ME */
1297 	/* Allocate and prepare an extension descriptor for scattered frames */
1298 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1299 	if (!msdu_ext_desc) {
1300 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1301 		goto failure;
1302 	}
1303 
1304 #if TQM_BYPASS_WAR
1305 	/* Temporary WAR due to TQM VP issues */
1306 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1307 	qdf_atomic_inc(&soc->num_tx_exception);
1308 #endif
1309 	if (qdf_unlikely(msdu_info->exception_fw))
1310 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1311 
1312 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1313 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1314 
1315 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1316 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1317 
1318 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1319 
1320 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1321 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1322 	else
1323 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1324 
1325 	return tx_desc;
1326 failure:
1327 	dp_tx_desc_release(tx_desc, desc_pool_id);
1328 	return NULL;
1329 }
1330 
1331 /**
1332  * dp_tx_prepare_raw() - Prepare RAW packet TX
1333  * @vdev: DP vdev handle
1334  * @nbuf: buffer pointer
1335  * @seg_info: Pointer to Segment info Descriptor to be prepared
1336  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1337  *     descriptor
1338  *
1339  * Return: nbuf on success, NULL on failure
1340  */
1341 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1342 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1343 {
1344 	qdf_nbuf_t curr_nbuf = NULL;
1345 	uint16_t total_len = 0;
1346 	qdf_dma_addr_t paddr;
1347 	int32_t i;
1348 	int32_t mapped_buf_num = 0;
1349 
1350 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1351 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1352 
1353 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1354 
1355 	/* Continue only if frames are of DATA type */
1356 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1357 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1358 		dp_tx_debug("Pkt. recd is not of data type");
1359 		goto error;
1360 	}
1361 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1362 	if (vdev->raw_mode_war &&
1363 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1364 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1365 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1366 
1367 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1368 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1369 		/*
1370 		 * Number of nbuf's must not exceed the size of the frags
1371 		 * array in seg_info.
1372 		 */
1373 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1374 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1375 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1376 			goto error;
1377 		}
1378 		if (QDF_STATUS_SUCCESS !=
1379 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1380 						   curr_nbuf,
1381 						   QDF_DMA_TO_DEVICE,
1382 						   curr_nbuf->len)) {
1383 			dp_tx_err("%s dma map error ", __func__);
1384 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1385 			goto error;
1386 		}
1387 		/* Update the count of mapped nbuf's */
1388 		mapped_buf_num++;
1389 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1390 		seg_info->frags[i].paddr_lo = paddr;
1391 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1392 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1393 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1394 		total_len += qdf_nbuf_len(curr_nbuf);
1395 	}
1396 
1397 	seg_info->frag_cnt = i;
1398 	seg_info->total_len = total_len;
1399 	seg_info->next = NULL;
1400 
1401 	sg_info->curr_seg = seg_info;
1402 
1403 	msdu_info->frm_type = dp_tx_frm_raw;
1404 	msdu_info->num_seg = 1;
1405 
1406 	return nbuf;
1407 
1408 error:
1409 	i = 0;
1410 	while (nbuf) {
1411 		curr_nbuf = nbuf;
1412 		if (i < mapped_buf_num) {
1413 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1414 						     QDF_DMA_TO_DEVICE,
1415 						     curr_nbuf->len);
1416 			i++;
1417 		}
1418 		nbuf = qdf_nbuf_next(nbuf);
1419 		qdf_nbuf_free(curr_nbuf);
1420 	}
1421 	return NULL;
1422 
1423 }
1424 
1425 /**
1426  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1427  * @soc: DP soc handle
1428  * @nbuf: Buffer pointer
1429  *
1430  * unmap the chain of nbufs that belong to this RAW frame.
1431  *
1432  * Return: None
1433  */
1434 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1435 				    qdf_nbuf_t nbuf)
1436 {
1437 	qdf_nbuf_t cur_nbuf = nbuf;
1438 
1439 	do {
1440 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1441 					     QDF_DMA_TO_DEVICE,
1442 					     cur_nbuf->len);
1443 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1444 	} while (cur_nbuf);
1445 }
1446 
1447 #ifdef VDEV_PEER_PROTOCOL_COUNT
1448 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1449 					       qdf_nbuf_t nbuf)
1450 {
1451 	qdf_nbuf_t nbuf_local;
1452 	struct dp_vdev *vdev_local = vdev_hdl;
1453 
1454 	do {
1455 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1456 			break;
1457 		nbuf_local = nbuf;
1458 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1459 			 htt_cmn_pkt_type_raw))
1460 			break;
1461 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1462 			break;
1463 		else if (qdf_nbuf_is_tso((nbuf_local)))
1464 			break;
1465 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1466 						       (nbuf_local),
1467 						       NULL, 1, 0);
1468 	} while (0);
1469 }
1470 #endif
1471 
1472 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1473 /**
1474  * dp_tx_update_stats() - Update soc level tx stats
1475  * @soc: DP soc handle
1476  * @tx_desc: TX descriptor reference
1477  * @ring_id: TCL ring id
1478  *
1479  * Returns: none
1480  */
1481 void dp_tx_update_stats(struct dp_soc *soc,
1482 			struct dp_tx_desc_s *tx_desc,
1483 			uint8_t ring_id)
1484 {
1485 	uint32_t stats_len = 0;
1486 
1487 	if (tx_desc->frm_type == dp_tx_frm_tso)
1488 		stats_len  = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1489 	else
1490 		stats_len = qdf_nbuf_len(tx_desc->nbuf);
1491 
1492 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1493 }
1494 
1495 int
1496 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1497 			 struct dp_tx_desc_s *tx_desc,
1498 			 uint8_t tid,
1499 			 struct dp_tx_msdu_info_s *msdu_info,
1500 			 uint8_t ring_id)
1501 {
1502 	struct dp_swlm *swlm = &soc->swlm;
1503 	union swlm_data swlm_query_data;
1504 	struct dp_swlm_tcl_data tcl_data;
1505 	QDF_STATUS status;
1506 	int ret;
1507 
1508 	if (!swlm->is_enabled)
1509 		return msdu_info->skip_hp_update;
1510 
1511 	tcl_data.nbuf = tx_desc->nbuf;
1512 	tcl_data.tid = tid;
1513 	tcl_data.ring_id = ring_id;
1514 	if (tx_desc->frm_type == dp_tx_frm_tso) {
1515 		tcl_data.pkt_len  =
1516 			tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1517 	} else {
1518 		tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
1519 	}
1520 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1521 	swlm_query_data.tcl_data = &tcl_data;
1522 
1523 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1524 	if (QDF_IS_STATUS_ERROR(status)) {
1525 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1526 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1527 		return 0;
1528 	}
1529 
1530 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1531 	if (ret) {
1532 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1533 	} else {
1534 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1535 	}
1536 
1537 	return ret;
1538 }
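/*
 * Illustrative call pattern (a sketch, not the exact enqueue code): the
 * return value is used as the "coalesce" hint for the ring access end, e.g.
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *					    msdu_info, ring_id);
 *	...
 *	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 *
 * so a successful SWLM query defers the TCL head pointer update.
 */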
1539 
1540 void
1541 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1542 		      int coalesce)
1543 {
1544 	if (coalesce)
1545 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1546 	else
1547 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1548 }
1549 
1550 static inline void
1551 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1552 {
1553 	if (((i + 1) < msdu_info->num_seg))
1554 		msdu_info->skip_hp_update = 1;
1555 	else
1556 		msdu_info->skip_hp_update = 0;
1557 }
1558 
1559 static inline void
1560 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1561 {
1562 	hal_ring_handle_t hal_ring_hdl =
1563 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1564 
1565 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1566 		dp_err("Fillmore: SRNG access start failed");
1567 		return;
1568 	}
1569 
1570 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1571 }
1572 
1573 static inline void
1574 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1575 			 QDF_STATUS status,
1576 			 struct dp_tx_msdu_info_s *msdu_info)
1577 {
1578 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1579 		dp_flush_tcp_hp(soc,
1580 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1581 	}
1582 }
1583 #else
1584 static inline void
1585 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1586 {
1587 }
1588 
1589 static inline void
1590 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1591 			 QDF_STATUS status,
1592 			 struct dp_tx_msdu_info_s *msdu_info)
1593 {
1594 }
1595 #endif
1596 
1597 #ifdef FEATURE_RUNTIME_PM
1598 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1599 {
1600 	int ret;
1601 
1602 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1603 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1604 	return ret;
1605 }
1606 /**
1607  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1608  * @soc: Datapath soc handle
1609  * @hal_ring_hdl: HAL ring handle
1610  * @coalesce: Coalesce the current write or not
1611  *
1612  * Wrapper for HAL ring access end for data transmission for
1613  * FEATURE_RUNTIME_PM
1614  *
1615  * Returns: none
1616  */
1617 void
1618 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1619 			      hal_ring_handle_t hal_ring_hdl,
1620 			      int coalesce)
1621 {
1622 	int ret;
1623 
1624 	/*
1625 	 * Avoid runtime get and put APIs under high throughput scenarios.
1626 	 */
1627 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1628 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1629 		return;
1630 	}
1631 
1632 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1633 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1634 		if (hif_system_pm_state_check(soc->hif_handle) ||
1635 					qdf_unlikely(soc->is_tx_pause)) {
1636 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1637 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1638 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1639 		} else {
1640 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1641 		}
1642 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1643 	} else {
1644 		dp_runtime_get(soc);
1645 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1646 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1647 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1648 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1649 		dp_runtime_put(soc);
1650 	}
1651 }
1652 #else
1653 
1654 #ifdef DP_POWER_SAVE
1655 void
1656 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1657 			      hal_ring_handle_t hal_ring_hdl,
1658 			      int coalesce)
1659 {
1660 	if (hif_system_pm_state_check(soc->hif_handle) ||
1661 					qdf_unlikely(soc->is_tx_pause)) {
1662 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1663 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1664 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1665 	} else {
1666 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1667 	}
1668 }
1669 #endif
1670 
1671 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1672 {
1673 	return 0;
1674 }
1675 #endif
1676 
1677 /**
1678  * dp_tx_get_tid() - Obtain TID to be used for this frame
1679  * @vdev: DP vdev handle
1680  * @nbuf: skb
1681  * @msdu_info: msdu_info structure in which the TID is recorded
1682  * Extract the DSCP or PCP information from frame and map into TID value.
1683  *
1684  * Return: void
1685  */
1686 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1687 			  struct dp_tx_msdu_info_s *msdu_info)
1688 {
1689 	uint8_t tos = 0, dscp_tid_override = 0;
1690 	uint8_t *hdr_ptr, *L3datap;
1691 	uint8_t is_mcast = 0;
1692 	qdf_ether_header_t *eh = NULL;
1693 	qdf_ethervlan_header_t *evh = NULL;
1694 	uint16_t   ether_type;
1695 	qdf_llc_t *llcHdr;
1696 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1697 
1698 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1699 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1700 		eh = (qdf_ether_header_t *)nbuf->data;
1701 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1702 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1703 	} else {
1704 		qdf_dot3_qosframe_t *qos_wh =
1705 			(qdf_dot3_qosframe_t *) nbuf->data;
1706 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1707 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1708 		return;
1709 	}
1710 
1711 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1712 	ether_type = eh->ether_type;
1713 
1714 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1715 	/*
1716 	 * Check if packet is dot3 or eth2 type.
1717 	 */
1718 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1719 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1720 				sizeof(*llcHdr));
1721 
1722 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1723 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1724 				sizeof(*llcHdr);
1725 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1726 					+ sizeof(*llcHdr) +
1727 					sizeof(qdf_net_vlanhdr_t));
1728 		} else {
1729 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1730 				sizeof(*llcHdr);
1731 		}
1732 	} else {
1733 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1734 			evh = (qdf_ethervlan_header_t *) eh;
1735 			ether_type = evh->ether_type;
1736 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1737 		}
1738 	}
1739 
1740 	/*
1741 	 * Find priority from IP TOS DSCP field
1742 	 */
1743 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1744 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1745 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1746 			/* Only for unicast frames */
1747 			if (!is_mcast) {
1748 				/* send it on VO queue */
1749 				msdu_info->tid = DP_VO_TID;
1750 			}
1751 		} else {
1752 			/*
1753 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1754 			 * from TOS byte.
1755 			 */
1756 			tos = ip->ip_tos;
1757 			dscp_tid_override = 1;
1758 
1759 		}
1760 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1761 		/* TODO
1762 		 * use flowlabel
1763 		 * IGMP/MLD cases to be handled in phase 2
1764 		 */
1765 		unsigned long ver_pri_flowlabel;
1766 		unsigned long pri;
1767 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1768 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1769 			DP_IPV6_PRIORITY_SHIFT;
1770 		tos = pri;
1771 		dscp_tid_override = 1;
1772 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1773 		msdu_info->tid = DP_VO_TID;
1774 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1775 		/* Only for unicast frames */
1776 		if (!is_mcast) {
1777 			/* send ucast arp on VO queue */
1778 			msdu_info->tid = DP_VO_TID;
1779 		}
1780 	}
1781 
1782 	/*
1783 	 * Assign all MCAST packets to BE
1784 	 */
1785 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1786 		if (is_mcast) {
1787 			tos = 0;
1788 			dscp_tid_override = 1;
1789 		}
1790 	}
1791 
1792 	if (dscp_tid_override == 1) {
1793 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1794 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1795 	}
1796 
1797 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1798 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1799 
1800 	return;
1801 }
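
/*
 * Illustrative example only (not part of the driver): assuming the
 * conventional DSCP field layout, i.e. DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f, a ToS byte of 0xb8 (DSCP 46, Expedited
 * Forwarding) resolves as
 *
 *	tos = (0xb8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;	// 46
 *	tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46];
 *
 * and the resulting TID is then clamped to CDP_MAX_DATA_TIDS - 1 above.
 */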
1802 
1803 /**
1804  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1805  * @vdev: DP vdev handle
1806  * @nbuf: skb
 * @msdu_info: MSDU info to be updated with the classified TID
1807  *
1808  * Software based TID classification is required when more than 2 DSCP-TID
1809  * mapping tables are needed.
1810  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1811  *
1812  * Return: void
1813  */
1814 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1815 				      struct dp_tx_msdu_info_s *msdu_info)
1816 {
1817 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1818 
1819 	/*
1820 	 * The skip_sw_tid_classification flag will be set in the below cases:
1821 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1822 	 * 2. hlos_tid_override enabled for vdev
1823 	 * 3. mesh mode enabled for vdev
1824 	 */
1825 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1826 		/* Update tid in msdu_info from skb priority */
1827 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1828 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1829 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1830 
1831 			if (tid == DP_TX_INVALID_QOS_TAG)
1832 				return;
1833 
1834 			msdu_info->tid = tid;
1835 			return;
1836 		}
1837 		return;
1838 	}
1839 
1840 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1841 }
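
/*
 * Illustrative example only: when DP_TXRX_HLOS_TID_OVERRIDE_ENABLED is set
 * in vdev->skip_sw_tid_classification, the TID comes straight from the skb
 * priority set by the OS shim before the frame reaches the DP layer, e.g.
 *
 *	qdf_nbuf_set_priority(nbuf, 6);		// request TID 6
 *	...
 *	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
 *	// msdu_info.tid is now 6; no DSCP/PCP parsing is done
 *
 * A priority of DP_TX_INVALID_QOS_TAG leaves msdu_info.tid untouched.
 */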
1842 
1843 #ifdef FEATURE_WLAN_TDLS
1844 /**
1845  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1846  * @soc: datapath SOC
1847  * @vdev: datapath vdev
1848  * @tx_desc: TX descriptor
1849  *
1850  * Return: None
1851  */
1852 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1853 				    struct dp_vdev *vdev,
1854 				    struct dp_tx_desc_s *tx_desc)
1855 {
1856 	if (vdev) {
1857 		if (vdev->is_tdls_frame) {
1858 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1859 			vdev->is_tdls_frame = false;
1860 		}
1861 	}
1862 }
1863 
1864 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1865 {
1866 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1867 
1868 	switch (soc->arch_id) {
1869 	case CDP_ARCH_TYPE_LI:
1870 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1871 		break;
1872 
1873 	case CDP_ARCH_TYPE_BE:
1874 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1875 		break;
1876 
1877 	default:
1878 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1879 		QDF_BUG(0);
1880 	}
1881 
1882 	return tx_status;
1883 }
1884 
1885 /**
1886  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1887  * @soc: dp_soc handle
1888  * @tx_desc: TX descriptor
1890  *
1891  * Return: None
1892  */
1893 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1894 					 struct dp_tx_desc_s *tx_desc)
1895 {
1896 	uint8_t tx_status = 0;
1897 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1898 
1899 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1900 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1901 						     DP_MOD_ID_TDLS);
1902 
1903 	if (qdf_unlikely(!vdev)) {
1904 		dp_err_rl("vdev is null!");
1905 		goto error;
1906 	}
1907 
1908 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1909 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1910 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1911 
1912 	if (vdev->tx_non_std_data_callback.func) {
1913 		qdf_nbuf_set_next(nbuf, NULL);
1914 		vdev->tx_non_std_data_callback.func(
1915 				vdev->tx_non_std_data_callback.ctxt,
1916 				nbuf, tx_status);
1917 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1918 		return;
1919 	} else {
1920 		dp_err_rl("callback func is null");
1921 	}
1922 
1923 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1924 error:
1925 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1926 	qdf_nbuf_free(nbuf);
1927 }
1928 
1929 /**
1930  * dp_tx_msdu_single_map() - do nbuf map
1931  * @vdev: DP vdev handle
1932  * @tx_desc: DP TX descriptor pointer
1933  * @nbuf: skb pointer
1934  *
1935  * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
1936  * operation done by the other component.
1937  *
1938  * Return: QDF_STATUS
1939  */
1940 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1941 					       struct dp_tx_desc_s *tx_desc,
1942 					       qdf_nbuf_t nbuf)
1943 {
1944 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1945 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1946 						  nbuf,
1947 						  QDF_DMA_TO_DEVICE,
1948 						  nbuf->len);
1949 	else
1950 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1951 					   QDF_DMA_TO_DEVICE);
1952 }
1953 #else
1954 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1955 					   struct dp_vdev *vdev,
1956 					   struct dp_tx_desc_s *tx_desc)
1957 {
1958 }
1959 
1960 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1961 						struct dp_tx_desc_s *tx_desc)
1962 {
1963 }
1964 
1965 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1966 					       struct dp_tx_desc_s *tx_desc,
1967 					       qdf_nbuf_t nbuf)
1968 {
1969 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1970 					  nbuf,
1971 					  QDF_DMA_TO_DEVICE,
1972 					  nbuf->len);
1973 }
1974 #endif
1975 
1976 static inline
1977 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1978 				      struct dp_tx_desc_s *tx_desc,
1979 				      qdf_nbuf_t nbuf)
1980 {
1981 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1982 
1983 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1984 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1985 		return 0;
1986 
1987 	return qdf_nbuf_mapped_paddr_get(nbuf);
1988 }
1989 
1990 static inline
1991 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1992 {
1993 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1994 					   desc->nbuf,
1995 					   desc->dma_addr,
1996 					   QDF_DMA_TO_DEVICE,
1997 					   desc->length);
1998 }
1999 
2000 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2001 static inline
2002 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2003 			      struct dp_tx_desc_s *tx_desc,
2004 			      qdf_nbuf_t nbuf)
2005 {
2006 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2007 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2008 					 (void *)(nbuf->data + nbuf->len));
2009 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2010 	} else {
2011 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2012 	}
2013 }
2014 
2015 static inline
2016 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2017 		      struct dp_tx_desc_s *desc)
2018 {
2019 	if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)))
2020 		return dp_tx_nbuf_unmap_regular(soc, desc);
2021 }
2022 #else
2023 static inline
2024 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2025 			      struct dp_tx_desc_s *tx_desc,
2026 			      qdf_nbuf_t nbuf)
2027 {
2028 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2029 }
2030 
2031 static inline
2032 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2033 		      struct dp_tx_desc_s *desc)
2034 {
2035 	return dp_tx_nbuf_unmap_regular(soc, desc);
2036 }
2037 #endif
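
/*
 * Note on the two map paths above (a sketch, not normative): with
 * QCA_DP_TX_NBUF_NO_MAP_UNMAP enabled, a descriptor carrying
 * DP_TX_DESC_FLAG_SIMPLE is never DMA-mapped; its data is only
 * cache-cleaned and the physical address is derived directly:
 *
 *	qdf_nbuf_dma_clean_range((void *)nbuf->data,
 *				 (void *)(nbuf->data + nbuf->len));
 *	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
 *
 * which is why dp_tx_nbuf_unmap() skips the unmap for SIMPLE descriptors.
 * All other frames (and BUILD_X86 builds) go through the regular
 * qdf_nbuf_map_nbytes_single()/unmap pair.
 */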
2038 
2039 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2040 static inline
2041 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2042 {
2043 	dp_tx_nbuf_unmap(soc, desc);
2044 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2045 }
2046 
2047 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2048 {
2049 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2050 		dp_tx_nbuf_unmap(soc, desc);
2051 }
2052 #else
2053 static inline
2054 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2055 {
2056 }
2057 
2058 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2059 {
2060 	dp_tx_nbuf_unmap(soc, desc);
2061 }
2062 #endif
2063 
2064 #ifdef MESH_MODE_SUPPORT
2065 /**
2066  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2067  * @soc: datapath SOC
2068  * @vdev: datapath vdev
2069  * @tx_desc: TX descriptor
2070  *
2071  * Return: None
2072  */
2073 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2074 					   struct dp_vdev *vdev,
2075 					   struct dp_tx_desc_s *tx_desc)
2076 {
2077 	if (qdf_unlikely(vdev->mesh_vdev))
2078 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2079 }
2080 
2081 /**
2082  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2083  * @soc: dp_soc handle
2084  * @tx_desc: TX descriptor
2085  * @delayed_free: delay the nbuf free
2086  *
2087  * Return: nbuf to be freed late
2088  */
2089 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2090 						   struct dp_tx_desc_s *tx_desc,
2091 						   bool delayed_free)
2092 {
2093 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2094 	struct dp_vdev *vdev = NULL;
2095 
2096 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2097 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2098 		if (vdev)
2099 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2100 
2101 		if (delayed_free)
2102 			return nbuf;
2103 
2104 		qdf_nbuf_free(nbuf);
2105 	} else {
2106 		if (vdev && vdev->osif_tx_free_ext) {
2107 			vdev->osif_tx_free_ext((nbuf));
2108 		} else {
2109 			if (delayed_free)
2110 				return nbuf;
2111 
2112 			qdf_nbuf_free(nbuf);
2113 		}
2114 	}
2115 
2116 	if (vdev)
2117 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2118 
2119 	return NULL;
2120 }
2121 #else
2122 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2123 					   struct dp_vdev *vdev,
2124 					   struct dp_tx_desc_s *tx_desc)
2125 {
2126 }
2127 
2128 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2129 						   struct dp_tx_desc_s *tx_desc,
2130 						   bool delayed_free)
2131 {
2132 	return NULL;
2133 }
2134 #endif
2135 
2136 /**
2137  * dp_tx_frame_is_drop() - checks if the packet is loopback
2138  * @vdev: DP vdev handle
2139  * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
2140  *
2141  * Return: 1 if frame needs to be dropped else 0
2142  */
2143 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2144 {
2145 	struct dp_pdev *pdev = NULL;
2146 	struct dp_ast_entry *src_ast_entry = NULL;
2147 	struct dp_ast_entry *dst_ast_entry = NULL;
2148 	struct dp_soc *soc = NULL;
2149 
2150 	qdf_assert(vdev);
2151 	pdev = vdev->pdev;
2152 	qdf_assert(pdev);
2153 	soc = pdev->soc;
2154 
2155 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2156 				(soc, dstmac, vdev->pdev->pdev_id);
2157 
2158 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2159 				(soc, srcmac, vdev->pdev->pdev_id);
2160 	if (dst_ast_entry && src_ast_entry) {
2161 		if (dst_ast_entry->peer_id ==
2162 				src_ast_entry->peer_id)
2163 			return 1;
2164 	}
2165 
2166 	return 0;
2167 }
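
/*
 * Illustrative caller sketch only (the variable names are hypothetical):
 * dp_tx_frame_is_drop() catches intra-peer loopback, i.e. a frame whose
 * source and destination MAC both resolve to the same AST peer on this
 * pdev:
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost)) {
 *		qdf_nbuf_free(nbuf);	// same peer_id on both ends: drop
 *		return NULL;
 *	}
 */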
2168 
2169 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2170 	defined(WLAN_MCAST_MLO)
2171 /* MLO peer id for reinject */
2172 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2173 /* MLO vdev id inc offset */
2174 #define DP_MLO_VDEV_ID_OFFSET 0x80
2175 
2176 static inline void
2177 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2178 {
2179 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2180 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2181 		qdf_atomic_inc(&soc->num_tx_exception);
2182 	}
2183 }
2184 
2185 static inline void
2186 dp_tx_update_mcast_param(uint16_t peer_id,
2187 			 uint16_t *htt_tcl_metadata,
2188 			 struct dp_vdev *vdev,
2189 			 struct dp_tx_msdu_info_s *msdu_info)
2190 {
2191 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2192 		*htt_tcl_metadata = 0;
2193 		DP_TX_TCL_METADATA_TYPE_SET(
2194 				*htt_tcl_metadata,
2195 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2196 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2197 						    msdu_info->gsn);
2198 
2199 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2200 		if (qdf_unlikely(vdev->nawds_enabled))
2201 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2202 							*htt_tcl_metadata, 1);
2203 	} else {
2204 		msdu_info->vdev_id = vdev->vdev_id;
2205 	}
2206 }
2207 #else
2208 static inline void
2209 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2210 {
2211 }
2212 
2213 static inline void
2214 dp_tx_update_mcast_param(uint16_t peer_id,
2215 			 uint16_t *htt_tcl_metadata,
2216 			 struct dp_vdev *vdev,
2217 			 struct dp_tx_msdu_info_s *msdu_info)
2218 {
2219 }
2220 #endif
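
/*
 * Worked instance (illustrative only): for a reinjected MLO multicast frame
 * on vdev_id 2 with msdu_info->gsn == 0x15, dp_tx_update_mcast_param()
 * programs
 *
 *	*htt_tcl_metadata  = 0, then GLOBAL_SEQ_BASED type + gsn 0x15;
 *	msdu_info->vdev_id = 2 + DP_MLO_VDEV_ID_OFFSET = 0x82;
 *
 * Ordinary frames (peer_id != DP_MLO_MCAST_REINJECT_PEER_ID) simply keep
 * msdu_info->vdev_id = vdev->vdev_id and leave the metadata untouched.
 */
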
2221 /**
2222  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2223  * @vdev: DP vdev handle
2224  * @nbuf: skb
 * @msdu_info: MSDU info (TID, fw metadata and Tx queue for this frame)
2228  * @peer_id: peer_id of the peer in case of NAWDS frames
2229  * @tx_exc_metadata: Handle that holds exception path metadata
2230  *
2231  * Return: NULL on success,
2232  *         nbuf when it fails to send
2233  */
2234 qdf_nbuf_t
2235 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2236 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2237 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2238 {
2239 	struct dp_pdev *pdev = vdev->pdev;
2240 	struct dp_soc *soc = pdev->soc;
2241 	struct dp_tx_desc_s *tx_desc;
2242 	QDF_STATUS status;
2243 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2244 	uint16_t htt_tcl_metadata = 0;
2245 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2246 	uint8_t tid = msdu_info->tid;
2247 	struct cdp_tid_tx_stats *tid_stats = NULL;
2248 	qdf_dma_addr_t paddr;
2249 
2250 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2251 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2252 			msdu_info, tx_exc_metadata);
2253 	if (!tx_desc) {
2254 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2255 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2256 		drop_code = TX_DESC_ERR;
2257 		goto fail_return;
2258 	}
2259 
2260 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2261 
2262 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2263 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2264 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2265 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2266 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2267 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2268 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2269 					       peer_id);
2270 		dp_tx_bypass_reinjection(soc, tx_desc);
2271 	} else
2272 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2273 
2274 	if (msdu_info->exception_fw)
2275 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2276 
2277 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2278 					 !pdev->enhanced_stats_en);
2279 
2280 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2281 
2282 	paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2283 	if (!paddr) {
2284 		/* Handle failure */
2285 		dp_err("qdf_nbuf_map failed");
2286 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2287 		drop_code = TX_DMA_MAP_ERR;
2288 		goto release_desc;
2289 	}
2290 
2291 	tx_desc->dma_addr = paddr;
2292 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2293 			       tx_desc->id, DP_TX_DESC_MAP);
2294 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2295 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2296 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2297 					     htt_tcl_metadata,
2298 					     tx_exc_metadata, msdu_info);
2299 
2300 	if (status != QDF_STATUS_SUCCESS) {
2301 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2302 			     tx_desc, tx_q->ring_id);
2303 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2304 				       tx_desc->id, DP_TX_DESC_UNMAP);
2305 		dp_tx_nbuf_unmap(soc, tx_desc);
2306 		drop_code = TX_HW_ENQUEUE;
2307 		goto release_desc;
2308 	}
2309 
2310 	return NULL;
2311 
2312 release_desc:
2313 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2314 
2315 fail_return:
2316 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2317 	tid_stats = &pdev->stats.tid_stats.
2318 		    tid_tx_stats[tx_q->ring_id][tid];
2319 	tid_stats->swdrop_cnt[drop_code]++;
2320 	return nbuf;
2321 }
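
/*
 * Minimal caller sketch (illustrative only; it roughly mirrors what
 * dp_tx_send_exception() below does for a single linear frame):
 *
 *	struct dp_tx_msdu_info_s msdu_info = {0};
 *
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
 *				      DP_INVALID_PEER, NULL);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	// enqueue failed, caller still owns it
 *
 * A NULL return means the descriptor was handed to the hardware and the
 * nbuf is released later from the Tx completion path.
 */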
2322 
2323 /**
2324  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2325  * @soc: Soc handle
2326  * @desc: software Tx descriptor to be processed
2327  * @delayed_free: defer freeing of nbuf
2328  *
2329  * Return: nbuf to be freed later
2330  */
2331 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2332 			       bool delayed_free)
2333 {
2334 	qdf_nbuf_t nbuf = desc->nbuf;
2335 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2336 
2337 	/* nbuf already freed in vdev detach path */
2338 	if (!nbuf)
2339 		return NULL;
2340 
2341 	/* If it is TDLS mgmt, don't unmap or free the frame */
2342 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2343 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2344 		return NULL;
2345 	}
2346 
2347 	/* 0 : MSDU buffer, 1 : MLE */
2348 	if (desc->msdu_ext_desc) {
2349 		/* TSO free */
2350 		if (hal_tx_ext_desc_get_tso_enable(
2351 					desc->msdu_ext_desc->vaddr)) {
2352 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2353 					       desc->id, DP_TX_COMP_MSDU_EXT);
2354 			dp_tx_tso_seg_history_add(soc,
2355 						  desc->msdu_ext_desc->tso_desc,
2356 						  desc->nbuf, desc->id, type);
2357 			/* unmap each TSO seg before freeing the nbuf */
2358 			dp_tx_tso_unmap_segment(soc,
2359 						desc->msdu_ext_desc->tso_desc,
2360 						desc->msdu_ext_desc->
2361 						tso_num_desc);
2362 			goto nbuf_free;
2363 		}
2364 
2365 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2366 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2367 			qdf_dma_addr_t iova;
2368 			uint32_t frag_len;
2369 			uint32_t i;
2370 
2371 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2372 						     QDF_DMA_TO_DEVICE,
2373 						     qdf_nbuf_headlen(nbuf));
2374 
2375 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2376 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2377 							      &iova,
2378 							      &frag_len);
2379 				if (!iova || !frag_len)
2380 					break;
2381 
2382 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2383 						   QDF_DMA_TO_DEVICE);
2384 			}
2385 
2386 			goto nbuf_free;
2387 		}
2388 	}
2389 	/* If it's an ME frame, don't unmap the cloned nbufs */
2390 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2391 		goto nbuf_free;
2392 
2393 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2394 	dp_tx_unmap(soc, desc);
2395 
2396 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2397 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2398 
2399 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2400 		return NULL;
2401 
2402 nbuf_free:
2403 	if (delayed_free)
2404 		return nbuf;
2405 
2406 	qdf_nbuf_free(nbuf);
2407 
2408 	return NULL;
2409 }
2410 
2411 /**
2412  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2413  * @soc: DP soc handle
2414  * @nbuf: skb
2415  * @msdu_info: MSDU info
2416  *
2417  * Return: None
2418  */
2419 static inline void
2420 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2421 		   struct dp_tx_msdu_info_s *msdu_info)
2422 {
2423 	uint32_t cur_idx;
2424 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2425 
2426 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2427 				     qdf_nbuf_headlen(nbuf));
2428 
2429 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2430 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2431 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2432 				    seg->frags[cur_idx].paddr_hi) << 32),
2433 				   seg->frags[cur_idx].len,
2434 				   QDF_DMA_TO_DEVICE);
2435 }
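
/*
 * Note on the address arithmetic above (illustrative): each SG fragment
 * stores its DMA address split into 32-bit halves, so the full
 * qdf_dma_addr_t is reassembled as
 *
 *	paddr = (qdf_dma_addr_t)(seg->frags[i].paddr_lo |
 *				 ((uint64_t)seg->frags[i].paddr_hi) << 32);
 *
 * e.g. paddr_lo = 0x9abc0000 and paddr_hi = 0x1 give 0x19abc0000.
 */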
2436 
2437 /**
2438  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2439  * @vdev: DP vdev handle
2440  * @nbuf: skb
2441  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2442  *
2443  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2444  *
2445  * Return: NULL on success,
2446  *         nbuf when it fails to send
2447  */
2448 #if QDF_LOCK_STATS
2449 noinline
2450 #else
2451 #endif
2452 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2453 				    struct dp_tx_msdu_info_s *msdu_info)
2454 {
2455 	uint32_t i;
2456 	struct dp_pdev *pdev = vdev->pdev;
2457 	struct dp_soc *soc = pdev->soc;
2458 	struct dp_tx_desc_s *tx_desc;
2459 	bool is_cce_classified = false;
2460 	QDF_STATUS status;
2461 	uint16_t htt_tcl_metadata = 0;
2462 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2463 	struct cdp_tid_tx_stats *tid_stats = NULL;
2464 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2465 
2466 	if (msdu_info->frm_type == dp_tx_frm_me)
2467 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2468 
2469 	i = 0;
2470 	/* Print statement to track i and num_seg */
2471 	/*
2472 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2473 	 * descriptors using information in msdu_info
2474 	 */
2475 	while (i < msdu_info->num_seg) {
2476 		/*
2477 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2478 		 * descriptor
2479 		 */
2480 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2481 				tx_q->desc_pool_id);
2482 
2483 		if (!tx_desc) {
2484 			if (msdu_info->frm_type == dp_tx_frm_me) {
2485 				prep_desc_fail++;
2486 				dp_tx_me_free_buf(pdev,
2487 					(void *)(msdu_info->u.sg_info
2488 						.curr_seg->frags[0].vaddr));
2489 				if (prep_desc_fail == msdu_info->num_seg) {
2490 					/*
2491 					 * Unmap is needed only if descriptor
2492 					 * preparation failed for all segments.
2493 					 */
2494 					qdf_nbuf_unmap(soc->osdev,
2495 						       msdu_info->u.sg_info.
2496 						       curr_seg->nbuf,
2497 						       QDF_DMA_TO_DEVICE);
2498 				}
2499 				/*
2500 				 * Free the nbuf for the current segment
2501 				 * and make it point to the next in the list.
2502 				 * For ME frames, there are as many segments
2503 				 * as there are clients.
2504 				 */
2505 				qdf_nbuf_free(msdu_info->u.sg_info
2506 					      .curr_seg->nbuf);
2507 				if (msdu_info->u.sg_info.curr_seg->next) {
2508 					msdu_info->u.sg_info.curr_seg =
2509 						msdu_info->u.sg_info
2510 						.curr_seg->next;
2511 					nbuf = msdu_info->u.sg_info
2512 					       .curr_seg->nbuf;
2513 				}
2514 				i++;
2515 				continue;
2516 			}
2517 
2518 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2519 				dp_tx_tso_seg_history_add(
2520 						soc,
2521 						msdu_info->u.tso_info.curr_seg,
2522 						nbuf, 0, DP_TX_DESC_UNMAP);
2523 				dp_tx_tso_unmap_segment(soc,
2524 							msdu_info->u.tso_info.
2525 							curr_seg,
2526 							msdu_info->u.tso_info.
2527 							tso_num_seg_list);
2528 
2529 				if (msdu_info->u.tso_info.curr_seg->next) {
2530 					msdu_info->u.tso_info.curr_seg =
2531 					msdu_info->u.tso_info.curr_seg->next;
2532 					i++;
2533 					continue;
2534 				}
2535 			}
2536 
2537 			if (msdu_info->frm_type == dp_tx_frm_sg)
2538 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2539 
2540 			goto done;
2541 		}
2542 
2543 		if (msdu_info->frm_type == dp_tx_frm_me) {
2544 			tx_desc->msdu_ext_desc->me_buffer =
2545 				(struct dp_tx_me_buf_t *)msdu_info->
2546 				u.sg_info.curr_seg->frags[0].vaddr;
2547 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2548 		}
2549 
2550 		if (is_cce_classified)
2551 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2552 
2553 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2554 		if (msdu_info->exception_fw) {
2555 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2556 		}
2557 
2558 		dp_tx_is_hp_update_required(i, msdu_info);
2559 
2560 		/*
2561 		 * For frames with multiple segments (TSO, ME), jump to next
2562 		 * segment.
2563 		 */
2564 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2565 			if (msdu_info->u.tso_info.curr_seg->next) {
2566 				msdu_info->u.tso_info.curr_seg =
2567 					msdu_info->u.tso_info.curr_seg->next;
2568 
2569 				/*
2570 				 * If this is a jumbo nbuf, then increment the
2571 				 * number of nbuf users for each additional
2572 				 * segment of the msdu. This will ensure that
2573 				 * the skb is freed only after receiving tx
2574 				 * completion for all segments of an nbuf
2575 				 */
2576 				qdf_nbuf_inc_users(nbuf);
2577 
2578 				/* Check with MCL if this is needed */
2579 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2580 				 */
2581 			}
2582 		}
2583 
2584 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2585 					 &htt_tcl_metadata,
2586 					 vdev,
2587 					 msdu_info);
2588 		/*
2589 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2590 		 */
2591 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2592 						     htt_tcl_metadata,
2593 						     NULL, msdu_info);
2594 
2595 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2596 
2597 		if (status != QDF_STATUS_SUCCESS) {
2598 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2599 				   tx_desc, tx_q->ring_id);
2600 
2601 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2602 			tid_stats = &pdev->stats.tid_stats.
2603 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2604 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2605 
2606 			if (msdu_info->frm_type == dp_tx_frm_me) {
2607 				hw_enq_fail++;
2608 				if (hw_enq_fail == msdu_info->num_seg) {
2609 					/*
2610 					 * Unmap is needed only if enqueue
2611 					 * failed for all segments.
2612 					 */
2613 					qdf_nbuf_unmap(soc->osdev,
2614 						       msdu_info->u.sg_info.
2615 						       curr_seg->nbuf,
2616 						       QDF_DMA_TO_DEVICE);
2617 				}
2618 				/*
2619 				 * Free the nbuf for the current segment
2620 				 * and make it point to the next in the list.
2621 				 * For ME frames, there are as many segments
2622 				 * as there are clients.
2623 				 */
2624 				qdf_nbuf_free(msdu_info->u.sg_info
2625 					      .curr_seg->nbuf);
2626 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2627 				if (msdu_info->u.sg_info.curr_seg->next) {
2628 					msdu_info->u.sg_info.curr_seg =
2629 						msdu_info->u.sg_info
2630 						.curr_seg->next;
2631 					nbuf = msdu_info->u.sg_info
2632 					       .curr_seg->nbuf;
2633 				} else
2634 					break;
2635 				i++;
2636 				continue;
2637 			}
2638 
2639 			/*
2640 			 * For TSO frames, the nbuf users increment done for
2641 			 * the current segment has to be reverted, since the
2642 			 * hw enqueue for this segment failed
2643 			 */
2644 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2645 			    msdu_info->u.tso_info.curr_seg) {
2646 				/*
2647 				 * unmap and free current,
2648 				 * retransmit remaining segments
2649 				 */
2650 				dp_tx_comp_free_buf(soc, tx_desc, false);
2651 				i++;
2652 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2653 				continue;
2654 			}
2655 
2656 			if (msdu_info->frm_type == dp_tx_frm_sg)
2657 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2658 
2659 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2660 			goto done;
2661 		}
2662 
2663 		/*
2664 		 * TODO
2665 		 * if tso_info structure can be modified to have curr_seg
2666 		 * as first element, following 2 blocks of code (for TSO and SG)
2667 		 * can be combined into 1
2668 		 */
2669 
2670 		/*
2671 		 * For Multicast-Unicast converted packets,
2672 		 * each converted frame (for a client) is represented as
2673 		 * 1 segment
2674 		 */
2675 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2676 				(msdu_info->frm_type == dp_tx_frm_me)) {
2677 			if (msdu_info->u.sg_info.curr_seg->next) {
2678 				msdu_info->u.sg_info.curr_seg =
2679 					msdu_info->u.sg_info.curr_seg->next;
2680 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2681 			} else
2682 				break;
2683 		}
2684 		i++;
2685 	}
2686 
2687 	nbuf = NULL;
2688 
2689 done:
2690 	return nbuf;
2691 }
2692 
2693 /**
2694  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2695  *                     for SG frames
2696  * @vdev: DP vdev handle
2697  * @nbuf: skb
2698  * @seg_info: Pointer to Segment info Descriptor to be prepared
2699  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2700  *
2701  * Return: NULL on success,
2702  *         nbuf when it fails to send
2703  */
2704 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2705 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2706 {
2707 	uint32_t cur_frag, nr_frags, i;
2708 	qdf_dma_addr_t paddr;
2709 	struct dp_tx_sg_info_s *sg_info;
2710 
2711 	sg_info = &msdu_info->u.sg_info;
2712 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2713 
2714 	if (QDF_STATUS_SUCCESS !=
2715 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2716 					   QDF_DMA_TO_DEVICE,
2717 					   qdf_nbuf_headlen(nbuf))) {
2718 		dp_tx_err("dma map error");
2719 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2720 		qdf_nbuf_free(nbuf);
2721 		return NULL;
2722 	}
2723 
2724 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2725 	seg_info->frags[0].paddr_lo = paddr;
2726 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2727 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2728 	seg_info->frags[0].vaddr = (void *) nbuf;
2729 
2730 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2731 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2732 							    nbuf, 0,
2733 							    QDF_DMA_TO_DEVICE,
2734 							    cur_frag)) {
2735 			dp_tx_err("frag dma map error");
2736 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2737 			goto map_err;
2738 		}
2739 
2740 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2741 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2742 		seg_info->frags[cur_frag + 1].paddr_hi =
2743 			((uint64_t) paddr) >> 32;
2744 		seg_info->frags[cur_frag + 1].len =
2745 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2746 	}
2747 
2748 	seg_info->frag_cnt = (cur_frag + 1);
2749 	seg_info->total_len = qdf_nbuf_len(nbuf);
2750 	seg_info->next = NULL;
2751 
2752 	sg_info->curr_seg = seg_info;
2753 
2754 	msdu_info->frm_type = dp_tx_frm_sg;
2755 	msdu_info->num_seg = 1;
2756 
2757 	return nbuf;
2758 map_err:
2759 	/* restore paddr into nbuf before calling unmap */
2760 	qdf_nbuf_mapped_paddr_set(nbuf,
2761 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2762 				  ((uint64_t)
2763 				  seg_info->frags[0].paddr_hi) << 32));
2764 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2765 				     QDF_DMA_TO_DEVICE,
2766 				     seg_info->frags[0].len);
2767 	for (i = 1; i <= cur_frag; i++) {
2768 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2769 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2770 				   seg_info->frags[i].paddr_hi) << 32),
2771 				   seg_info->frags[i].len,
2772 				   QDF_DMA_TO_DEVICE);
2773 	}
2774 	qdf_nbuf_free(nbuf);
2775 	return NULL;
2776 }
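
/*
 * Layout produced by dp_tx_prepare_sg() (informational sketch): fragment 0
 * always describes the linear part of the skb and fragments 1..nr_frags
 * describe the page fragments. For an skb with a 128-byte linear header
 * and two page frags:
 *
 *	frags[0].len = 128;			// qdf_nbuf_headlen(nbuf)
 *	frags[1].len = qdf_nbuf_get_frag_size(nbuf, 0);
 *	frags[2].len = qdf_nbuf_get_frag_size(nbuf, 1);
 *	frag_cnt     = 3;
 *	total_len    = qdf_nbuf_len(nbuf);
 *
 * msdu_info is then marked dp_tx_frm_sg with a single segment, so the
 * multi-segment send path treats the whole skb as one MSDU.
 */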
2777 
2778 /**
2779  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2780  * @vdev: DP vdev handle
2781  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2782  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2783  *
2784  * Return: None
2786  */
2787 static
2788 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2789 				    struct dp_tx_msdu_info_s *msdu_info,
2790 				    uint16_t ppdu_cookie)
2791 {
2792 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2793 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2794 
2795 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2796 
2797 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2798 				(msdu_info->meta_data[5], 1);
2799 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2800 				(msdu_info->meta_data[5], 1);
2801 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2802 				(msdu_info->meta_data[6], ppdu_cookie);
2803 
2804 	msdu_info->exception_fw = 1;
2805 	msdu_info->is_tx_sniffer = 1;
2806 }
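
/*
 * Note (informational): the ppdu_cookie programmed above is replayed by the
 * target in the PPDU completions, letting the host correlate sniffer frames
 * with their completions; the frame is also forced through the FW exception
 * path (exception_fw = 1).
 */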
2807 
2808 #ifdef MESH_MODE_SUPPORT
2809 
2810 /**
2811  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2812  *				     and prepare msdu_info for mesh frames.
2813  * @vdev: DP vdev handle
2814  * @nbuf: skb
2815  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2816  *
2817  * Return: NULL on failure,
2818  *         nbuf when extracted successfully
2819  */
2820 static
2821 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2822 				struct dp_tx_msdu_info_s *msdu_info)
2823 {
2824 	struct meta_hdr_s *mhdr;
2825 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2826 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2827 
2828 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2829 
2830 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2831 		msdu_info->exception_fw = 0;
2832 		goto remove_meta_hdr;
2833 	}
2834 
2835 	msdu_info->exception_fw = 1;
2836 
2837 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2838 
2839 	meta_data->host_tx_desc_pool = 1;
2840 	meta_data->update_peer_cache = 1;
2841 	meta_data->learning_frame = 1;
2842 
2843 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2844 		meta_data->power = mhdr->power;
2845 
2846 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2847 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2848 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2849 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2850 
2851 		meta_data->dyn_bw = 1;
2852 
2853 		meta_data->valid_pwr = 1;
2854 		meta_data->valid_mcs_mask = 1;
2855 		meta_data->valid_nss_mask = 1;
2856 		meta_data->valid_preamble_type  = 1;
2857 		meta_data->valid_retries = 1;
2858 		meta_data->valid_bw_info = 1;
2859 	}
2860 
2861 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2862 		meta_data->encrypt_type = 0;
2863 		meta_data->valid_encrypt_type = 1;
2864 		meta_data->learning_frame = 0;
2865 	}
2866 
2867 	meta_data->valid_key_flags = 1;
2868 	meta_data->key_flags = (mhdr->keyix & 0x3);
2869 
2870 remove_meta_hdr:
2871 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2872 		dp_tx_err("qdf_nbuf_pull_head failed");
2873 		qdf_nbuf_free(nbuf);
2874 		return NULL;
2875 	}
2876 
2877 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2878 
2879 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2880 		   " tid %d to_fw %d",
2881 		   msdu_info->meta_data[0],
2882 		   msdu_info->meta_data[1],
2883 		   msdu_info->meta_data[2],
2884 		   msdu_info->meta_data[3],
2885 		   msdu_info->meta_data[4],
2886 		   msdu_info->meta_data[5],
2887 		   msdu_info->tid, msdu_info->exception_fw);
2888 
2889 	return nbuf;
2890 }
2891 #else
2892 static
2893 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2894 				struct dp_tx_msdu_info_s *msdu_info)
2895 {
2896 	return nbuf;
2897 }
2898 
2899 #endif
2900 
2901 /**
2902  * dp_check_exc_metadata() - Checks if parameters are valid
2903  * @tx_exc: holds all exception path parameters
2904  *
2905  * Return: true when all the parameters are valid, else false
2906  *
2907  */
2908 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2909 {
2910 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2911 			    HTT_INVALID_TID);
2912 	bool invalid_encap_type =
2913 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2914 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2915 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2916 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2917 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2918 			       tx_exc->ppdu_cookie == 0);
2919 
2920 	if (tx_exc->is_intrabss_fwd)
2921 		return true;
2922 
2923 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2924 	    invalid_cookie) {
2925 		return false;
2926 	}
2927 
2928 	return true;
2929 }
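
/*
 * Illustrative example only: a minimally valid exception metadata block,
 * using just the fields checked above and their "invalid"/default values:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.tid = HTT_INVALID_TID;			// no TID override
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
 *	tx_exc.peer_id = CDP_INVALID_PEER;
 *	// is_tx_sniffer stays 0, so no ppdu_cookie is required
 *
 * dp_check_exc_metadata() returns true for such a block.
 */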
2930 
2931 #ifdef ATH_SUPPORT_IQUE
2932 /**
2933  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2934  * @vdev: vdev handle
2935  * @nbuf: skb
2936  *
2937  * Return: true on success,
2938  *         false on failure
2939  */
2940 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2941 {
2942 	qdf_ether_header_t *eh;
2943 
2944 	/* Mcast to Ucast Conversion*/
2945 	if (qdf_likely(!vdev->mcast_enhancement_en))
2946 		return true;
2947 
2948 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2949 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2950 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2951 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2952 		qdf_nbuf_set_next(nbuf, NULL);
2953 
2954 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2955 				 qdf_nbuf_len(nbuf));
2956 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2957 				QDF_STATUS_SUCCESS) {
2958 			return false;
2959 		}
2960 
2961 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2962 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2963 					QDF_STATUS_SUCCESS) {
2964 				return false;
2965 			}
2966 		}
2967 	}
2968 
2969 	return true;
2970 }
2971 #else
2972 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2973 {
2974 	return true;
2975 }
2976 #endif
2977 
2978 /**
2979  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2980  * @nbuf: qdf_nbuf_t
2981  * @vdev: struct dp_vdev *
2982  *
2983  * Allow the packet for processing only if it is destined to a peer
2984  * client connected to the same vap. Drop the packet if the client is
2985  * connected to a different vap.
2986  *
2987  * Return: QDF_STATUS
2988  */
2989 static inline QDF_STATUS
2990 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2991 {
2992 	struct dp_ast_entry *dst_ast_entry = NULL;
2993 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2994 
2995 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2996 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2997 		return QDF_STATUS_SUCCESS;
2998 
2999 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3000 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3001 							eh->ether_dhost,
3002 							vdev->vdev_id);
3003 
3004 	/* If there is no ast entry, return failure */
3005 	if (qdf_unlikely(!dst_ast_entry)) {
3006 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3007 		return QDF_STATUS_E_FAILURE;
3008 	}
3009 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3010 
3011 	return QDF_STATUS_SUCCESS;
3012 }
3013 
3014 /**
3015  * dp_tx_nawds_handler() - NAWDS handler
3016  *
3017  * @soc: DP soc handle
3018  * @vdev: DP vdev handle
3019  * @msdu_info: msdu_info required to create HTT metadata
3020  * @nbuf: skb
 * @sa_peer_id: source peer id of the frame
3021  *
3022  * This API transmits the multicast frame, with peer-id based metadata,
3023  * to each NAWDS-enabled peer on the vdev.
 *
3025  * Return: none
3026  */
3027 
3028 static inline
3029 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3030 			 struct dp_tx_msdu_info_s *msdu_info,
3031 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3032 {
3033 	struct dp_peer *peer = NULL;
3034 	qdf_nbuf_t nbuf_clone = NULL;
3035 	uint16_t peer_id = DP_INVALID_PEER;
3036 	struct dp_txrx_peer *txrx_peer;
3037 
3038 	/* This check avoids forwarding packets whose source is present
3039 	 * in the AST table but does not yet have a valid peer id.
3040 	 */
3041 	if (sa_peer_id == HTT_INVALID_PEER)
3042 		return;
3043 
3044 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3045 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3046 		txrx_peer = dp_get_txrx_peer(peer);
3047 		if (!txrx_peer)
3048 			continue;
3049 
3050 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3051 			peer_id = peer->peer_id;
3052 
3053 			if (!dp_peer_is_primary_link_peer(peer))
3054 				continue;
3055 
3056 			/* Multicast packets need to be
3057 			 * dropped in case of intra-BSS forwarding
3058 			 */
3059 			if (sa_peer_id == txrx_peer->peer_id) {
3060 				dp_tx_debug("multicast packet");
3061 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3062 							  tx.nawds_mcast_drop,
3063 							  1);
3064 				continue;
3065 			}
3066 
3067 			nbuf_clone = qdf_nbuf_clone(nbuf);
3068 
3069 			if (!nbuf_clone) {
3070 				QDF_TRACE(QDF_MODULE_ID_DP,
3071 					  QDF_TRACE_LEVEL_ERROR,
3072 					  FL("nbuf clone failed"));
3073 				break;
3074 			}
3075 
3076 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3077 							    msdu_info, peer_id,
3078 							    NULL);
3079 
3080 			if (nbuf_clone) {
3081 				dp_tx_debug("pkt send failed");
3082 				qdf_nbuf_free(nbuf_clone);
3083 			} else {
3084 				if (peer_id != DP_INVALID_PEER)
3085 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3086 								      tx.nawds_mcast,
3087 								      1, qdf_nbuf_len(nbuf));
3088 			}
3089 		}
3090 	}
3091 
3092 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3093 }
3094 
3095 /**
3096  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
3097  * @soc: DP soc handle
3098  * @vdev_id: id of DP vdev handle
3099  * @nbuf: skb
3100  * @tx_exc_metadata: Handle that holds exception path meta data
3101  *
3102  * Entry point for Core Tx layer (DP_TX) invoked from
3103  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3104  *
3105  * Return: NULL on success,
3106  *         nbuf when it fails to send
3107  */
3108 qdf_nbuf_t
3109 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3110 		     qdf_nbuf_t nbuf,
3111 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3112 {
3113 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3114 	qdf_ether_header_t *eh = NULL;
3115 	struct dp_tx_msdu_info_s msdu_info;
3116 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3117 						     DP_MOD_ID_TX_EXCEPTION);
3118 
3119 	if (qdf_unlikely(!vdev))
3120 		goto fail;
3121 
3122 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3123 
3124 	if (!tx_exc_metadata)
3125 		goto fail;
3126 
3127 	msdu_info.tid = tx_exc_metadata->tid;
3128 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3129 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3130 			 QDF_MAC_ADDR_REF(nbuf->data));
3131 
3132 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3133 
3134 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3135 		dp_tx_err("Invalid parameters in exception path");
3136 		goto fail;
3137 	}
3138 
3139 	/* for peer based metadata check if peer is valid */
3140 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3141 		struct dp_peer *peer = NULL;
3142 
3143 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3144 					      tx_exc_metadata->peer_id,
3145 					      DP_MOD_ID_TX_EXCEPTION);
3146 		if (qdf_unlikely(!peer)) {
3147 			DP_STATS_INC(vdev,
3148 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3149 				     1);
3150 			goto fail;
3151 		}
3152 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3153 	}
3154 	/* Basic sanity checks for unsupported packets */
3155 
3156 	/* MESH mode */
3157 	if (qdf_unlikely(vdev->mesh_vdev)) {
3158 		dp_tx_err("Mesh mode is not supported in exception path");
3159 		goto fail;
3160 	}
3161 
3162 	/*
3163 	 * Classify the frame and call corresponding
3164 	 * "prepare" function which extracts the segment (TSO)
3165 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3166 	 * into MSDU_INFO structure which is later used to fill
3167 	 * SW and HW descriptors.
3168 	 */
3169 	if (qdf_nbuf_is_tso(nbuf)) {
3170 		dp_verbose_debug("TSO frame %pK", vdev);
3171 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3172 				 qdf_nbuf_len(nbuf));
3173 
3174 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3175 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3176 					 qdf_nbuf_len(nbuf));
3177 			goto fail;
3178 		}
3179 
3180 		goto send_multiple;
3181 	}
3182 
3183 	/* SG */
3184 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3185 		struct dp_tx_seg_info_s seg_info = {0};
3186 
3187 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3188 		if (!nbuf)
3189 			goto fail;
3190 
3191 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3192 
3193 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3194 				 qdf_nbuf_len(nbuf));
3195 
3196 		goto send_multiple;
3197 	}
3198 
3199 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3200 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3201 				 qdf_nbuf_len(nbuf));
3202 
3203 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3204 					       tx_exc_metadata->ppdu_cookie);
3205 	}
3206 
3207 	/*
3208 	 * Get HW Queue to use for this frame.
3209 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3210 	 * dedicated for data and 1 for command.
3211 	 * "queue_id" maps to one hardware ring.
3212 	 *  With each ring, we also associate a unique Tx descriptor pool
3213 	 *  to minimize lock contention for these resources.
3214 	 */
3215 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3216 
3217 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3218 		if (qdf_unlikely(vdev->nawds_enabled)) {
3219 			/*
3220 			 * This is a multicast packet
3221 			 */
3222 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3223 					    tx_exc_metadata->peer_id);
3224 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3225 					 1, qdf_nbuf_len(nbuf));
3226 		}
3227 
3228 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3229 					      DP_INVALID_PEER, NULL);
3230 	} else {
3231 		/*
3232 		 * Check exception descriptors
3233 		 */
3234 		if (dp_tx_exception_limit_check(vdev))
3235 			goto fail;
3236 
3237 		/*  Single linear frame */
3238 		/*
3239 		 * If nbuf is a simple linear frame, use send_single function to
3240 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3241 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3242 		 */
3243 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3244 					      tx_exc_metadata->peer_id,
3245 					      tx_exc_metadata);
3246 	}
3247 
3248 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3249 	return nbuf;
3250 
3251 send_multiple:
3252 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3253 
3254 fail:
3255 	if (vdev)
3256 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3257 	dp_verbose_debug("pkt send failed");
3258 	return nbuf;
3259 }
3260 
3261 /**
3262  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3263  *      in exception path in special case to avoid regular exception path chk.
3264  * @soc: DP soc handle
3265  * @vdev_id: id of DP vdev handle
3266  * @nbuf: skb
3267  * @tx_exc_metadata: Handle that holds exception path meta data
3268  *
3269  * Entry point for Core Tx layer (DP_TX) invoked from
3270  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3271  *
3272  * Return: NULL on success,
3273  *         nbuf when it fails to send
3274  */
3275 qdf_nbuf_t
3276 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3277 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3278 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3279 {
3280 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3281 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3282 						     DP_MOD_ID_TX_EXCEPTION);
3283 
3284 	if (qdf_unlikely(!vdev))
3285 		goto fail;
3286 
3287 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3288 			== QDF_STATUS_E_FAILURE)) {
3289 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3290 		goto fail;
3291 	}
3292 
3293 	/* Drop the ref here, as it is taken again inside dp_tx_send_exception */
3294 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3295 
3296 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3297 
3298 fail:
3299 	if (vdev)
3300 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3301 	dp_verbose_debug("pkt send failed");
3302 	return nbuf;
3303 }
3304 
3305 /**
3306  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3307  * @soc: DP soc handle
3308  * @vdev_id: DP vdev handle
3309  * @nbuf: skb
3310  *
3311  * Entry point for Core Tx layer (DP_TX) invoked from
3312  * hard_start_xmit in OSIF/HDD
3313  *
3314  * Return: NULL on success,
3315  *         nbuf when it fails to send
3316  */
3317 #ifdef MESH_MODE_SUPPORT
3318 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3319 			   qdf_nbuf_t nbuf)
3320 {
3321 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3322 	struct meta_hdr_s *mhdr;
3323 	qdf_nbuf_t nbuf_mesh = NULL;
3324 	qdf_nbuf_t nbuf_clone = NULL;
3325 	struct dp_vdev *vdev;
3326 	uint8_t no_enc_frame = 0;
3327 
3328 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3329 	if (!nbuf_mesh) {
3330 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3331 				"qdf_nbuf_unshare failed");
3332 		return nbuf;
3333 	}
3334 
3335 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3336 	if (!vdev) {
3337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3338 				"vdev is NULL for vdev_id %d", vdev_id);
3339 		return nbuf;
3340 	}
3341 
3342 	nbuf = nbuf_mesh;
3343 
3344 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3345 
3346 	if ((vdev->sec_type != cdp_sec_type_none) &&
3347 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3348 		no_enc_frame = 1;
3349 
3350 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3351 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3352 
3353 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3354 		       !no_enc_frame) {
3355 		nbuf_clone = qdf_nbuf_clone(nbuf);
3356 		if (!nbuf_clone) {
3357 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3358 				"qdf_nbuf_clone failed");
3359 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3360 			return nbuf;
3361 		}
3362 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3363 	}
3364 
3365 	if (nbuf_clone) {
3366 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3367 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3368 		} else {
3369 			qdf_nbuf_free(nbuf_clone);
3370 		}
3371 	}
3372 
3373 	if (no_enc_frame)
3374 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3375 	else
3376 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3377 
3378 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3379 	if ((!nbuf) && no_enc_frame) {
3380 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3381 	}
3382 
3383 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3384 	return nbuf;
3385 }
3386 
3387 #else
3388 
3389 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3390 			   qdf_nbuf_t nbuf)
3391 {
3392 	return dp_tx_send(soc, vdev_id, nbuf);
3393 }
3394 
3395 #endif
3396 
3397 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3398 static inline
3399 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3400 {
3401 	if (nbuf) {
3402 		qdf_prefetch(&nbuf->len);
3403 		qdf_prefetch(&nbuf->data);
3404 	}
3405 }
3406 #else
3407 static inline
3408 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3409 {
3410 }
3411 #endif
3412 
3413 #ifdef DP_UMAC_HW_RESET_SUPPORT
3414 /*
3415  * dp_tx_drop() - Drop the frame on a given VAP
3416  * @soc: DP soc handle
3417  * @vdev_id: id of DP vdev handle
3418  * @nbuf: skb
3419  *
3420  * Drop all the incoming packets
3421  *
3422  * Return: nbuf
3423  *
3424  */
3425 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3426 		      qdf_nbuf_t nbuf)
3427 {
3428 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3429 	struct dp_vdev *vdev = NULL;
3430 
3431 	vdev = soc->vdev_id_map[vdev_id];
3432 	if (qdf_unlikely(!vdev))
3433 		return nbuf;
3434 
3435 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3436 	return nbuf;
3437 }
3438 
3439 /*
3440  * dp_tx_exc_drop() - Drop the frame on a given VAP
3441  * @soc: DP soc handle
3442  * @vdev_id: id of DP vdev handle
3443  * @nbuf: skb
3444  * @tx_exc_metadata: Handle that holds exception path meta data
3445  *
3446  * Drop all the incoming packets
3447  *
3448  * Return: nbuf
3449  *
3450  */
3451 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3452 			  qdf_nbuf_t nbuf,
3453 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3454 {
3455 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3456 }
3457 #endif
3458 
3459 /*
3460  * dp_tx_send() - Transmit a frame on a given VAP
3461  * @soc: DP soc handle
3462  * @vdev_id: id of DP vdev handle
3463  * @nbuf: skb
3464  *
3465  * Entry point for Core Tx layer (DP_TX) invoked from
3466  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3467  * cases
3468  *
3469  * Return: NULL on success,
3470  *         nbuf when it fails to send
3471  */
3472 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3473 		      qdf_nbuf_t nbuf)
3474 {
3475 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3476 	uint16_t peer_id = HTT_INVALID_PEER;
3477 	/*
3478 	 * Doing a memzero causes additional function call overhead,
3479 	 * so initialize the structure on the stack statically instead.
3480 	 */
3481 	struct dp_tx_msdu_info_s msdu_info = {0};
3482 	struct dp_vdev *vdev = NULL;
3483 	qdf_nbuf_t end_nbuf = NULL;
3484 
3485 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3486 		return nbuf;
3487 
3488 	/*
3489 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3490 	 * it in the per-packet path.
3491 	 *
3492 	 * In this path the vdev memory is already protected by the netdev
3493 	 * tx lock.
3494 	 */
3495 	vdev = soc->vdev_id_map[vdev_id];
3496 	if (qdf_unlikely(!vdev))
3497 		return nbuf;
3498 
3499 	/*
3500 	 * Set Default Host TID value to invalid TID
3501 	 * (TID override disabled)
3502 	 */
3503 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3504 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
3505 
3506 	if (qdf_unlikely(vdev->mesh_vdev)) {
3507 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3508 								&msdu_info);
3509 		if (!nbuf_mesh) {
3510 			dp_verbose_debug("Extracting mesh metadata failed");
3511 			return nbuf;
3512 		}
3513 		nbuf = nbuf_mesh;
3514 	}
3515 
3516 	/*
3517 	 * Get HW Queue to use for this frame.
3518 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3519 	 * dedicated for data and 1 for command.
3520 	 * "queue_id" maps to one hardware ring.
3521 	 *  With each ring, we also associate a unique Tx descriptor pool
3522 	 *  to minimize lock contention for these resources.
3523 	 */
3524 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3525 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3526 		     1);
3527 
3528 	/*
3529 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3530 	 *  Table 1 - Default DSCP-TID mapping table
3531 	 *  Table 2 - 1 DSCP-TID override table
3532 	 *
3533 	 * If we need a different DSCP-TID mapping for this vap,
3534 	 * call tid_classify to extract DSCP/ToS from frame and
3535 	 * map to a TID and store in msdu_info. This is later used
3536 	 * to fill in TCL Input descriptor (per-packet TID override).
3537 	 */
3538 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3539 
3540 	/*
3541 	 * Classify the frame and call corresponding
3542 	 * "prepare" function which extracts the segment (TSO)
3543 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3544 	 * into MSDU_INFO structure which is later used to fill
3545 	 * SW and HW descriptors.
3546 	 */
3547 	if (qdf_nbuf_is_tso(nbuf)) {
3548 		dp_verbose_debug("TSO frame %pK", vdev);
3549 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3550 				 qdf_nbuf_len(nbuf));
3551 
3552 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3553 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3554 					 qdf_nbuf_len(nbuf));
3555 			return nbuf;
3556 		}
3557 
3558 		goto send_multiple;
3559 	}
3560 
3561 	/* SG */
3562 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3563 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3564 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3565 				return nbuf;
3566 		} else {
3567 			struct dp_tx_seg_info_s seg_info = {0};
3568 
3569 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3570 						&msdu_info);
3571 			if (!nbuf)
3572 				return NULL;
3573 
3574 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3575 
3576 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3577 					 qdf_nbuf_len(nbuf));
3578 
3579 			goto send_multiple;
3580 		}
3581 	}
3582 
3583 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3584 		return NULL;
3585 
3586 	/* RAW */
3587 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3588 		struct dp_tx_seg_info_s seg_info = {0};
3589 
3590 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3591 		if (!nbuf)
3592 			return NULL;
3593 
3594 		dp_verbose_debug("Raw frame %pK", vdev);
3595 
3596 		goto send_multiple;
3597 
3598 	}
3599 
3600 	if (qdf_unlikely(vdev->nawds_enabled)) {
3601 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3602 					  qdf_nbuf_data(nbuf);
3603 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3604 			uint16_t sa_peer_id = DP_INVALID_PEER;
3605 
3606 			if (!soc->ast_offload_support) {
3607 				struct dp_ast_entry *ast_entry = NULL;
3608 
3609 				qdf_spin_lock_bh(&soc->ast_lock);
3610 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3611 					(soc,
3612 					 (uint8_t *)(eh->ether_shost),
3613 					 vdev->pdev->pdev_id);
3614 				if (ast_entry)
3615 					sa_peer_id = ast_entry->peer_id;
3616 				qdf_spin_unlock_bh(&soc->ast_lock);
3617 			}
3618 
3619 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3620 					    sa_peer_id);
3621 		}
3622 		peer_id = DP_INVALID_PEER;
3623 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3624 				 1, qdf_nbuf_len(nbuf));
3625 	}
3626 
3627 	/*  Single linear frame */
3628 	/*
3629 	 * If nbuf is a simple linear frame, use send_single function to
3630 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3631 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3632 	 */
3633 	dp_tx_prefetch_nbuf_data(nbuf);
3634 
3635 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3636 					      peer_id, end_nbuf);
3637 	return nbuf;
3638 
3639 send_multiple:
3640 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3641 
3642 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3643 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3644 
3645 	return nbuf;
3646 }
3647 
3648 /**
3649  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3650  *      case to avoid the check in the per-packet path.
3651  * @soc: DP soc handle
3652  * @vdev_id: id of DP vdev handle
3653  * @nbuf: skb
3654  *
3655  * Entry point for Core Tx layer (DP_TX) invoked from
3656  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3657  * with special condition to avoid per pkt check in dp_tx_send
3658  *
3659  * Return: NULL on success,
3660  *         nbuf when it fails to send
3661  */
3662 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3663 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3664 {
3665 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3666 	struct dp_vdev *vdev = NULL;
3667 
3668 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3669 		return nbuf;
3670 
3671 	/*
3672 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3673 	 * it in the per-packet path.
3674 	 *
3675 	 * In this path the vdev memory is already protected by the netdev
3676 	 * tx lock.
3677 	 */
3678 	vdev = soc->vdev_id_map[vdev_id];
3679 	if (qdf_unlikely(!vdev))
3680 		return nbuf;
3681 
3682 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3683 			== QDF_STATUS_E_FAILURE)) {
3684 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3685 		return nbuf;
3686 	}
3687 
3688 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3689 }
3690 
3691 #ifdef UMAC_SUPPORT_PROXY_ARP
3692 /**
3693  * dp_tx_proxy_arp() - Tx proxy arp handler
3694  * @vdev: datapath vdev handle
3695  * @nbuf: sk buffer
3696  *
3697  * Return: status
3698  */
3699 static inline
3700 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3701 {
3702 	if (vdev->osif_proxy_arp)
3703 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3704 
3705 	/*
3706 	 * When UMAC_SUPPORT_PROXY_ARP is defined, we expect
3707 	 * osif_proxy_arp to have a valid function pointer
3708 	 * assigned to it
3709 	 */
3710 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3711 
3712 	return QDF_STATUS_NOT_INITIALIZED;
3713 }
3714 #else
3715 /**
3716  * dp_tx_proxy_arp() - Tx proxy arp handler
3717  * @vdev: datapath vdev handle
3718  * @nbuf: sk buffer
3719  *
3720  * This function always returns QDF_STATUS_SUCCESS when
3721  * UMAC_SUPPORT_PROXY_ARP is not defined.
3722  *
3723  * Return: status
3724  */
3725 static inline
3726 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3727 {
3728 	return QDF_STATUS_SUCCESS;
3729 }
3730 #endif
3731 
3732 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3733 #ifdef WLAN_MCAST_MLO
3734 static bool
3735 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3736 		       struct dp_tx_desc_s *tx_desc,
3737 		       qdf_nbuf_t nbuf,
3738 		       uint8_t reinject_reason)
3739 {
3740 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3741 		if (soc->arch_ops.dp_tx_mcast_handler)
3742 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3743 
3744 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3745 		return true;
3746 	}
3747 
3748 	return false;
3749 }
3750 #else /* WLAN_MCAST_MLO */
3751 static inline bool
3752 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3753 		       struct dp_tx_desc_s *tx_desc,
3754 		       qdf_nbuf_t nbuf,
3755 		       uint8_t reinject_reason)
3756 {
3757 	return false;
3758 }
3759 #endif /* WLAN_MCAST_MLO */
3760 #else
3761 static inline bool
3762 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3763 		       struct dp_tx_desc_s *tx_desc,
3764 		       qdf_nbuf_t nbuf,
3765 		       uint8_t reinject_reason)
3766 {
3767 	return false;
3768 }
3769 #endif
3770 
3771 /**
3772  * dp_tx_reinject_handler() - Tx Reinject Handler
3773  * @soc: datapath soc handle
3774  * @vdev: datapath vdev handle
3775  * @tx_desc: software descriptor head pointer
3776  * @status: Tx completion status from HTT descriptor
3777  * @reinject_reason: reinject reason from HTT descriptor
3778  *
3779  * This function reinjects frames back to Target.
3780  * Todo - Host queue needs to be added
3781  *
3782  * Return: none
3783  */
3784 void dp_tx_reinject_handler(struct dp_soc *soc,
3785 			    struct dp_vdev *vdev,
3786 			    struct dp_tx_desc_s *tx_desc,
3787 			    uint8_t *status,
3788 			    uint8_t reinject_reason)
3789 {
3790 	struct dp_peer *peer = NULL;
3791 	uint32_t peer_id = HTT_INVALID_PEER;
3792 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3793 	qdf_nbuf_t nbuf_copy = NULL;
3794 	struct dp_tx_msdu_info_s msdu_info;
3795 #ifdef WDS_VENDOR_EXTENSION
3796 	int is_mcast = 0, is_ucast = 0;
3797 	int num_peers_3addr = 0;
3798 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3799 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3800 #endif
3801 	struct dp_txrx_peer *txrx_peer;
3802 
3803 	qdf_assert(vdev);
3804 
3805 	dp_tx_debug("Tx reinject path");
3806 
3807 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3808 			qdf_nbuf_len(tx_desc->nbuf));
3809 
3810 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3811 		return;
3812 
3813 #ifdef WDS_VENDOR_EXTENSION
3814 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3815 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3816 	} else {
3817 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3818 	}
3819 	is_ucast = !is_mcast;
3820 
3821 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3822 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3823 		txrx_peer = dp_get_txrx_peer(peer);
3824 
3825 		if (!txrx_peer || txrx_peer->bss_peer)
3826 			continue;
3827 
3828 		/* Detect wds peers that use 3-addr framing for mcast.
3829 		 * If there are any, the bss_peer is used to send the
3830 		 * mcast frame using 3-addr format. All wds-enabled
3831 		 * peers that use 4-addr framing for mcast frames will
3832 		 * be duplicated and sent as 4-addr frames below.
3833 		 */
3834 		if (!txrx_peer->wds_enabled ||
3835 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3836 			num_peers_3addr = 1;
3837 			break;
3838 		}
3839 	}
3840 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3841 #endif
3842 
3843 	if (qdf_unlikely(vdev->mesh_vdev)) {
3844 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3845 	} else {
3846 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3847 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3848 			txrx_peer = dp_get_txrx_peer(peer);
3849 			if (!txrx_peer)
3850 				continue;
3851 
3852 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3853 #ifdef WDS_VENDOR_EXTENSION
3854 			/*
3855 			 * . if 3-addr STA, then send on BSS Peer
3856 			 * . if Peer WDS enabled and accept 4-addr mcast,
3857 			 * send mcast on that peer only
3858 			 * . if Peer WDS enabled and accept 4-addr ucast,
3859 			 * send ucast on that peer only
3860 			 */
3861 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3862 			 (txrx_peer->wds_enabled &&
3863 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3864 			 (is_ucast &&
3865 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3866 #else
3867 			(txrx_peer->bss_peer &&
3868 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3869 #endif
3870 				peer_id = DP_INVALID_PEER;
3871 
3872 				nbuf_copy = qdf_nbuf_copy(nbuf);
3873 
3874 				if (!nbuf_copy) {
3875 					dp_tx_debug("nbuf copy failed");
3876 					break;
3877 				}
3878 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3879 				dp_tx_get_queue(vdev, nbuf,
3880 						&msdu_info.tx_queue);
3881 
3882 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3883 						nbuf_copy,
3884 						&msdu_info,
3885 						peer_id,
3886 						NULL);
3887 
3888 				if (nbuf_copy) {
3889 					dp_tx_debug("pkt send failed");
3890 					qdf_nbuf_free(nbuf_copy);
3891 				}
3892 			}
3893 		}
3894 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3895 
3896 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
3897 					     QDF_DMA_TO_DEVICE, nbuf->len);
3898 		qdf_nbuf_free(nbuf);
3899 	}
3900 
3901 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3902 }
3903 
3904 /**
3905  * dp_tx_inspect_handler() - Tx Inspect Handler
3906  * @soc: datapath soc handle
3907  * @vdev: datapath vdev handle
3908  * @tx_desc: software descriptor head pointer
3909  * @status : Tx completion status from HTT descriptor
3910  * @status: Tx completion status from HTT descriptor
3911  * Handles Tx frames sent back to Host for inspection
3912  * (ProxyARP)
3913  *
3914  * Return: none
3915  */
3916 void dp_tx_inspect_handler(struct dp_soc *soc,
3917 			   struct dp_vdev *vdev,
3918 			   struct dp_tx_desc_s *tx_desc,
3919 			   uint8_t *status)
3920 {
3921 
3922 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3923 			"%s Tx inspect path",
3924 			__func__);
3925 
3926 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3927 			 qdf_nbuf_len(tx_desc->nbuf));
3928 
3929 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3930 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3931 }
3932 
3933 #ifdef MESH_MODE_SUPPORT
3934 /**
3935  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3936  *                                         in mesh meta header
3937  * @tx_desc: software descriptor head pointer
3938  * @ts: pointer to tx completion stats
3939  * Return: none
3940  */
3941 static
3942 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3943 		struct hal_tx_completion_status *ts)
3944 {
3945 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3946 
3947 	if (!tx_desc->msdu_ext_desc) {
3948 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3949 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3950 				"netbuf %pK offset %d",
3951 				netbuf, tx_desc->pkt_offset);
3952 			return;
3953 		}
3954 	}
3955 }
3956 
3957 #else
3958 static
3959 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3960 		struct hal_tx_completion_status *ts)
3961 {
3962 }
3963 
3964 #endif
3965 
3966 #ifdef CONFIG_SAWF
3967 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3968 					 struct dp_vdev *vdev,
3969 					 struct dp_txrx_peer *txrx_peer,
3970 					 struct dp_tx_desc_s *tx_desc,
3971 					 struct hal_tx_completion_status *ts,
3972 					 uint8_t tid)
3973 {
3974 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
3975 					   ts, tid);
3976 }
3977 
3978 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
3979 				    uint32_t nw_delay,
3980 				    uint32_t sw_delay,
3981 				    uint32_t hw_delay)
3982 {
3983 	dp_peer_tid_delay_avg(tx_delay,
3984 			      nw_delay,
3985 			      sw_delay,
3986 			      hw_delay);
3987 }
3988 #else
3989 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3990 					 struct dp_vdev *vdev,
3991 					 struct dp_txrx_peer *txrx_peer,
3992 					 struct dp_tx_desc_s *tx_desc,
3993 					 struct hal_tx_completion_status *ts,
3994 					 uint8_t tid)
3995 {
3996 }
3997 
3998 static inline void
3999 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4000 			uint32_t nw_delay, uint32_t sw_delay,
4001 			uint32_t hw_delay)
4002 {
4003 }
4004 #endif
4005 
4006 #ifdef QCA_PEER_EXT_STATS
4007 #ifdef WLAN_CONFIG_TX_DELAY
4008 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4009 				    struct dp_tx_desc_s *tx_desc,
4010 				    struct hal_tx_completion_status *ts,
4011 				    struct dp_vdev *vdev)
4012 {
4013 	struct dp_soc *soc = vdev->pdev->soc;
4014 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4015 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4016 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4017 
4018 	if (!ts->valid)
4019 		return;
4020 
4021 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4022 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4023 
4024 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4025 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4026 
4027 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4028 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4029 							  &fwhw_transmit_delay))
4030 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4031 					     fwhw_transmit_delay);
4032 
4033 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4034 				fwhw_transmit_delay);
4035 }
4036 #else
4037 /*
4038  * dp_tx_compute_tid_delay() - Compute per TID delay
4039  * @stats: Per TID delay stats
4040  * @tx_desc: Software Tx descriptor
4041  * @ts: Tx completion status
4042  * @vdev: vdev
4043  *
4044  * Compute the software enqueue delay and the HW enqueue-to-completion
4045  * delay and update the respective histograms
4046  *
4047  * Return: void
4048  */
4049 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4050 				    struct dp_tx_desc_s *tx_desc,
4051 				    struct hal_tx_completion_status *ts,
4052 				    struct dp_vdev *vdev)
4053 {
4054 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4055 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4056 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4057 
4058 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4059 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4060 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4061 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4062 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4063 					 timestamp_hw_enqueue);
4064 
4065 	/*
4066 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
4067 	 */
4068 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4069 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4070 }
4071 #endif
4072 
4073 /*
4074  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4075  * @txrx_peer: DP peer context
4076  * @tx_desc: Tx software descriptor
4077  * @ts: Tx completion status
4078  * @ring_id: Tx completion ring number
4079  *
4080  * Update the peer extended stats. These are enhanced delay
4081  * stats maintained at the per-MSDU level.
4082  *
4083  * Return: void
4084  */
4085 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4086 					  struct dp_tx_desc_s *tx_desc,
4087 					  struct hal_tx_completion_status *ts,
4088 					  uint8_t ring_id)
4089 {
4090 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4091 	struct dp_soc *soc = NULL;
4092 	struct dp_peer_delay_stats *delay_stats = NULL;
4093 	uint8_t tid;
4094 
4095 	soc = pdev->soc;
4096 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4097 		return;
4098 
4099 	tid = ts->tid;
4100 	delay_stats = txrx_peer->delay_stats;
4101 
4102 	qdf_assert(delay_stats);
4103 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4104 
4105 	/*
4106 	 * For packets without a valid data TID, use the highest data TID
4107 	 */
4108 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4109 		tid = CDP_MAX_DATA_TIDS - 1;
4110 
4111 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4112 				tx_desc, ts, txrx_peer->vdev);
4113 }
4114 #else
4115 static inline
4116 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4117 				   struct dp_tx_desc_s *tx_desc,
4118 				   struct hal_tx_completion_status *ts,
4119 				   uint8_t ring_id)
4120 {
4121 }
4122 #endif
4123 
4124 #ifdef HW_TX_DELAY_STATS_ENABLE
4125 /**
4126  * dp_update_tx_delay_stats() - update the delay stats
4127  * @vdev: vdev handle
4128  * @delay: delay in ms or us based on the flag delay_in_us
4129  * @tid: tid value
4130  * @mode: type of tx delay mode
4131  * @ring_id: ring number
4132  * @delay_in_us: flag to indicate whether the delay is in ms or us
4133  *
4134  * Return: none
4135  */
4136 static inline
4137 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4138 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4139 {
4140 	struct cdp_tid_tx_stats *tstats =
4141 		&vdev->stats.tid_tx_stats[ring_id][tid];
4142 
4143 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4144 			      delay_in_us);
4145 }
4146 #else
4147 static inline
4148 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4149 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4150 {
4151 	struct cdp_tid_tx_stats *tstats =
4152 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4153 
4154 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4155 			      delay_in_us);
4156 }
4157 #endif
4158 
4159 /**
4160  * dp_tx_compute_delay() - Compute and fill in all timestamps
4161  *				to pass in correct fields
4162  *
4163  * @vdev: vdev handle
4164  * @tx_desc: tx descriptor
4165  * @tid: tid value
4166  * @ring_id: TCL or WBM ring number for transmit path
4167  * Return: none
4168  */
4169 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4170 			 uint8_t tid, uint8_t ring_id)
4171 {
4172 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4173 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4174 	uint32_t fwhw_transmit_delay_us;
4175 
4176 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4177 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4178 		return;
4179 
4180 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4181 		fwhw_transmit_delay_us =
4182 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4183 			qdf_ktime_to_us(tx_desc->timestamp);
4184 
4185 		/*
4186 		 * Delay between packet enqueued to HW and Tx completion in us
4187 		 */
4188 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4189 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4190 					 ring_id, true);
4191 		/*
4192 		 * For MCL, only enqueue to completion delay is required
4193 		 * so return if the vdev flag is enabled.
4194 		 */
4195 		return;
4196 	}
4197 
4198 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4199 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4200 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4201 					 timestamp_hw_enqueue);
4202 
4203 	/*
4204 	 * Delay between packet enqueued to HW and Tx completion in ms
4205 	 */
4206 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4207 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4208 				 false);
4209 
4210 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4211 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4212 	interframe_delay = (uint32_t)(timestamp_ingress -
4213 				      vdev->prev_tx_enq_tstamp);
4214 
4215 	/*
4216 	 * Delay in software enqueue
4217 	 */
4218 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4219 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4220 				 false);
4221 
4222 	/*
4223 	 * Update interframe delay stats calculated at hardstart receive point.
4224 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4225 	 * interframe delay will not be calculated correctly for the 1st frame.
4226 	 * On the other hand, this helps avoid an extra per-packet check
4227 	 * of !vdev->prev_tx_enq_tstamp.
4228 	 */
4229 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4230 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4231 				 false);
4232 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4233 }
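/*
 * Worked example of the delay bookkeeping above (illustrative values):
 * for a frame received from the stack at t = 100 ms (timestamp_ingress),
 * enqueued to HW at t = 103 ms (timestamp_hw_enqueue) and completed at
 * t = 110 ms (current_timestamp),
 *   sw_enqueue_delay    = 103 - 100 = 3 ms  (CDP_DELAY_STATS_SW_ENQ)
 *   fwhw_transmit_delay = 110 - 103 = 7 ms  (CDP_DELAY_STATS_FW_HW_TRANSMIT)
 * and, with the previous frame's ingress timestamp at t = 95 ms,
 *   interframe_delay    = 100 - 95  = 5 ms  (CDP_DELAY_STATS_TX_INTERFRAME).
 */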
4234 
4235 #ifdef DISABLE_DP_STATS
4236 static
4237 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4238 				   struct dp_txrx_peer *txrx_peer)
4239 {
4240 }
4241 #else
4242 static inline void
4243 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4244 {
4245 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4246 
4247 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4248 	if (subtype != QDF_PROTO_INVALID)
4249 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4250 					  1);
4251 }
4252 #endif
4253 
4254 #ifndef QCA_ENHANCED_STATS_SUPPORT
4255 #ifdef DP_PEER_EXTENDED_API
4256 static inline uint8_t
4257 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4258 {
4259 	return txrx_peer->mpdu_retry_threshold;
4260 }
4261 #else
4262 static inline uint8_t
4263 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4264 {
4265 	return 0;
4266 }
4267 #endif
4268 
4269 /**
4270  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4271  *
4272  * @ts: Tx completion status
4273  * @txrx_peer: datapath txrx_peer handle
4274  *
4275  * Return: void
4276  */
4277 static inline void
4278 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4279 			     struct dp_txrx_peer *txrx_peer)
4280 {
4281 	uint8_t mcs, pkt_type, dst_mcs_idx;
4282 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4283 
4284 	mcs = ts->mcs;
4285 	pkt_type = ts->pkt_type;
4286 	/* do HW to SW pkt type conversion */
4287 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4288 		    hal_2_dp_pkt_type_map[pkt_type]);
4289 
4290 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4291 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4292 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4293 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4294 				       1);
4295 
4296 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4297 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4298 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4299 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4300 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4301 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4302 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4303 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4304 	if (ts->first_msdu) {
4305 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4306 					ts->transmit_cnt > 1);
4307 
4308 		if (!retry_threshold)
4309 			return;
4310 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4311 					qdf_do_div(ts->transmit_cnt,
4312 						   retry_threshold),
4313 					ts->transmit_cnt > retry_threshold);
4314 	}
4315 }
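/*
 * Example of the retry accounting above (illustrative numbers): with a
 * retry_threshold of 7 and ts->transmit_cnt of 15 on the first MSDU,
 * tx.retries_mpdu is incremented by 1 (transmit_cnt > 1) and
 * tx.mpdu_success_with_retries is incremented by qdf_do_div(15, 7) = 2,
 * since transmit_cnt exceeds the threshold.
 */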
4316 #else
4317 static inline void
4318 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4319 			     struct dp_txrx_peer *txrx_peer)
4320 {
4321 }
4322 #endif
4323 
4324 /**
4325  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4326  *				per wbm ring
4327  *
4328  * @tx_desc: software descriptor head pointer
4329  * @ts: Tx completion status
4330  * @txrx_peer: datapath txrx peer handle
4331  * @ring_id: ring number
4332  *
4333  * Return: None
4334  */
4335 static inline void
4336 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4337 			struct hal_tx_completion_status *ts,
4338 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4339 {
4340 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4341 	uint8_t tid = ts->tid;
4342 	uint32_t length;
4343 	struct cdp_tid_tx_stats *tid_stats;
4344 
4345 	if (!pdev)
4346 		return;
4347 
4348 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4349 		tid = CDP_MAX_DATA_TIDS - 1;
4350 
4351 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4352 
4353 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4354 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4355 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4356 		return;
4357 	}
4358 
4359 	length = qdf_nbuf_len(tx_desc->nbuf);
4360 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4361 
4362 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4363 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4364 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4365 
4366 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4367 		tid_stats->tqm_status_cnt[ts->status]++;
4368 	}
4369 
4370 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4371 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4372 					   ts->transmit_cnt > 1);
4373 
4374 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4375 					   1, ts->transmit_cnt > 2);
4376 
4377 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4378 
4379 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4380 					   ts->msdu_part_of_amsdu);
4381 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4382 					   !ts->msdu_part_of_amsdu);
4383 
4384 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4385 							qdf_system_ticks();
4386 
4387 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4388 
4389 		return;
4390 	}
4391 
4392 	/*
4393 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4394 	 * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to a
4395 	 * hw limitation, there are no completions for failed cases.
4396 	 * Hence updating tx_failed from data path. Please note that
4397 	 * if tx_failed is fixed to be from ppdu, then this has to be
4398 	 * removed
4399 	 */
4400 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4401 
4402 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4403 				   ts->transmit_cnt > DP_RETRY_COUNT);
4404 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4405 
4406 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4407 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4408 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4409 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4410 					      length);
4411 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4412 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4413 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4414 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4415 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4416 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4417 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4418 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4419 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4420 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4421 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4422 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4423 					  tx.dropped.fw_rem_queue_disable, 1);
4424 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4425 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4426 					  tx.dropped.fw_rem_no_match, 1);
4427 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4428 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4429 					  tx.dropped.drop_threshold, 1);
4430 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4431 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4432 					  tx.dropped.drop_link_desc_na, 1);
4433 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4434 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4435 					  tx.dropped.invalid_drop, 1);
4436 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4437 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4438 					  tx.dropped.mcast_vdev_drop, 1);
4439 	} else {
4440 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4441 	}
4442 }
4443 
4444 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4445 /**
4446  * dp_tx_flow_pool_lock() - take flow pool lock
4447  * @soc: core txrx main context
4448  * @tx_desc: tx desc
4449  *
4450  * Return: None
4451  */
4452 static inline
4453 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4454 			  struct dp_tx_desc_s *tx_desc)
4455 {
4456 	struct dp_tx_desc_pool_s *pool;
4457 	uint8_t desc_pool_id;
4458 
4459 	desc_pool_id = tx_desc->pool_id;
4460 	pool = &soc->tx_desc[desc_pool_id];
4461 
4462 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4463 }
4464 
4465 /**
4466  * dp_tx_flow_pool_unlock() - release flow pool lock
4467  * @soc: core txrx main context
4468  * @tx_desc: tx desc
4469  *
4470  * Return: None
4471  */
4472 static inline
4473 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4474 			    struct dp_tx_desc_s *tx_desc)
4475 {
4476 	struct dp_tx_desc_pool_s *pool;
4477 	uint8_t desc_pool_id;
4478 
4479 	desc_pool_id = tx_desc->pool_id;
4480 	pool = &soc->tx_desc[desc_pool_id];
4481 
4482 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4483 }
4484 #else
4485 static inline
4486 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4487 {
4488 }
4489 
4490 static inline
4491 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4492 {
4493 }
4494 #endif
4495 
4496 /**
4497  * dp_tx_notify_completion() - Notify tx completion for this desc
4498  * @soc: core txrx main context
4499  * @vdev: datapath vdev handle
4500  * @tx_desc: tx desc
4501  * @netbuf:  buffer
4502  * @status: tx status
4503  *
4504  * Return: none
4505  */
4506 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4507 					   struct dp_vdev *vdev,
4508 					   struct dp_tx_desc_s *tx_desc,
4509 					   qdf_nbuf_t netbuf,
4510 					   uint8_t status)
4511 {
4512 	void *osif_dev;
4513 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4514 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4515 
4516 	qdf_assert(tx_desc);
4517 
4518 	if (!vdev ||
4519 	    !vdev->osif_vdev) {
4520 		return;
4521 	}
4522 
4523 	osif_dev = vdev->osif_vdev;
4524 	tx_compl_cbk = vdev->tx_comp;
4525 
4526 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4527 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4528 
4529 	if (tx_compl_cbk)
4530 		tx_compl_cbk(netbuf, osif_dev, flag);
4531 }
4532 
4533 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
4534  * @pdev: pdev handle
 * @txrx_peer: datapath txrx_peer handle
4535  * @tid: tid value
4536  * @txdesc_ts: timestamp from txdesc
4537  * @ppdu_id: ppdu id
4538  *
4539  * Return: none
4540  */
4541 #ifdef FEATURE_PERPKT_INFO
4542 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4543 					       struct dp_txrx_peer *txrx_peer,
4544 					       uint8_t tid,
4545 					       uint64_t txdesc_ts,
4546 					       uint32_t ppdu_id)
4547 {
4548 	uint64_t delta_ms;
4549 	struct cdp_tx_sojourn_stats *sojourn_stats;
4550 	struct dp_peer *primary_link_peer = NULL;
4551 	struct dp_soc *link_peer_soc = NULL;
4552 
4553 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4554 		return;
4555 
4556 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4557 			 tid >= CDP_DATA_TID_MAX))
4558 		return;
4559 
4560 	if (qdf_unlikely(!pdev->sojourn_buf))
4561 		return;
4562 
4563 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4564 							   txrx_peer->peer_id,
4565 							   DP_MOD_ID_TX_COMP);
4566 
4567 	if (qdf_unlikely(!primary_link_peer))
4568 		return;
4569 
4570 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4571 		qdf_nbuf_data(pdev->sojourn_buf);
4572 
4573 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4574 	sojourn_stats->cookie = (void *)
4575 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4576 							  primary_link_peer);
4577 
4578 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4579 				txdesc_ts;
4580 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4581 			    delta_ms);
4582 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4583 	sojourn_stats->num_msdus[tid] = 1;
4584 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4585 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4586 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4587 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4588 			     WDI_NO_VAL, pdev->pdev_id);
4589 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4590 	sojourn_stats->num_msdus[tid] = 0;
4591 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4592 
4593 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4594 }
4595 #else
4596 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4597 					       struct dp_txrx_peer *txrx_peer,
4598 					       uint8_t tid,
4599 					       uint64_t txdesc_ts,
4600 					       uint32_t ppdu_id)
4601 {
4602 }
4603 #endif
4604 
4605 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4606 /**
4607  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4608  * @soc: dp_soc handle
4609  * @desc: Tx Descriptor
4610  * @ts: HAL Tx completion descriptor contents
4611  *
4612  * This function is used to send tx completion to packet capture
4613  */
4614 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4615 				       struct dp_tx_desc_s *desc,
4616 				       struct hal_tx_completion_status *ts)
4617 {
4618 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4619 			     desc, ts->peer_id,
4620 			     WDI_NO_VAL, desc->pdev->pdev_id);
4621 }
4622 #endif
4623 
4624 /**
4625  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4626  * @soc: DP Soc handle
4627  * @desc: software Tx descriptor
4628  * @ts: Tx completion status from HAL/HTT descriptor
 * @txrx_peer: datapath txrx_peer handle
4629  *
4630  * Return: none
4631  */
4632 void
4633 dp_tx_comp_process_desc(struct dp_soc *soc,
4634 			struct dp_tx_desc_s *desc,
4635 			struct hal_tx_completion_status *ts,
4636 			struct dp_txrx_peer *txrx_peer)
4637 {
4638 	uint64_t time_latency = 0;
4639 	uint16_t peer_id = DP_INVALID_PEER_ID;
4640 
4641 	/*
4642 	 * m_copy/tx_capture modes are not supported for
4643 	 * scatter gather packets
4644 	 */
4645 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4646 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4647 				qdf_ktime_to_ms(desc->timestamp));
4648 	}
4649 
4650 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4651 
4652 	if (dp_tx_pkt_tracepoints_enabled())
4653 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4654 				    desc->msdu_ext_desc ?
4655 				    desc->msdu_ext_desc->tso_desc : NULL,
4656 				    qdf_ktime_to_ms(desc->timestamp));
4657 
4658 	if (!(desc->msdu_ext_desc)) {
4659 		dp_tx_enh_unmap(soc, desc);
4660 		if (txrx_peer)
4661 			peer_id = txrx_peer->peer_id;
4662 
4663 		if (QDF_STATUS_SUCCESS ==
4664 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4665 			return;
4666 		}
4667 
4668 		if (QDF_STATUS_SUCCESS ==
4669 		    dp_get_completion_indication_for_stack(soc,
4670 							   desc->pdev,
4671 							   txrx_peer, ts,
4672 							   desc->nbuf,
4673 							   time_latency)) {
4674 			dp_send_completion_to_stack(soc,
4675 						    desc->pdev,
4676 						    ts->peer_id,
4677 						    ts->ppdu_id,
4678 						    desc->nbuf);
4679 			return;
4680 		}
4681 	}
4682 
4683 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4684 	dp_tx_comp_free_buf(soc, desc, false);
4685 }
4686 
4687 #ifdef DISABLE_DP_STATS
4688 /**
4689  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4690  * @soc: core txrx main context
 * @vdev: datapath vdev handle
4691  * @tx_desc: tx desc
4692  * @status: tx status
4693  *
4694  * Return: none
4695  */
4696 static inline
4697 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4698 				     struct dp_vdev *vdev,
4699 				     struct dp_tx_desc_s *tx_desc,
4700 				     uint8_t status)
4701 {
4702 }
4703 #else
4704 static inline
4705 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4706 				     struct dp_vdev *vdev,
4707 				     struct dp_tx_desc_s *tx_desc,
4708 				     uint8_t status)
4709 {
4710 	void *osif_dev;
4711 	ol_txrx_stats_rx_fp stats_cbk;
4712 	uint8_t pkt_type;
4713 
4714 	qdf_assert(tx_desc);
4715 
4716 	if (!vdev ||
4717 	    !vdev->osif_vdev ||
4718 	    !vdev->stats_cb)
4719 		return;
4720 
4721 	osif_dev = vdev->osif_vdev;
4722 	stats_cbk = vdev->stats_cb;
4723 
4724 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4725 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4726 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4727 			  &pkt_type);
4728 }
4729 #endif
4730 
4731 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
4732 QDF_STATUS
4733 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
4734 			  uint32_t delta_tsf,
4735 			  uint32_t *delay_us)
4736 {
4737 	uint32_t buffer_ts;
4738 	uint32_t delay;
4739 
4740 	if (!delay_us)
4741 		return QDF_STATUS_E_INVAL;
4742 
4743 	/* If tx_rate_stats_info_valid is 0, the tsf in ts is not valid */
4744 	if (!ts->valid)
4745 		return QDF_STATUS_E_INVAL;
4746 
4747 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4748 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4749 	 * valid up to 29 bits.
4750 	 */
4751 	buffer_ts = ts->buffer_timestamp << 10;
4752 
4753 	delay = ts->tsf - buffer_ts - delta_tsf;
4754 
4755 	if (qdf_unlikely(delay & 0x80000000)) {
4756 		dp_err_rl("delay = 0x%x (-ve)\n"
4757 			  "release_src = %d\n"
4758 			  "ppdu_id = 0x%x\n"
4759 			  "peer_id = 0x%x\n"
4760 			  "tid = 0x%x\n"
4761 			  "release_reason = %d\n"
4762 			  "tsf = %u (0x%x)\n"
4763 			  "buffer_timestamp = %u (0x%x)\n"
4764 			  "delta_tsf = %u (0x%x)\n",
4765 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
4766 			  ts->tid, ts->status, ts->tsf, ts->tsf,
4767 			  ts->buffer_timestamp, ts->buffer_timestamp,
4768 			  delta_tsf, delta_tsf);
4769 
4770 		delay = 0;
4771 		goto end;
4772 	}
4773 
4774 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
4775 	if (delay > 0x1000000) {
4776 		dp_info_rl("----------------------\n"
4777 			   "Tx completion status:\n"
4778 			   "----------------------\n"
4779 			   "release_src = %d\n"
4780 			   "ppdu_id = 0x%x\n"
4781 			   "release_reason = %d\n"
4782 			   "tsf = %u (0x%x)\n"
4783 			   "buffer_timestamp = %u (0x%x)\n"
4784 			   "delta_tsf = %u (0x%x)\n",
4785 			   ts->release_src, ts->ppdu_id, ts->status,
4786 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4787 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4788 		return QDF_STATUS_E_FAILURE;
4789 	}
4790 
4791 
4792 end:
4793 	*delay_us = delay;
4794 
4795 	return QDF_STATUS_SUCCESS;
4796 }
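/*
 * Worked example of the computation above (illustrative values): with
 * ts->buffer_timestamp = 3, buffer_ts = 3 << 10 = 3072 us. For ts->tsf =
 * 10000 us and delta_tsf = 500 us, the reported delay is
 * 10000 - 3072 - 500 = 6428 us. A result with bit 31 set is treated as
 * negative and clamped to 0, while a masked value above 0x1000000 us is
 * rejected with QDF_STATUS_E_FAILURE as implausible.
 */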
4797 
4798 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4799 		      uint32_t delta_tsf)
4800 {
4801 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4802 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4803 						     DP_MOD_ID_CDP);
4804 
4805 	if (!vdev) {
4806 		dp_err_rl("vdev %d does not exist", vdev_id);
4807 		return;
4808 	}
4809 
4810 	vdev->delta_tsf = delta_tsf;
4811 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4812 
4813 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4814 }
4815 #endif
4816 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4817 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4818 				      uint8_t vdev_id, bool enable)
4819 {
4820 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4821 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4822 						     DP_MOD_ID_CDP);
4823 
4824 	if (!vdev) {
4825 		dp_err_rl("vdev %d does not exist", vdev_id);
4826 		return QDF_STATUS_E_FAILURE;
4827 	}
4828 
4829 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4830 
4831 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4832 
4833 	return QDF_STATUS_SUCCESS;
4834 }
4835 
4836 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4837 			       uint32_t *val)
4838 {
4839 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4840 	struct dp_vdev *vdev;
4841 	uint32_t delay_accum;
4842 	uint32_t pkts_accum;
4843 
4844 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4845 	if (!vdev) {
4846 		dp_err_rl("vdev %d does not exist", vdev_id);
4847 		return QDF_STATUS_E_FAILURE;
4848 	}
4849 
4850 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4851 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4852 		return QDF_STATUS_E_FAILURE;
4853 	}
4854 
4855 	/* Average uplink delay based on current accumulated values */
4856 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4857 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4858 
4859 	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
4860 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4861 		 delay_accum, pkts_accum);
4862 
4863 	/* Reset accumulated values to 0 */
4864 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4865 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4866 
4867 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4868 
4869 	return QDF_STATUS_SUCCESS;
4870 }
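/*
 * Example of the averaging above (illustrative values): if ul_delay_accum
 * is 120 ms accumulated over ul_pkts_accum = 40 completed packets since
 * the last query, *val is reported as 3 ms and both accumulators are reset,
 * so each call returns the average uplink delay for the most recent
 * interval only.
 */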
4871 
4872 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4873 				      struct hal_tx_completion_status *ts)
4874 {
4875 	uint32_t ul_delay;
4876 
4877 	if (qdf_unlikely(!vdev)) {
4878 		dp_info_rl("vdev is null or deletion in progress");
4879 		return;
4880 	}
4881 
4882 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4883 		return;
4884 
4885 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
4886 							  vdev->delta_tsf,
4887 							  &ul_delay)))
4888 		return;
4889 
4890 	ul_delay /= 1000; /* in unit of ms */
4891 
4892 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4893 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4894 }
4895 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4896 static inline
4897 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4898 			       struct hal_tx_completion_status *ts)
4899 {
4900 }
4901 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4902 
4903 /**
4904  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4905  * @soc: DP soc handle
4906  * @tx_desc: software descriptor head pointer
4907  * @ts: Tx completion status
4908  * @txrx_peer: txrx peer handle
4909  * @ring_id: ring number
4910  *
4911  * Return: none
4912  */
4913 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4914 				  struct dp_tx_desc_s *tx_desc,
4915 				  struct hal_tx_completion_status *ts,
4916 				  struct dp_txrx_peer *txrx_peer,
4917 				  uint8_t ring_id)
4918 {
4919 	uint32_t length;
4920 	qdf_ether_header_t *eh;
4921 	struct dp_vdev *vdev = NULL;
4922 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4923 	enum qdf_dp_tx_rx_status dp_status;
4924 
4925 	if (!nbuf) {
4926 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4927 		goto out;
4928 	}
4929 
4930 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4931 	length = qdf_nbuf_len(nbuf);
4932 
4933 	dp_status = dp_tx_hw_to_qdf(ts->status);
4934 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4935 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4936 				 QDF_TRACE_DEFAULT_PDEV_ID,
4937 				 qdf_nbuf_data_addr(nbuf),
4938 				 sizeof(qdf_nbuf_data(nbuf)),
4939 				 tx_desc->id, ts->status, dp_status));
4940 
4941 	dp_tx_comp_debug("-------------------- \n"
4942 			 "Tx Completion Stats: \n"
4943 			 "-------------------- \n"
4944 			 "ack_frame_rssi = %d \n"
4945 			 "first_msdu = %d \n"
4946 			 "last_msdu = %d \n"
4947 			 "msdu_part_of_amsdu = %d \n"
4948 			 "rate_stats valid = %d \n"
4949 			 "bw = %d \n"
4950 			 "pkt_type = %d \n"
4951 			 "stbc = %d \n"
4952 			 "ldpc = %d \n"
4953 			 "sgi = %d \n"
4954 			 "mcs = %d \n"
4955 			 "ofdma = %d \n"
4956 			 "tones_in_ru = %d \n"
4957 			 "tsf = %d \n"
4958 			 "ppdu_id = %d \n"
4959 			 "transmit_cnt = %d \n"
4960 			 "tid = %d \n"
4961 			 "peer_id = %d\n"
4962 			 "tx_status = %d\n",
4963 			 ts->ack_frame_rssi, ts->first_msdu,
4964 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4965 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4966 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4967 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4968 			 ts->transmit_cnt, ts->tid, ts->peer_id,
4969 			 ts->status);
4970 
4971 	/* Update SoC level stats */
4972 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4973 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4974 
4975 	if (!txrx_peer) {
4976 		dp_info_rl("peer is null or deletion in progress");
4977 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4978 		goto out;
4979 	}
4980 	vdev = txrx_peer->vdev;
4981 
4982 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4983 	dp_tx_update_uplink_delay(soc, vdev, ts);
4984 
4985 	/* check tx complete notification */
4986 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
4987 		dp_tx_notify_completion(soc, vdev, tx_desc,
4988 					nbuf, ts->status);
4989 
4990 	/* Update per-packet stats for mesh mode */
4991 	if (qdf_unlikely(vdev->mesh_vdev) &&
4992 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4993 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4994 
4995 	/* Update peer level stats */
4996 	if (qdf_unlikely(txrx_peer->bss_peer &&
4997 			 vdev->opmode == wlan_op_mode_ap)) {
4998 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4999 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5000 						      length);
5001 
5002 			if (txrx_peer->vdev->tx_encap_type ==
5003 				htt_cmn_pkt_type_ethernet &&
5004 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5005 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5006 							      tx.bcast, 1,
5007 							      length);
5008 			}
5009 		}
5010 	} else {
5011 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
5012 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5013 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5014 						      1, length);
5015 			if (qdf_unlikely(txrx_peer->in_twt)) {
5016 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5017 							      tx.tx_success_twt,
5018 							      1, length);
5019 			}
5020 		}
5021 	}
5022 
5023 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
5024 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5025 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5026 				     ts, ts->tid);
5027 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5028 
5029 #ifdef QCA_SUPPORT_RDK_STATS
5030 	if (soc->peerstats_enabled)
5031 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5032 					    qdf_ktime_to_ms(tx_desc->timestamp),
5033 					    ts->ppdu_id);
5034 #endif
5035 
5036 out:
5037 	return;
5038 }
5039 
5040 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5041 	defined(QCA_ENHANCED_STATS_SUPPORT)
5042 /*
5043  * dp_tx_update_peer_basic_stats(): Update peer basic stats
5044  * @txrx_peer: Datapath txrx_peer handle
5045  * @length: Length of the packet
5046  * @tx_status: Tx status from TQM/FW
5047  * @update: enhanced flag value present in dp_pdev
5048  *
5049  * Return: none
5050  */
5051 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5052 				   uint32_t length, uint8_t tx_status,
5053 				   bool update)
5054 {
5055 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5056 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5057 
5058 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5059 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5060 	}
5061 }
5062 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5063 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5064 				   uint32_t length, uint8_t tx_status,
5065 				   bool update)
5066 {
5067 	if (!txrx_peer->hw_txrx_stats_en) {
5068 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5069 
5070 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5071 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5072 	}
5073 }
5074 
5075 #else
5076 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5077 				   uint32_t length, uint8_t tx_status,
5078 				   bool update)
5079 {
5080 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5081 
5082 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5083 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5084 }
5085 #endif
5086 
5087 /*
5088  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
5089  * @next: next Tx descriptor whose nbuf is to be prefetched
5090  *
5091  * Return: none
5092  */
5093 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5094 static inline
5095 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5096 {
5097 	qdf_nbuf_t nbuf = NULL;
5098 
5099 	if (next)
5100 		nbuf = next->nbuf;
5101 	if (nbuf) {
5102 		/* prefetch skb->next and first few bytes of skb->cb */
5103 		qdf_prefetch(next->shinfo_addr);
5104 		qdf_prefetch(nbuf);
5105 		/* prefetch skb fields present in different cachelines */
5106 		qdf_prefetch(&nbuf->len);
5107 		qdf_prefetch(&nbuf->users);
5108 	}
5109 }
5110 #else
5111 static inline
5112 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5113 {
5114 }
5115 #endif
5116 
5117 /**
5118  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5119  * @soc: core txrx main context
5120  * @desc: software descriptor
5121  *
5122  * Return: true when packet is reinjected
5123  */
5124 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5125 	defined(WLAN_MCAST_MLO)
5126 static inline bool
5127 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5128 {
5129 	struct dp_vdev *vdev = NULL;
5130 
5131 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5132 		if (!soc->arch_ops.dp_tx_mcast_handler)
5133 			return false;
5134 
5135 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5136 					     DP_MOD_ID_REINJECT);
5137 
5138 		if (qdf_unlikely(!vdev)) {
5139 			dp_tx_comp_info_rl("Unable to get vdev ref %d",
5140 					   desc->id);
5141 			return false;
5142 		}
5143 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5144 				 qdf_nbuf_len(desc->nbuf));
5145 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5146 		dp_tx_desc_release(desc, desc->pool_id);
5147 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5148 		return true;
5149 	}
5150 
5151 	return false;
5152 }
5153 #else
5154 static inline bool
5155 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5156 {
5157 	return false;
5158 }
5159 #endif
5160 
5161 /**
5162  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
5163  * @soc: core txrx main context
5164  * @comp_head: software descriptor head pointer
5165  * @ring_id: ring number
5166  *
5167  * This function will process batch of descriptors reaped by dp_tx_comp_handler
5168  * and release the software descriptors after processing is complete
5169  *
5170  * Return: none
5171  */
5172 static void
5173 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5174 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5175 {
5176 	struct dp_tx_desc_s *desc;
5177 	struct dp_tx_desc_s *next;
5178 	struct hal_tx_completion_status ts;
5179 	struct dp_txrx_peer *txrx_peer = NULL;
5180 	uint16_t peer_id = DP_INVALID_PEER;
5181 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5182 
5183 	desc = comp_head;
5184 
5185 	while (desc) {
5186 		next = desc->next;
5187 		dp_tx_prefetch_next_nbuf_data(next);
5188 
5189 		if (peer_id != desc->peer_id) {
5190 			if (txrx_peer)
5191 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5192 							  DP_MOD_ID_TX_COMP);
5193 			peer_id = desc->peer_id;
5194 			txrx_peer =
5195 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5196 							   &txrx_ref_handle,
5197 							   DP_MOD_ID_TX_COMP);
5198 		}
5199 
5200 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5201 			desc = next;
5202 			continue;
5203 		}
5204 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5205 			struct dp_pdev *pdev = desc->pdev;
5206 
5207 			if (qdf_likely(txrx_peer))
5208 				dp_tx_update_peer_basic_stats(txrx_peer,
5209 							      desc->length,
5210 							      desc->tx_status,
5211 							      false);
5212 			qdf_assert(pdev);
5213 			dp_tx_outstanding_dec(pdev);
5214 
5215 			/*
5216 			 * Calling a QDF wrapper here creates significant
5217 			 * performance impact, so the wrapper call is avoided here
5218 			 */
5219 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5220 					       desc->id, DP_TX_COMP_UNMAP);
5221 			dp_tx_nbuf_unmap(soc, desc);
5222 			qdf_nbuf_free_simple(desc->nbuf);
5223 			dp_tx_desc_free(soc, desc, desc->pool_id);
5224 			desc = next;
5225 			continue;
5226 		}
5227 
5228 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5229 
5230 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5231 					     ring_id);
5232 
5233 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5234 
5235 		dp_tx_desc_release(desc, desc->pool_id);
5236 		desc = next;
5237 	}
5238 	if (txrx_peer)
5239 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5240 }
5241 
5242 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5243 static inline
5244 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5245 				   int max_reap_limit)
5246 {
5247 	bool limit_hit = false;
5248 
5249 	limit_hit =
5250 		(num_reaped >= max_reap_limit) ? true : false;
5251 
5252 	if (limit_hit)
5253 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5254 
5255 	return limit_hit;
5256 }
5257 
5258 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5259 {
5260 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5261 }
5262 
5263 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5264 {
5265 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5266 
5267 	return cfg->tx_comp_loop_pkt_limit;
5268 }
5269 #else
5270 static inline
5271 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5272 				   int max_reap_limit)
5273 {
5274 	return false;
5275 }
5276 
5277 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5278 {
5279 	return false;
5280 }
5281 
5282 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5283 {
5284 	return 0;
5285 }
5286 #endif
5287 
5288 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5289 static inline int
5290 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5291 				  int *max_reap_limit)
5292 {
5293 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5294 							       max_reap_limit);
5295 }
5296 #else
5297 static inline int
5298 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5299 				  int *max_reap_limit)
5300 {
5301 	return 0;
5302 }
5303 #endif
5304 
5305 #ifdef DP_TX_TRACKING
5306 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5307 {
5308 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5309 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5310 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5311 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5312 	}
5313 }
5314 #endif
5315 
5316 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5317 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5318 			    uint32_t quota)
5319 {
5320 	void *tx_comp_hal_desc;
5321 	void *last_prefetched_hw_desc = NULL;
5322 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5323 	hal_soc_handle_t hal_soc;
5324 	uint8_t buffer_src;
5325 	struct dp_tx_desc_s *tx_desc = NULL;
5326 	struct dp_tx_desc_s *head_desc = NULL;
5327 	struct dp_tx_desc_s *tail_desc = NULL;
5328 	uint32_t num_processed = 0;
5329 	uint32_t count;
5330 	uint32_t num_avail_for_reap = 0;
5331 	bool force_break = false;
5332 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5333 	int max_reap_limit, ring_near_full;
5334 
5335 	DP_HIST_INIT();
5336 
5337 more_data:
5338 
5339 	hal_soc = soc->hal_soc;
5340 	/* Re-initialize local variables to be re-used */
5341 	head_desc = NULL;
5342 	tail_desc = NULL;
5343 	count = 0;
5344 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5345 
5346 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5347 							   &max_reap_limit);
5348 
5349 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5350 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5351 		return 0;
5352 	}
5353 
5354 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
5355 
5356 	if (num_avail_for_reap >= quota)
5357 		num_avail_for_reap = quota;
5358 
5359 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5360 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5361 							    hal_ring_hdl,
5362 							    num_avail_for_reap);
5363 
5364 	/* Find head descriptor from completion ring */
5365 	while (qdf_likely(num_avail_for_reap--)) {
5366 
5367 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5368 		if (qdf_unlikely(!tx_comp_hal_desc))
5369 			break;
5370 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5371 							   tx_comp_hal_desc);
5372 
5373 		/* If this buffer was not released by TQM or FW, it is not a
5374 		 * Tx completion indication; log the error and skip it. */
5375 		if (qdf_unlikely(buffer_src !=
5376 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5377 				 (qdf_unlikely(buffer_src !=
5378 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5379 			uint8_t wbm_internal_error;
5380 
5381 			dp_err_rl(
5382 				"Tx comp release_src != TQM | FW but from %d",
5383 				buffer_src);
5384 			hal_dump_comp_desc(tx_comp_hal_desc);
5385 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5386 
5387 			/* When WBM sees NULL buffer_addr_info in any of
5388 			 * ingress rings it sends an error indication,
5389 			 * with wbm_internal_error=1, to a specific ring.
5390 			 * The WBM2SW ring used to indicate these errors is
5391 			 * fixed in HW, and that ring is being used as Tx
5392 			 * completion ring. These errors are not related to
5393 			 * Tx completions, and should just be ignored
5394 			 */
5395 			wbm_internal_error = hal_get_wbm_internal_error(
5396 							hal_soc,
5397 							tx_comp_hal_desc);
5398 
5399 			if (wbm_internal_error) {
5400 				dp_err_rl("Tx comp wbm_internal_error!!");
5401 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5402 
5403 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5404 								buffer_src)
5405 					dp_handle_wbm_internal_error(
5406 						soc,
5407 						tx_comp_hal_desc,
5408 						hal_tx_comp_get_buffer_type(
5409 							tx_comp_hal_desc));
5410 
5411 			} else {
5412 				dp_err_rl("Tx comp wbm_internal_error false");
5413 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5414 			}
5415 			continue;
5416 		}
5417 
5418 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5419 							       tx_comp_hal_desc,
5420 							       &tx_desc);
5421 		if (!tx_desc) {
5422 			dp_err("unable to retrieve tx_desc!");
5423 			QDF_BUG(0);
5424 			continue;
5425 		}
5426 		tx_desc->buffer_src = buffer_src;
5427 		/*
5428 		 * If the release source is FW, process the HTT status
5429 		 */
5430 		if (qdf_unlikely(buffer_src ==
5431 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5432 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5433 
5434 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5435 					htt_tx_status);
5436 			/* Collect hw completion contents */
5437 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5438 					      &tx_desc->comp, 1);
5439 			soc->arch_ops.dp_tx_process_htt_completion(
5440 							soc,
5441 							tx_desc,
5442 							htt_tx_status,
5443 							ring_id);
5444 		} else {
5445 			tx_desc->tx_status =
5446 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5447 			tx_desc->buffer_src = buffer_src;
5448 			/*
5449 			 * If fast completion mode is enabled, extended
5450 			 * metadata is not copied from the descriptor
5451 			 */
5452 			if (qdf_likely(tx_desc->flags &
5453 						DP_TX_DESC_FLAG_SIMPLE))
5454 				goto add_to_pool;
5455 
5456 			/*
5457 			 * If the descriptor is already freed in vdev_detach,
5458 			 * continue to next descriptor
5459 			 */
5460 			if (qdf_unlikely
5461 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5462 				 !tx_desc->flags)) {
5463 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5464 						   tx_desc->id);
5465 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5466 				dp_tx_desc_check_corruption(tx_desc);
5467 				continue;
5468 			}
5469 
5470 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5471 				dp_tx_comp_info_rl("pdev in down state %d",
5472 						   tx_desc->id);
5473 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5474 				dp_tx_comp_free_buf(soc, tx_desc, false);
5475 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5476 				goto next_desc;
5477 			}
5478 
5479 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5480 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5481 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
5482 						 tx_desc->flags, tx_desc->id);
5483 				qdf_assert_always(0);
5484 			}
5485 
5486 			/* Collect hw completion contents */
5487 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5488 					      &tx_desc->comp, 1);
5489 add_to_pool:
5490 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5491 
5492 			/* First ring descriptor of this reap cycle */
5493 			if (!head_desc) {
5494 				head_desc = tx_desc;
5495 				tail_desc = tx_desc;
5496 			}
5497 
5498 			tail_desc->next = tx_desc;
5499 			tx_desc->next = NULL;
5500 			tail_desc = tx_desc;
5501 		}
5502 next_desc:
5503 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5504 
5505 		/*
5506 		 * If the processed packet count exceeds the given quota,
5507 		 * stop processing
5508 		 */
5509 
5510 		count++;
5511 
5512 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5513 					       num_avail_for_reap,
5514 					       hal_ring_hdl,
5515 					       &last_prefetched_hw_desc,
5516 					       &last_prefetched_sw_desc);
5517 
5518 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5519 			break;
5520 	}
5521 
5522 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5523 
5524 	/* Process the reaped descriptors */
5525 	if (head_desc)
5526 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5527 
5528 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5529 
5530 	/*
5531 	 * If we are processing in the near-full condition, there are three
5532 	 * scenarios:
5533 	 * 1) Ring entries have reached the critical state
5534 	 * 2) Ring entries are still near the high threshold
5535 	 * 3) Ring entries are below the safe level
5536 	 * One more loop will move the state to normal processing and yield
5537 	 */
5538 	if (ring_near_full)
5539 		goto more_data;
5540 
5541 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5542 
5543 		if (num_processed >= quota)
5544 			force_break = true;
5545 
5546 		if (!force_break &&
5547 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5548 						  hal_ring_hdl)) {
5549 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5550 			if (!hif_exec_should_yield(soc->hif_handle,
5551 						   int_ctx->dp_intr_id))
5552 				goto more_data;
5553 		}
5554 	}
5555 	DP_TX_HIST_STATS_PER_PDEV();
5556 
5557 	return num_processed;
5558 }
5559 
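/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * per-ring service routine that reaps Tx completions within a NAPI-style
 * budget. Names such as my_tx_comp_service() are assumptions; only
 * dp_tx_comp_handler() above is defined in this file.
 *
 *	static uint32_t my_tx_comp_service(struct dp_intr *int_ctx,
 *					   struct dp_soc *soc,
 *					   uint8_t ring_id, uint32_t budget)
 *	{
 *		// Reap up to 'budget' completions from the WBM2SW ring;
 *		// the handler returns the number of entries it processed.
 *		return dp_tx_comp_handler(int_ctx, soc,
 *					  soc->tx_comp_ring[ring_id].hal_srng,
 *					  ring_id, budget);
 *	}
 */
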
5560 #ifdef FEATURE_WLAN_TDLS
5561 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5562 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5563 {
5564 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5565 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5566 						     DP_MOD_ID_TDLS);
5567 
5568 	if (!vdev) {
5569 		dp_err("vdev handle for id %d is NULL", vdev_id);
5570 		return NULL;
5571 	}
5572 
5573 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5574 		vdev->is_tdls_frame = true;
5575 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5576 
5577 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5578 }
5579 #endif
5580 
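/*
 * Usage sketch (illustrative only, assuming FEATURE_WLAN_TDLS): pushing a
 * TDLS frame through dp_tx_non_std() above while keeping nbuf ownership
 * with the caller. The variable names are assumptions; a non-NULL return
 * conventionally means the frame was not consumed by the driver.
 *
 *	qdf_nbuf_t unsent;
 *
 *	// OL_TX_SPEC_NO_FREE marks the vdev as carrying a TDLS frame so
 *	// the driver does not free the nbuf itself on completion.
 *	unsent = dp_tx_non_std(soc_hdl, vdev_id, OL_TX_SPEC_NO_FREE, nbuf);
 *	if (unsent)
 *		qdf_nbuf_free(unsent);	// caller decides how to dispose of it
 */
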
5581 /**
5582  * dp_tx_vdev_attach() - attach vdev to dp tx
5583  * @vdev: virtual device instance
5584  *
5585  * Return: QDF_STATUS_SUCCESS: success
5586  *         QDF_STATUS_E_RESOURCES: Error return
5587  */
5588 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5589 {
5590 	int pdev_id;
5591 	/*
5592 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5593 	 */
5594 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5595 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5596 
5597 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5598 				       vdev->vdev_id);
5599 
5600 	pdev_id =
5601 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5602 						       vdev->pdev->pdev_id);
5603 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5604 
5605 	/*
5606 	 * Set HTT Extension Valid bit to 0 by default
5607 	 */
5608 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5609 
5610 	dp_tx_vdev_update_search_flags(vdev);
5611 
5612 	return QDF_STATUS_SUCCESS;
5613 }
5614 
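/*
 * Illustrative summary (not additional code in the driver): after
 * dp_tx_vdev_attach() above, vdev->htt_tcl_metadata carries
 *
 *	type      : DP_TCL_METADATA_TYPE_VDEV_BASED
 *	vdev id   : vdev->vdev_id
 *	pdev id   : target pdev id derived from vdev->pdev->pdev_id
 *	valid_htt : 0 (no HTT extension descriptor by default)
 */
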
5615 #ifndef FEATURE_WDS
5616 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5617 {
5618 	return false;
5619 }
5620 #endif
5621 
5622 /**
5623  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5624  * @vdev: virtual device instance
5625  *
5626  * Return: void
5627  *
5628  */
5629 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5630 {
5631 	struct dp_soc *soc = vdev->pdev->soc;
5632 
5633 	/*
5634 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
5635 	 * for TDLS link
5636 	 *
5637 	 * Enable AddrY (SA based search) only for non-WDS STA and
5638 	 * ProxySTA VAP (in HKv1) modes.
5639 	 *
5640 	 * In all other VAP modes, only DA based search should be
5641 	 * enabled
5642 	 */
5643 	if (vdev->opmode == wlan_op_mode_sta &&
5644 	    vdev->tdls_link_connected)
5645 		vdev->hal_desc_addr_search_flags =
5646 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5647 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5648 		 !dp_tx_da_search_override(vdev))
5649 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5650 	else
5651 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5652 
5653 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
5654 		vdev->search_type = soc->sta_mode_search_policy;
5655 	else
5656 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5657 }
5658 
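/*
 * Summary of the settings chosen above (AddrX = DA based search,
 * AddrY = SA based search); dp_tx_da_search_override() only has effect
 * when FEATURE_WDS is compiled in:
 *
 *	STA with TDLS link connected   -> ADDRX_EN | ADDRY_EN
 *	STA without DA search override -> ADDRY_EN
 *	all other cases                -> ADDRX_EN
 *
 *	search_type = soc->sta_mode_search_policy for non-TDLS STA,
 *	otherwise HAL_TX_ADDR_SEARCH_DEFAULT.
 */
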
5659 static inline bool
5660 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5661 			  struct dp_vdev *vdev,
5662 			  struct dp_tx_desc_s *tx_desc)
5663 {
5664 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
5665 		return false;
5666 
5667 	/*
5668 	 * If a vdev is given, only check whether the desc's
5669 	 * vdev matches. If vdev is NULL, check whether the
5670 	 * desc's pdev matches.
5671 	 */
5672 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
5673 		(tx_desc->pdev == pdev);
5674 }
5675 
5676 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5677 /**
5678  * dp_tx_desc_flush() - release resources associated
5679  *                      with TX descriptors
5680  *
5681  * @pdev: Handle to DP pdev structure
5682  * @vdev: virtual device instance
5683  * NULL: no specific Vdev is required; check all allocated TX desc
5684  * on this pdev.
5685  * Non-NULL: only check the allocated TX Desc associated with this Vdev.
5686  *
5687  * @force_free:
5688  * true: flush the TX desc.
5689  * false: only reset the Vdev in each allocated TX desc
5690  * associated with the current Vdev.
5691  *
5692  * This function goes through the TX desc pool to flush
5693  * the outstanding TX data or to reset the Vdev in each associated
5694  * TX Desc.
5695  */
5696 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5697 		      bool force_free)
5698 {
5699 	uint8_t i;
5700 	uint32_t j;
5701 	uint32_t num_desc, page_id, offset;
5702 	uint16_t num_desc_per_page;
5703 	struct dp_soc *soc = pdev->soc;
5704 	struct dp_tx_desc_s *tx_desc = NULL;
5705 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5706 
5707 	if (!vdev && !force_free) {
5708 		dp_err("Reset TX desc vdev, Vdev param is required!");
5709 		return;
5710 	}
5711 
5712 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
5713 		tx_desc_pool = &soc->tx_desc[i];
5714 		if (!(tx_desc_pool->pool_size) ||
5715 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
5716 		    !(tx_desc_pool->desc_pages.cacheable_pages))
5717 			continue;
5718 
5719 		/*
5720 		 * Add flow pool lock protection in case the pool is freed
5721 		 * because all tx_desc entries are recycled during TX
5722 		 * completion handling. This is not necessary for a force flush:
5723 		 * a. a double lock would occur if dp_tx_desc_release is
5724 		 *    also trying to acquire it.
5725 		 * b. dp interrupts have been disabled before the force TX desc
5726 		 *    flush in dp_pdev_deinit().
5727 		 */
5728 		if (!force_free)
5729 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
5730 		num_desc = tx_desc_pool->pool_size;
5731 		num_desc_per_page =
5732 			tx_desc_pool->desc_pages.num_element_per_page;
5733 		for (j = 0; j < num_desc; j++) {
5734 			page_id = j / num_desc_per_page;
5735 			offset = j % num_desc_per_page;
5736 
5737 			if (qdf_unlikely(!(tx_desc_pool->
5738 					 desc_pages.cacheable_pages)))
5739 				break;
5740 
5741 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5742 
5743 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5744 				/*
5745 				 * Free TX desc if force free is
5746 				 * required, otherwise only reset vdev
5747 				 * in this TX desc.
5748 				 */
5749 				if (force_free) {
5750 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5751 					dp_tx_comp_free_buf(soc, tx_desc,
5752 							    false);
5753 					dp_tx_desc_release(tx_desc, i);
5754 				} else {
5755 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5756 				}
5757 			}
5758 		}
5759 		if (!force_free)
5760 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5761 	}
5762 }
5763 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5764 /**
5765  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5766  *
5767  * @soc: Handle to DP soc structure
5768  * @tx_desc: pointer of one TX desc
5769  * @desc_pool_id: TX Desc pool id
5770  */
5771 static inline void
5772 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5773 		      uint8_t desc_pool_id)
5774 {
5775 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5776 
5777 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5778 
5779 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5780 }
5781 
5782 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5783 		      bool force_free)
5784 {
5785 	uint8_t i, num_pool;
5786 	uint32_t j;
5787 	uint32_t num_desc, page_id, offset;
5788 	uint16_t num_desc_per_page;
5789 	struct dp_soc *soc = pdev->soc;
5790 	struct dp_tx_desc_s *tx_desc = NULL;
5791 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5792 
5793 	if (!vdev && !force_free) {
5794 		dp_err("Reset TX desc vdev, Vdev param is required!");
5795 		return;
5796 	}
5797 
5798 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5799 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5800 
5801 	for (i = 0; i < num_pool; i++) {
5802 		tx_desc_pool = &soc->tx_desc[i];
5803 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5804 			continue;
5805 
5806 		num_desc_per_page =
5807 			tx_desc_pool->desc_pages.num_element_per_page;
5808 		for (j = 0; j < num_desc; j++) {
5809 			page_id = j / num_desc_per_page;
5810 			offset = j % num_desc_per_page;
5811 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5812 
5813 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5814 				if (force_free) {
5815 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5816 					dp_tx_comp_free_buf(soc, tx_desc,
5817 							    false);
5818 					dp_tx_desc_release(tx_desc, i);
5819 				} else {
5820 					dp_tx_desc_reset_vdev(soc, tx_desc,
5821 							      i);
5822 				}
5823 			}
5824 		}
5825 	}
5826 }
5827 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5828 
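/*
 * Usage sketch (illustrative only) of the two flush modes implemented
 * above; the pdev deinit call site is an assumption based on the
 * force-flush comments in this file.
 *
 *	// vdev detach path: keep the descriptors, only detach them from
 *	// the vdev (see dp_tx_vdev_detach() below)
 *	dp_tx_desc_flush(pdev, vdev, false);
 *
 *	// pdev deinit path, interrupts already disabled: free any
 *	// outstanding buffers and release the descriptors
 *	dp_tx_desc_flush(pdev, NULL, true);
 */
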
5829 /**
5830  * dp_tx_vdev_detach() - detach vdev from dp tx
5831  * @vdev: virtual device instance
5832  *
5833  * Return: QDF_STATUS_SUCCESS: success
5834  *         QDF_STATUS_E_RESOURCES: Error return
5835  */
5836 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5837 {
5838 	struct dp_pdev *pdev = vdev->pdev;
5839 
5840 	/* Reset the Vdev in each TX desc associated with this Vdev */
5841 	dp_tx_desc_flush(pdev, vdev, false);
5842 
5843 	return QDF_STATUS_SUCCESS;
5844 }
5845 
5846 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5847 /* Pools will be allocated dynamically */
5848 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5849 					   int num_desc)
5850 {
5851 	uint8_t i;
5852 
5853 	for (i = 0; i < num_pool; i++) {
5854 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5855 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5856 	}
5857 
5858 	return QDF_STATUS_SUCCESS;
5859 }
5860 
5861 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5862 					  uint32_t num_desc)
5863 {
5864 	return QDF_STATUS_SUCCESS;
5865 }
5866 
5867 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5868 {
5869 }
5870 
5871 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5872 {
5873 	uint8_t i;
5874 
5875 	for (i = 0; i < num_pool; i++)
5876 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5877 }
5878 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5879 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5880 					   uint32_t num_desc)
5881 {
5882 	uint8_t i, count;
5883 
5884 	/* Allocate software Tx descriptor pools */
5885 	for (i = 0; i < num_pool; i++) {
5886 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5887 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5888 				  FL("Tx Desc Pool alloc %d failed %pK"),
5889 				  i, soc);
5890 			goto fail;
5891 		}
5892 	}
5893 	return QDF_STATUS_SUCCESS;
5894 
5895 fail:
5896 	for (count = 0; count < i; count++)
5897 		dp_tx_desc_pool_free(soc, count);
5898 
5899 	return QDF_STATUS_E_NOMEM;
5900 }
5901 
5902 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5903 					  uint32_t num_desc)
5904 {
5905 	uint8_t i;
5906 	for (i = 0; i < num_pool; i++) {
5907 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5908 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5909 				  FL("Tx Desc Pool init %d failed %pK"),
5910 				  i, soc);
5911 			return QDF_STATUS_E_NOMEM;
5912 		}
5913 	}
5914 	return QDF_STATUS_SUCCESS;
5915 }
5916 
5917 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5918 {
5919 	uint8_t i;
5920 
5921 	for (i = 0; i < num_pool; i++)
5922 		dp_tx_desc_pool_deinit(soc, i);
5923 }
5924 
5925 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5926 {
5927 	uint8_t i;
5928 
5929 	for (i = 0; i < num_pool; i++)
5930 		dp_tx_desc_pool_free(soc, i);
5931 }
5932 
5933 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5934 
5935 /**
5936  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5937  * @soc: core txrx main context
5938  * @num_pool: number of pools
5939  *
5940  */
5941 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5942 {
5943 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5944 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5945 }
5946 
5947 /**
5948  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5949  * @soc: core txrx main context
5950  * @num_pool: number of pools
5951  *
5952  */
5953 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5954 {
5955 	dp_tx_tso_desc_pool_free(soc, num_pool);
5956 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5957 }
5958 
5959 /**
5960  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5961  * @soc: core txrx main context
5962  *
5963  * This function frees all tx related descriptors as below
5964  * 1. Regular TX descriptors (static pools)
5965  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5966  * 3. TSO descriptors
5967  *
5968  */
5969 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5970 {
5971 	uint8_t num_pool;
5972 
5973 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5974 
5975 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5976 	dp_tx_ext_desc_pool_free(soc, num_pool);
5977 	dp_tx_delete_static_pools(soc, num_pool);
5978 }
5979 
5980 /**
5981  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5982  * @soc: core txrx main context
5983  *
5984  * This function de-initializes all tx related descriptors as below
5985  * 1. Regular TX descriptors (static pools)
5986  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5987  * 3. TSO descriptors
5988  *
5989  */
5990 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5991 {
5992 	uint8_t num_pool;
5993 
5994 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5995 
5996 	dp_tx_flow_control_deinit(soc);
5997 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5998 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5999 	dp_tx_deinit_static_pools(soc, num_pool);
6000 }
6001 
6002 /**
6003  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
6004  * @soc: DP soc handle
6005  * @num_pool: Number of pools
6006  * @num_desc: Number of descriptors
6007  *
6008  * Return: QDF_STATUS_E_FAILURE on failure or
6009  * QDF_STATUS_SUCCESS on success
6010  */
6011 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6012 					 uint8_t num_pool,
6013 					 uint32_t num_desc)
6014 {
6015 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6016 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6017 		return QDF_STATUS_E_FAILURE;
6018 	}
6019 
6020 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6021 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6022 		       num_pool, soc);
6023 		return QDF_STATUS_E_FAILURE;
6024 	}
6025 	return QDF_STATUS_SUCCESS;
6026 }
6027 
6028 /**
6029  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6030  * @soc: DP soc handle
6031  * @num_pool: Number of pools
6032  * @num_desc: Number of descriptors
6033  *
6034  * Initialize TSO descriptor pools
6035  *
6036  * Return: QDF_STATUS_E_FAILURE on failure or
6037  * QDF_STATUS_SUCCESS on success
6038  */
6039 
6040 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6041 					uint8_t num_pool,
6042 					uint32_t num_desc)
6043 {
6044 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6045 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
6046 		return QDF_STATUS_E_FAILURE;
6047 	}
6048 
6049 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6050 		dp_err("TSO Num of seg Pool init %d failed %pK",
6051 		       num_pool, soc);
6052 		return QDF_STATUS_E_FAILURE;
6053 	}
6054 	return QDF_STATUS_SUCCESS;
6055 }
6056 
6057 /**
6058  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
6059  * @soc: core txrx main context
6060  *
6061  * This function allocates memory for the following descriptor pools
6062  * 1. regular sw tx descriptor pools (static pools)
6063  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6064  * 3. TSO descriptor pools
6065  *
6066  * Return: QDF_STATUS_SUCCESS: success
6067  *         QDF_STATUS_E_RESOURCES: Error return
6068  */
6069 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6070 {
6071 	uint8_t num_pool;
6072 	uint32_t num_desc;
6073 	uint32_t num_ext_desc;
6074 
6075 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6076 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6077 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6078 
6079 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6080 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6081 		  __func__, num_pool, num_desc);
6082 
6083 	if ((num_pool > MAX_TXDESC_POOLS) ||
6084 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6085 		goto fail1;
6086 
6087 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6088 		goto fail1;
6089 
6090 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
6091 		goto fail2;
6092 
6093 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6094 		return QDF_STATUS_SUCCESS;
6095 
6096 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6097 		goto fail3;
6098 
6099 	return QDF_STATUS_SUCCESS;
6100 
6101 fail3:
6102 	dp_tx_ext_desc_pool_free(soc, num_pool);
6103 fail2:
6104 	dp_tx_delete_static_pools(soc, num_pool);
6105 fail1:
6106 	return QDF_STATUS_E_RESOURCES;
6107 }
6108 
6109 /**
6110  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
6111  * @soc: core txrx main context
6112  *
6113  * This function initializes the following TX descriptor pools
6114  * 1. regular sw tx descriptor pools (static pools)
6115  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6116  * 3. TSO descriptor pools
6117  *
6118  * Return: QDF_STATUS_SUCCESS: success
6119  *	   QDF_STATUS_E_RESOURCES: Error return
6120  */
6121 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6122 {
6123 	uint8_t num_pool;
6124 	uint32_t num_desc;
6125 	uint32_t num_ext_desc;
6126 
6127 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6128 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6129 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6130 
6131 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6132 		goto fail1;
6133 
6134 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6135 		goto fail2;
6136 
6137 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6138 		return QDF_STATUS_SUCCESS;
6139 
6140 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6141 		goto fail3;
6142 
6143 	dp_tx_flow_control_init(soc);
6144 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6145 	return QDF_STATUS_SUCCESS;
6146 
6147 fail3:
6148 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6149 fail2:
6150 	dp_tx_deinit_static_pools(soc, num_pool);
6151 fail1:
6152 	return QDF_STATUS_E_RESOURCES;
6153 }
6154 
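/*
 * Illustrative pairing of the soc-level pool APIs above (a sketch of the
 * intended order, not a call sequence copied from the attach/detach code):
 *
 *	dp_soc_tx_desc_sw_pools_alloc(soc);   // reserve pool memory
 *	dp_soc_tx_desc_sw_pools_init(soc);    // initialize/link descriptors
 *	...				       // Tx traffic
 *	dp_soc_tx_desc_sw_pools_deinit(soc);  // undo init
 *	dp_soc_tx_desc_sw_pools_free(soc);    // release pool memory
 */
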
6155 /**
6156  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
6157  * @txrx_soc: dp soc handle
6158  *
6159  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6160  *			QDF_STATUS_E_FAILURE
6161  */
6162 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6163 {
6164 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6165 	uint8_t num_pool;
6166 	uint32_t num_desc;
6167 	uint32_t num_ext_desc;
6168 
6169 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6170 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6171 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6172 
6173 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6174 		return QDF_STATUS_E_FAILURE;
6175 
6176 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6177 		return QDF_STATUS_E_FAILURE;
6178 
6179 	return QDF_STATUS_SUCCESS;
6180 }
6181 
6182 /**
6183  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
6184  * @txrx_soc: dp soc handle
6185  *
6186  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6187  */
6188 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6189 {
6190 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6191 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6192 
6193 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6194 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6195 
6196 	return QDF_STATUS_SUCCESS;
6197 }
6198 
6199 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6200 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6201 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6202 			  qdf_nbuf_t nbuf)
6203 {
6204 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6205 		uint64_t tsf_time;
6206 
6207 		if (vdev->get_tsf_time) {
6208 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6209 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6210 		}
6211 	}
6212 }
6213 
6214 void dp_pkt_get_timestamp(uint64_t *time)
6215 {
6216 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6217 		*time = qdf_get_log_timestamp();
6218 }
6219 #endif
6220 
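/*
 * Usage sketch (illustrative only, assuming CONFIG_DP_PKT_ADD_TIMESTAMP):
 * a Tx path caller records the current time and stamps the nbuf. The
 * index value QDF_PKT_TX_DRIVER_ENTRY is an assumed member of
 * enum qdf_pkt_timestamp_index.
 *
 *	uint64_t ts;
 *
 *	dp_pkt_get_timestamp(&ts);
 *	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, ts, nbuf);
 */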
6221