xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject */
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 
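/*
 * The DP_TX_TCL_METADATA_* and DP_TCL_METADATA_TYPE_* wrappers below hide the
 * difference between the v1 and v2 HTT TCL metadata layouts: with
 * QCA_DP_TX_FW_METADATA_V2 they resolve to the *_V2_* HTT setters, otherwise
 * to the legacy ones, so callers can fill pdev/vdev/peer fields without
 * caring which firmware metadata version is compiled in.
 */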
72 #ifdef QCA_DP_TX_FW_METADATA_V2
73 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
74 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
75 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
76 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
77 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
78 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
82 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
85 #define DP_TCL_METADATA_TYPE_PEER_BASED \
86 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
87 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
88 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
89 #else
90 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
91 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
92 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
93 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
94 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
95 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
102 #define DP_TCL_METADATA_TYPE_PEER_BASED \
103 	HTT_TCL_METADATA_TYPE_PEER_BASED
104 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
105 	HTT_TCL_METADATA_TYPE_VDEV_BASED
106 #endif
107 
108 /* Mapping from cdp_sec_type (used as index) to hal encrypt type */
109 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
110 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
111 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
112 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
113 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
114 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
115 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
116 					  HAL_TX_ENCRYPT_TYPE_WAPI,
117 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
118 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
119 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
121 qdf_export_symbol(sec_type_map);
122 
123 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
124 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
125 {
126 	enum dp_tx_event_type type;
127 
128 	if (flags & DP_TX_DESC_FLAG_FLUSH)
129 		type = DP_TX_DESC_FLUSH;
130 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
131 		type = DP_TX_COMP_UNMAP_ERR;
132 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
133 		type = DP_TX_COMP_UNMAP;
134 	else
135 		type = DP_TX_DESC_UNMAP;
136 
137 	return type;
138 }
139 
140 static inline void
141 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
142 		       qdf_nbuf_t skb, uint32_t sw_cookie,
143 		       enum dp_tx_event_type type)
144 {
145 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
146 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
147 	struct dp_tx_desc_event *entry;
148 	uint32_t idx;
149 	uint16_t slot;
150 
151 	switch (type) {
152 	case DP_TX_COMP_UNMAP:
153 	case DP_TX_COMP_UNMAP_ERR:
154 	case DP_TX_COMP_MSDU_EXT:
155 		if (qdf_unlikely(!tx_comp_history->allocated))
156 			return;
157 
158 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
159 						 &slot,
160 						 DP_TX_COMP_HIST_SLOT_SHIFT,
161 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
162 						 DP_TX_COMP_HISTORY_SIZE);
163 		entry = &tx_comp_history->entry[slot][idx];
164 		break;
165 	case DP_TX_DESC_MAP:
166 	case DP_TX_DESC_UNMAP:
167 	case DP_TX_DESC_COOKIE:
168 	case DP_TX_DESC_FLUSH:
169 		if (qdf_unlikely(!tx_tcl_history->allocated))
170 			return;
171 
172 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
173 						 &slot,
174 						 DP_TX_TCL_HIST_SLOT_SHIFT,
175 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
176 						 DP_TX_TCL_HISTORY_SIZE);
177 		entry = &tx_tcl_history->entry[slot][idx];
178 		break;
179 	default:
180 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
181 		return;
182 	}
183 
184 	entry->skb = skb;
185 	entry->paddr = paddr;
186 	entry->sw_cookie = sw_cookie;
187 	entry->type = type;
188 	entry->ts = qdf_get_log_timestamp();
189 }
190 
191 static inline void
192 dp_tx_tso_seg_history_add(struct dp_soc *soc,
193 			  struct qdf_tso_seg_elem_t *tso_seg,
194 			  qdf_nbuf_t skb, uint32_t sw_cookie,
195 			  enum dp_tx_event_type type)
196 {
197 	int i;
198 
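	/*
	 * Record all fragments except frag 0 here; frag 0 is added below only
	 * for the last segment in the chain, with a sentinel cookie of
	 * 0xFFFFFFFF.
	 */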
199 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
200 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
201 				       skb, sw_cookie, type);
202 	}
203 
204 	if (!tso_seg->next)
205 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
206 				       skb, 0xFFFFFFFF, type);
207 }
208 
209 static inline void
210 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
211 		      qdf_nbuf_t skb, uint32_t sw_cookie,
212 		      enum dp_tx_event_type type)
213 {
214 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
215 	uint32_t num_segs = tso_info.num_segs;
216 
217 	while (num_segs) {
218 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
219 		curr_seg = curr_seg->next;
220 		num_segs--;
221 	}
222 }
223 
224 #else
225 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
226 {
227 	return DP_TX_DESC_INVAL_EVT;
228 }
229 
230 static inline void
231 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
232 		       qdf_nbuf_t skb, uint32_t sw_cookie,
233 		       enum dp_tx_event_type type)
234 {
235 }
236 
237 static inline void
238 dp_tx_tso_seg_history_add(struct dp_soc *soc,
239 			  struct qdf_tso_seg_elem_t *tso_seg,
240 			  qdf_nbuf_t skb, uint32_t sw_cookie,
241 			  enum dp_tx_event_type type)
242 {
243 }
244 
245 static inline void
246 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
247 		      qdf_nbuf_t skb, uint32_t sw_cookie,
248 		      enum dp_tx_event_type type)
249 {
250 }
251 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
252 
253 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
254 
255 /**
256  * dp_is_tput_high() - Check if throughput is high
257  *
258  * @soc: core txrx main context
259  *
260  * The check is based on the RTPM tput policy variable, where RTPM is
261  * avoided when throughput is high.
262  */
263 static inline int dp_is_tput_high(struct dp_soc *soc)
264 {
265 	return dp_get_rtpm_tput_policy_requirement(soc);
266 }
267 
268 #if defined(FEATURE_TSO)
269 /**
270  * dp_tx_tso_unmap_segment() - Unmap TSO segment
271  *
272  * @soc - core txrx main context
273  * @seg_desc - tso segment descriptor
274  * @num_seg_desc - tso number segment descriptor
275  */
276 static void dp_tx_tso_unmap_segment(
277 		struct dp_soc *soc,
278 		struct qdf_tso_seg_elem_t *seg_desc,
279 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
280 {
281 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
282 	if (qdf_unlikely(!seg_desc)) {
283 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
284 			 __func__, __LINE__);
285 		qdf_assert(0);
286 	} else if (qdf_unlikely(!num_seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else {
291 		bool is_last_seg;
292 		/* no tso segment left to do dma unmap */
293 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
294 			return;
295 
296 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
297 					true : false;
298 		qdf_nbuf_unmap_tso_segment(soc->osdev,
299 					   seg_desc, is_last_seg);
300 		num_seg_desc->num_seg.tso_cmn_num_seg--;
301 	}
302 }
303 
304 /**
305  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
306  *                            back to the freelist
307  *
308  * @soc - soc device handle
309  * @tx_desc - Tx software descriptor
310  */
311 static void dp_tx_tso_desc_release(struct dp_soc *soc,
312 				   struct dp_tx_desc_s *tx_desc)
313 {
314 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
315 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
316 		dp_tx_err("TSO desc is NULL!");
317 		qdf_assert(0);
318 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
319 		dp_tx_err("TSO num desc is NULL!");
320 		qdf_assert(0);
321 	} else {
322 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
323 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
324 				msdu_ext_desc->tso_num_desc;
325 
326 		/* Add the tso num segment into the free list */
327 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
328 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
329 					    tx_desc->msdu_ext_desc->
330 					    tso_num_desc);
331 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
332 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
333 		}
334 
335 		/* Add the tso segment into the free list */
336 		dp_tx_tso_desc_free(soc,
337 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
338 				    tso_desc);
339 		tx_desc->msdu_ext_desc->tso_desc = NULL;
340 	}
341 }
342 #else
343 static void dp_tx_tso_unmap_segment(
344 		struct dp_soc *soc,
345 		struct qdf_tso_seg_elem_t *seg_desc,
346 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
347 
348 {
349 }
350 
351 static void dp_tx_tso_desc_release(struct dp_soc *soc,
352 				   struct dp_tx_desc_s *tx_desc)
353 {
354 }
355 #endif
356 
357 /**
358  * dp_tx_desc_release() - Release Tx Descriptor
359  * @tx_desc: Tx Descriptor
360  * @desc_pool_id: Descriptor Pool ID
361  *
362  * Deallocate all resources attached to Tx descriptor and free the Tx
363  * descriptor.
364  *
365  * Return: none
366  */
367 void
368 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
369 {
370 	struct dp_pdev *pdev = tx_desc->pdev;
371 	struct dp_soc *soc;
372 	uint8_t comp_status = 0;
373 
374 	qdf_assert(pdev);
375 
376 	soc = pdev->soc;
377 
378 	dp_tx_outstanding_dec(pdev);
379 
380 	if (tx_desc->msdu_ext_desc) {
381 		if (tx_desc->frm_type == dp_tx_frm_tso)
382 			dp_tx_tso_desc_release(soc, tx_desc);
383 
384 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
385 			dp_tx_me_free_buf(tx_desc->pdev,
386 					  tx_desc->msdu_ext_desc->me_buffer);
387 
388 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
389 	}
390 
391 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
392 		qdf_atomic_dec(&soc->num_tx_exception);
393 
394 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
395 				tx_desc->buffer_src)
396 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
397 							     soc->hal_soc);
398 	else
399 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
400 
401 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
402 		    tx_desc->id, comp_status,
403 		    qdf_atomic_read(&pdev->num_tx_outstanding));
404 
405 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
406 	return;
407 }
408 
409 /**
410  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
411  * @vdev: DP vdev Handle
412  * @nbuf: skb
413  * @msdu_info: msdu_info required to create HTT metadata
414  *
415  * Prepares and fills HTT metadata in the frame pre-header for special frames
416  * that should be transmitted using varying transmit parameters.
417  * There are 2 VDEV modes that currently need this special metadata -
418  *  1) Mesh Mode
419  *  2) DSRC Mode
420  *
421  * Return: HTT metadata size
422  *
423  */
424 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
425 					  struct dp_tx_msdu_info_s *msdu_info)
426 {
427 	uint32_t *meta_data = msdu_info->meta_data;
428 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
429 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
430 
431 	uint8_t htt_desc_size;
432 
433 	/* Size rounded up to a multiple of 8 bytes */
434 	uint8_t htt_desc_size_aligned;
435 
436 	uint8_t *hdr = NULL;
437 
438 	/*
439 	 * Metadata - HTT MSDU Extension header
440 	 */
441 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
442 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
443 
444 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
445 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
446 							   meta_data[0]) ||
447 	    msdu_info->exception_fw) {
448 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
449 				 htt_desc_size_aligned)) {
450 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
451 							 htt_desc_size_aligned);
452 			if (!nbuf) {
453 				/*
454 				 * qdf_nbuf_realloc_headroom won't do skb_clone
455 				 * as skb_realloc_headroom does. so, no free is
456 				 * needed here.
457 				 */
458 				DP_STATS_INC(vdev,
459 					     tx_i.dropped.headroom_insufficient,
460 					     1);
461 				qdf_print(" %s[%d] skb_realloc_headroom failed",
462 					  __func__, __LINE__);
463 				return 0;
464 			}
465 		}
466 		/* Fill and add HTT metaheader */
467 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
468 		if (!hdr) {
469 			dp_tx_err("Error in filling HTT metadata");
470 
471 			return 0;
472 		}
473 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
474 
475 	} else if (vdev->opmode == wlan_op_mode_ocb) {
476 		/* Todo - Add support for DSRC */
477 	}
478 
479 	return htt_desc_size_aligned;
480 }
481 
482 /**
483  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
484  * @tso_seg: TSO segment to process
485  * @ext_desc: Pointer to MSDU extension descriptor
486  *
487  * Return: void
488  */
489 #if defined(FEATURE_TSO)
490 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
491 		void *ext_desc)
492 {
493 	uint8_t num_frag;
494 	uint32_t tso_flags;
495 
496 	/*
497 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
498 	 * tcp_flag_mask
499 	 *
500 	 * Checksum enable flags are set in TCL descriptor and not in Extension
501 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
502 	 */
503 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
504 
505 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
506 
507 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
508 		tso_seg->tso_flags.ip_len);
509 
510 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
511 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
512 
513 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
514 		uint32_t lo = 0;
515 		uint32_t hi = 0;
516 
517 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
518 				  (tso_seg->tso_frags[num_frag].length));
519 
520 		qdf_dmaaddr_to_32s(
521 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
522 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
523 			tso_seg->tso_frags[num_frag].length);
524 	}
525 
526 	return;
527 }
528 #else
529 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
530 		void *ext_desc)
531 {
532 	return;
533 }
534 #endif
535 
536 #if defined(FEATURE_TSO)
537 /**
538  * dp_tx_free_tso_seg_list() - Loop through the tso segments
539  *                             allocated and free them
540  *
541  * @soc: soc handle
542  * @free_seg: list of tso segments
543  * @msdu_info: msdu descriptor
544  *
545  * Return - void
546  */
547 static void dp_tx_free_tso_seg_list(
548 		struct dp_soc *soc,
549 		struct qdf_tso_seg_elem_t *free_seg,
550 		struct dp_tx_msdu_info_s *msdu_info)
551 {
552 	struct qdf_tso_seg_elem_t *next_seg;
553 
554 	while (free_seg) {
555 		next_seg = free_seg->next;
556 		dp_tx_tso_desc_free(soc,
557 				    msdu_info->tx_queue.desc_pool_id,
558 				    free_seg);
559 		free_seg = next_seg;
560 	}
561 }
562 
563 /**
564  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
565  *                                 allocated and free them
566  *
567  * @soc:  soc handle
568  * @free_num_seg: list of tso number segments
569  * @msdu_info: msdu descriptor
570  * Return - void
571  */
572 static void dp_tx_free_tso_num_seg_list(
573 		struct dp_soc *soc,
574 		struct qdf_tso_num_seg_elem_t *free_num_seg,
575 		struct dp_tx_msdu_info_s *msdu_info)
576 {
577 	struct qdf_tso_num_seg_elem_t *next_num_seg;
578 
579 	while (free_num_seg) {
580 		next_num_seg = free_num_seg->next;
581 		dp_tso_num_seg_free(soc,
582 				    msdu_info->tx_queue.desc_pool_id,
583 				    free_num_seg);
584 		free_num_seg = next_num_seg;
585 	}
586 }
587 
588 /**
589  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
590  *                              do dma unmap for each segment
591  *
592  * @soc: soc handle
593  * @free_seg: list of tso segments
594  * @num_seg_desc: tso number segment descriptor
595  *
596  * Return - void
597  */
598 static void dp_tx_unmap_tso_seg_list(
599 		struct dp_soc *soc,
600 		struct qdf_tso_seg_elem_t *free_seg,
601 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
602 {
603 	struct qdf_tso_seg_elem_t *next_seg;
604 
605 	if (qdf_unlikely(!num_seg_desc)) {
606 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
607 		return;
608 	}
609 
610 	while (free_seg) {
611 		next_seg = free_seg->next;
612 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
613 		free_seg = next_seg;
614 	}
615 }
616 
617 #ifdef FEATURE_TSO_STATS
618 /**
619  * dp_tso_get_stats_idx() - Retrieve the tso packet id
620  * @pdev: pdev handle
621  *
622  * Return: id
623  */
624 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
625 {
626 	uint32_t stats_idx;
627 
628 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
629 						% CDP_MAX_TSO_PACKETS);
630 	return stats_idx;
631 }
632 #else
633 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
634 {
635 	return 0;
636 }
637 #endif /* FEATURE_TSO_STATS */
638 
639 /**
640  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
641  *				     free the tso segments descriptor and
642  *				     tso num segments descriptor
643  *
644  * @soc:  soc handle
645  * @msdu_info: msdu descriptor
646  * @tso_seg_unmap: flag to show if dma unmap is necessary
647  *
648  * Return - void
649  */
650 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
651 					  struct dp_tx_msdu_info_s *msdu_info,
652 					  bool tso_seg_unmap)
653 {
654 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
655 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
656 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
657 					tso_info->tso_num_seg_list;
658 
659 	/* do dma unmap for each segment */
660 	if (tso_seg_unmap)
661 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
662 
663 	/* free all tso num seg descriptors (normally there is only one) */
664 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
665 
666 	/* free all tso segment descriptor */
667 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
668 }
669 
670 /**
671  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
672  * @vdev: virtual device handle
673  * @msdu: network buffer
674  * @msdu_info: meta data associated with the msdu
675  *
676  * Return: QDF_STATUS_SUCCESS success
677  */
678 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
679 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
680 {
681 	struct qdf_tso_seg_elem_t *tso_seg;
682 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
683 	struct dp_soc *soc = vdev->pdev->soc;
684 	struct dp_pdev *pdev = vdev->pdev;
685 	struct qdf_tso_info_t *tso_info;
686 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
687 	tso_info = &msdu_info->u.tso_info;
688 	tso_info->curr_seg = NULL;
689 	tso_info->tso_seg_list = NULL;
690 	tso_info->num_segs = num_seg;
691 	msdu_info->frm_type = dp_tx_frm_tso;
692 	tso_info->tso_num_seg_list = NULL;
693 
694 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
695 
696 	while (num_seg) {
697 		tso_seg = dp_tx_tso_desc_alloc(
698 				soc, msdu_info->tx_queue.desc_pool_id);
699 		if (tso_seg) {
700 			tso_seg->next = tso_info->tso_seg_list;
701 			tso_info->tso_seg_list = tso_seg;
702 			num_seg--;
703 		} else {
704 			dp_err_rl("Failed to alloc tso seg desc");
705 			DP_STATS_INC_PKT(vdev->pdev,
706 					 tso_stats.tso_no_mem_dropped, 1,
707 					 qdf_nbuf_len(msdu));
708 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
709 
710 			return QDF_STATUS_E_NOMEM;
711 		}
712 	}
713 
714 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
715 
716 	tso_num_seg = dp_tso_num_seg_alloc(soc,
717 			msdu_info->tx_queue.desc_pool_id);
718 
719 	if (tso_num_seg) {
720 		tso_num_seg->next = tso_info->tso_num_seg_list;
721 		tso_info->tso_num_seg_list = tso_num_seg;
722 	} else {
723 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
724 			 __func__);
725 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
726 
727 		return QDF_STATUS_E_NOMEM;
728 	}
729 
730 	msdu_info->num_seg =
731 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
732 
733 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
734 			msdu_info->num_seg);
735 
736 	if (!(msdu_info->num_seg)) {
737 		/*
738 		 * Free allocated TSO seg desc and number seg desc,
739 		 * do unmap for segments if dma map has done.
740 		 */
741 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
742 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
743 
744 		return QDF_STATUS_E_INVAL;
745 	}
746 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
747 			      msdu, 0, DP_TX_DESC_MAP);
748 
749 	tso_info->curr_seg = tso_info->tso_seg_list;
750 
751 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
752 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
753 			     msdu, msdu_info->num_seg);
754 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
755 				    tso_info->msdu_stats_idx);
756 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
757 	return QDF_STATUS_SUCCESS;
758 }
759 #else
760 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
761 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
762 {
763 	return QDF_STATUS_E_NOMEM;
764 }
765 #endif
766 
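/*
 * The meta_data[] scratch area in dp_tx_msdu_info_s is sized in 32-bit words
 * (DP_TX_MSDU_INFO_META_DATA_DWORDS); assert at build time that it is large
 * enough to hold a complete HTT MSDU extension descriptor.
 */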
767 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
768 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
769 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
770 
771 /**
772  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
773  * @vdev: DP Vdev handle
774  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
775  * @desc_pool_id: Descriptor Pool ID
776  *
777  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
778  */
779 static
780 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
781 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
782 {
783 	uint8_t i;
784 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
785 	struct dp_tx_seg_info_s *seg_info;
786 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
787 	struct dp_soc *soc = vdev->pdev->soc;
788 
789 	/* Allocate an extension descriptor */
790 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
791 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
792 
793 	if (!msdu_ext_desc) {
794 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
795 		return NULL;
796 	}
797 
798 	if (msdu_info->exception_fw &&
799 			qdf_unlikely(vdev->mesh_vdev)) {
800 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
801 				&msdu_info->meta_data[0],
802 				sizeof(struct htt_tx_msdu_desc_ext2_t));
803 		qdf_atomic_inc(&soc->num_tx_exception);
804 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
805 	}
806 
807 	switch (msdu_info->frm_type) {
808 	case dp_tx_frm_sg:
809 	case dp_tx_frm_me:
810 	case dp_tx_frm_raw:
811 		seg_info = msdu_info->u.sg_info.curr_seg;
812 		/* Update the buffer pointers in MSDU Extension Descriptor */
813 		for (i = 0; i < seg_info->frag_cnt; i++) {
814 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
815 				seg_info->frags[i].paddr_lo,
816 				seg_info->frags[i].paddr_hi,
817 				seg_info->frags[i].len);
818 		}
819 
820 		break;
821 
822 	case dp_tx_frm_tso:
823 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
824 				&cached_ext_desc[0]);
825 		break;
826 
827 
828 	default:
829 		break;
830 	}
831 
832 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
833 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
834 
835 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
836 			msdu_ext_desc->vaddr);
837 
838 	return msdu_ext_desc;
839 }
840 
841 /**
842  * dp_tx_trace_pkt() - Trace TX packet at DP layer
843  * @soc: DP soc handle
844  * @skb: skb to be traced
845  * @msdu_id: msdu_id of the packet
846  * @vdev_id: vdev_id of the packet
847  *
848  * Return: None
849  */
850 #ifdef DP_DISABLE_TX_PKT_TRACE
851 static void dp_tx_trace_pkt(struct dp_soc *soc,
852 			    qdf_nbuf_t skb, uint16_t msdu_id,
853 			    uint8_t vdev_id)
854 {
855 }
856 #else
857 static void dp_tx_trace_pkt(struct dp_soc *soc,
858 			    qdf_nbuf_t skb, uint16_t msdu_id,
859 			    uint8_t vdev_id)
860 {
861 	if (dp_is_tput_high(soc))
862 		return;
863 
864 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
865 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
866 	DPTRACE(qdf_dp_trace_ptr(skb,
867 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
868 				 QDF_TRACE_DEFAULT_PDEV_ID,
869 				 qdf_nbuf_data_addr(skb),
870 				 sizeof(qdf_nbuf_data(skb)),
871 				 msdu_id, vdev_id, 0));
872 
873 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
874 
875 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
876 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
877 				      msdu_id, QDF_TX));
878 }
879 #endif
880 
881 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
882 /**
883  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
884  *				      exception by the upper layer (OS_IF)
885  * @soc: DP soc handle
886  * @nbuf: packet to be transmitted
887  *
888  * Returns: 1 if the packet is marked as exception,
889  *	    0 if the packet is not marked as exception.
890  */
891 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
892 						 qdf_nbuf_t nbuf)
893 {
894 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
895 }
896 #else
897 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
898 						 qdf_nbuf_t nbuf)
899 {
900 	return 0;
901 }
902 #endif
903 
904 #ifdef DP_TRAFFIC_END_INDICATION
905 /**
906  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
907  *                                          as an indication to fw that the
908  *                                          data stream has ended
909  * @vdev: DP vdev handle
910  * @nbuf: original buffer from network stack
911  *
912  * Return: NULL on failure,
913  *         nbuf on success
914  */
915 static inline qdf_nbuf_t
916 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
917 				     qdf_nbuf_t nbuf)
918 {
919 	/* Packet length should be enough to copy up to the L3 header */
920 	uint8_t end_nbuf_len = 64;
921 	uint8_t htt_desc_size_aligned;
922 	uint8_t htt_desc_size;
923 	qdf_nbuf_t end_nbuf;
924 
925 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
926 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
927 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
928 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
929 
930 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
931 		if (!end_nbuf) {
932 			end_nbuf = qdf_nbuf_alloc(NULL,
933 						  (htt_desc_size_aligned +
934 						  end_nbuf_len),
935 						  htt_desc_size_aligned,
936 						  8, false);
937 			if (!end_nbuf) {
938 				dp_err("Packet allocation failed");
939 				goto out;
940 			}
941 		} else {
942 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
943 		}
944 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
945 			     end_nbuf_len);
946 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
947 
948 		return end_nbuf;
949 	}
950 out:
951 	return NULL;
952 }
953 
954 /**
955  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
956  *                                           via exception path.
957  * @vdev: DP vdev handle
958  * @end_nbuf: skb to send as indication
959  * @msdu_info: msdu_info of original nbuf
960  * @peer_id: peer id
961  *
962  * Return: None
963  */
964 static inline void
965 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
966 				      qdf_nbuf_t end_nbuf,
967 				      struct dp_tx_msdu_info_s *msdu_info,
968 				      uint16_t peer_id)
969 {
970 	struct dp_tx_msdu_info_s e_msdu_info = {0};
971 	qdf_nbuf_t nbuf;
972 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
973 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
974 	e_msdu_info.tx_queue = msdu_info->tx_queue;
975 	e_msdu_info.tid = msdu_info->tid;
976 	e_msdu_info.exception_fw = 1;
977 	desc_ext->host_tx_desc_pool = 1;
978 	desc_ext->traffic_end_indication = 1;
979 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
980 				      peer_id, NULL);
981 	if (nbuf) {
982 		dp_err("Traffic end indication packet tx failed");
983 		qdf_nbuf_free(nbuf);
984 	}
985 }
986 
987 /**
988  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
989  *                                                mark it as a traffic end
990  *                                                indication packet.
991  * @tx_desc: Tx descriptor pointer
992  * @msdu_info: msdu_info structure pointer
993  *
994  * Return: None
995  */
996 static inline void
997 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
998 					   struct dp_tx_msdu_info_s *msdu_info)
999 {
1000 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1001 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1002 
1003 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1004 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1005 }
1006 
1007 /**
1008  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet, instead
1009  *                                              of freeing it, when it is
1010  *                                              associated with a traffic end
1011  *                                              indication flagged descriptor.
1012  * @soc: dp soc handle
1013  * @desc: Tx descriptor pointer
1014  * @nbuf: buffer pointer
1015  *
1016  * Return: True if packet gets enqueued else false
1017  */
1018 static bool
1019 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1020 					 struct dp_tx_desc_s *desc,
1021 					 qdf_nbuf_t nbuf)
1022 {
1023 	struct dp_vdev *vdev = NULL;
1024 
1025 	if (qdf_unlikely((desc->flags &
1026 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1027 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1028 					     DP_MOD_ID_TX_COMP);
1029 		if (vdev) {
1030 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1031 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1032 			return true;
1033 		}
1034 	}
1035 	return false;
1036 }
1037 
1038 /**
1039  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1040  *                                             enable/disable status
1041  * @vdev: dp vdev handle
1042  *
1043  * Return: True if the feature is enabled else false
1044  */
1045 static inline bool
1046 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1047 {
1048 	return qdf_unlikely(vdev->traffic_end_ind_en);
1049 }
1050 
1051 static inline qdf_nbuf_t
1052 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1053 			       struct dp_tx_msdu_info_s *msdu_info,
1054 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1055 {
1056 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1057 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1058 
1059 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1060 
1061 	if (qdf_unlikely(end_nbuf))
1062 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1063 						      msdu_info, peer_id);
1064 	return nbuf;
1065 }
1066 #else
1067 static inline qdf_nbuf_t
1068 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1069 				     qdf_nbuf_t nbuf)
1070 {
1071 	return NULL;
1072 }
1073 
1074 static inline void
1075 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1076 				      qdf_nbuf_t end_nbuf,
1077 				      struct dp_tx_msdu_info_s *msdu_info,
1078 				      uint16_t peer_id)
1079 {}
1080 
1081 static inline void
1082 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1083 					   struct dp_tx_msdu_info_s *msdu_info)
1084 {}
1085 
1086 static inline bool
1087 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1088 					 struct dp_tx_desc_s *desc,
1089 					 qdf_nbuf_t nbuf)
1090 {
1091 	return false;
1092 }
1093 
1094 static inline bool
1095 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1096 {
1097 	return false;
1098 }
1099 
1100 static inline qdf_nbuf_t
1101 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1102 			       struct dp_tx_msdu_info_s *msdu_info,
1103 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1104 {
1105 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1106 }
1107 #endif
1108 
1109 /**
1110  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1111  * @vdev: DP vdev handle
1112  * @nbuf: skb
1113  * @desc_pool_id: Descriptor pool ID
1114  * @msdu_info: MSDU info carrying the metadata to the fw
1115  * @tx_exc_metadata: Handle that holds exception path metadata
1116  * Allocate and prepare Tx descriptor with msdu information.
1117  *
1118  * Return: Pointer to Tx Descriptor on success,
1119  *         NULL on failure
1120  */
1121 static
1122 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1123 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1124 		struct dp_tx_msdu_info_s *msdu_info,
1125 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1126 {
1127 	uint8_t align_pad;
1128 	uint8_t is_exception = 0;
1129 	uint8_t htt_hdr_size;
1130 	struct dp_tx_desc_s *tx_desc;
1131 	struct dp_pdev *pdev = vdev->pdev;
1132 	struct dp_soc *soc = pdev->soc;
1133 
1134 	if (dp_tx_limit_check(vdev))
1135 		return NULL;
1136 
1137 	/* Allocate software Tx descriptor */
1138 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1139 
1140 	if (qdf_unlikely(!tx_desc)) {
1141 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1142 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1143 		return NULL;
1144 	}
1145 
1146 	dp_tx_outstanding_inc(pdev);
1147 
1148 	/* Initialize the SW tx descriptor */
1149 	tx_desc->nbuf = nbuf;
1150 	tx_desc->frm_type = dp_tx_frm_std;
1151 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1152 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1153 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1154 	tx_desc->vdev_id = vdev->vdev_id;
1155 	tx_desc->pdev = pdev;
1156 	tx_desc->msdu_ext_desc = NULL;
1157 	tx_desc->pkt_offset = 0;
1158 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1159 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1160 
1161 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1162 
1163 	if (qdf_unlikely(vdev->multipass_en)) {
1164 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1165 			goto failure;
1166 	}
1167 
1168 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1169 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1170 		is_exception = 1;
1171 	/*
1172 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1173 	 * transmitted using varying transmit parameters (tx spec) which include
1174 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
1175 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1176 	 * These frames are sent as exception packets to firmware.
1177 	 *
1178 	 * HW requirement is that metadata should always point to a
1179 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1180 	 *  The HTT metadata length must itself be a multiple of 8 bytes so
1181 	 *  that, together with align_pad, the start address is 8-byte aligned.
1182 	 *
1183 	 *  |-----------------------------|
1184 	 *  |                             |
1185 	 *  |-----------------------------| <-----Buffer Pointer Address given
1186 	 *  |                             |  ^    in HW descriptor (aligned)
1187 	 *  |       HTT Metadata          |  |
1188 	 *  |                             |  |
1189 	 *  |                             |  | Packet Offset given in descriptor
1190 	 *  |                             |  |
1191 	 *  |-----------------------------|  |
1192 	 *  |       Alignment Pad         |  v
1193 	 *  |-----------------------------| <----- Actual buffer start address
1194 	 *  |        SKB Data             |           (Unaligned)
1195 	 *  |                             |
1196 	 *  |                             |
1197 	 *  |                             |
1198 	 *  |                             |
1199 	 *  |                             |
1200 	 *  |-----------------------------|
1201 	 */
1202 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1203 				(vdev->opmode == wlan_op_mode_ocb) ||
1204 				(tx_exc_metadata &&
1205 				tx_exc_metadata->is_tx_sniffer)) {
1206 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1207 
1208 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1209 			DP_STATS_INC(vdev,
1210 				     tx_i.dropped.headroom_insufficient, 1);
1211 			goto failure;
1212 		}
1213 
1214 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1215 			dp_tx_err("qdf_nbuf_push_head failed");
1216 			goto failure;
1217 		}
1218 
1219 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1220 				msdu_info);
1221 		if (htt_hdr_size == 0)
1222 			goto failure;
1223 
1224 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1225 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1226 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1227 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1228 							   msdu_info);
1229 		is_exception = 1;
1230 		tx_desc->length -= tx_desc->pkt_offset;
1231 	}
1232 
1233 #if !TQM_BYPASS_WAR
1234 	if (is_exception || tx_exc_metadata)
1235 #endif
1236 	{
1237 		/* Temporary WAR due to TQM VP issues */
1238 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1239 		qdf_atomic_inc(&soc->num_tx_exception);
1240 	}
1241 
1242 	return tx_desc;
1243 
1244 failure:
1245 	dp_tx_desc_release(tx_desc, desc_pool_id);
1246 	return NULL;
1247 }
1248 
1249 /**
1250  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1251  * @vdev: DP vdev handle
1252  * @nbuf: skb
1253  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1254  * @desc_pool_id : Descriptor Pool ID
1255  *
1256  * Allocate and prepare Tx descriptor with msdu and fragment descritor
1257  * information. For frames wth fragments, allocate and prepare
1258  * an MSDU extension descriptor
1259  *
1260  * Return: Pointer to Tx Descriptor on success,
1261  *         NULL on failure
1262  */
1263 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1264 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1265 		uint8_t desc_pool_id)
1266 {
1267 	struct dp_tx_desc_s *tx_desc;
1268 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1269 	struct dp_pdev *pdev = vdev->pdev;
1270 	struct dp_soc *soc = pdev->soc;
1271 
1272 	if (dp_tx_limit_check(vdev))
1273 		return NULL;
1274 
1275 	/* Allocate software Tx descriptor */
1276 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1277 	if (!tx_desc) {
1278 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1279 		return NULL;
1280 	}
1281 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1282 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1283 
1284 	dp_tx_outstanding_inc(pdev);
1285 
1286 	/* Initialize the SW tx descriptor */
1287 	tx_desc->nbuf = nbuf;
1288 	tx_desc->frm_type = msdu_info->frm_type;
1289 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1290 	tx_desc->vdev_id = vdev->vdev_id;
1291 	tx_desc->pdev = pdev;
1292 	tx_desc->pkt_offset = 0;
1293 
1294 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1295 
1296 	/* Handle scattered frames - TSO/SG/ME */
1297 	/* Allocate and prepare an extension descriptor for scattered frames */
1298 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1299 	if (!msdu_ext_desc) {
1300 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1301 		goto failure;
1302 	}
1303 
1304 #if TQM_BYPASS_WAR
1305 	/* Temporary WAR due to TQM VP issues */
1306 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1307 	qdf_atomic_inc(&soc->num_tx_exception);
1308 #endif
1309 	if (qdf_unlikely(msdu_info->exception_fw))
1310 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1311 
1312 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1313 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1314 
1315 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1316 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1317 
1318 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1319 
1320 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1321 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1322 	else
1323 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1324 
1325 	return tx_desc;
1326 failure:
1327 	dp_tx_desc_release(tx_desc, desc_pool_id);
1328 	return NULL;
1329 }
1330 
1331 /**
1332  * dp_tx_prepare_raw() - Prepare RAW packet TX
1333  * @vdev: DP vdev handle
1334  * @nbuf: buffer pointer
1335  * @seg_info: Pointer to Segment info Descriptor to be prepared
1336  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1337  *     descriptor
1338  *
1339  * Return: nbuf on success, NULL on failure
1340  */
1341 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1342 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1343 {
1344 	qdf_nbuf_t curr_nbuf = NULL;
1345 	uint16_t total_len = 0;
1346 	qdf_dma_addr_t paddr;
1347 	int32_t i;
1348 	int32_t mapped_buf_num = 0;
1349 
1350 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1351 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1352 
1353 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1354 
1355 	/* Continue only if frames are of DATA type */
1356 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1357 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1358 		dp_tx_debug("Pkt. recd is not of data type");
1359 		goto error;
1360 	}
1361 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1362 	if (vdev->raw_mode_war &&
1363 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1364 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1365 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1366 
1367 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1368 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1369 		/*
1370 		 * Number of nbuf's must not exceed the size of the frags
1371 		 * array in seg_info.
1372 		 */
1373 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1374 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1375 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1376 			goto error;
1377 		}
1378 		if (QDF_STATUS_SUCCESS !=
1379 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1380 						   curr_nbuf,
1381 						   QDF_DMA_TO_DEVICE,
1382 						   curr_nbuf->len)) {
1383 			dp_tx_err("%s dma map error ", __func__);
1384 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1385 			goto error;
1386 		}
1387 		/* Update the count of mapped nbuf's */
1388 		mapped_buf_num++;
1389 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1390 		seg_info->frags[i].paddr_lo = paddr;
1391 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1392 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1393 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1394 		total_len += qdf_nbuf_len(curr_nbuf);
1395 	}
1396 
1397 	seg_info->frag_cnt = i;
1398 	seg_info->total_len = total_len;
1399 	seg_info->next = NULL;
1400 
1401 	sg_info->curr_seg = seg_info;
1402 
1403 	msdu_info->frm_type = dp_tx_frm_raw;
1404 	msdu_info->num_seg = 1;
1405 
1406 	return nbuf;
1407 
1408 error:
1409 	i = 0;
1410 	while (nbuf) {
1411 		curr_nbuf = nbuf;
1412 		if (i < mapped_buf_num) {
1413 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1414 						     QDF_DMA_TO_DEVICE,
1415 						     curr_nbuf->len);
1416 			i++;
1417 		}
1418 		nbuf = qdf_nbuf_next(nbuf);
1419 		qdf_nbuf_free(curr_nbuf);
1420 	}
1421 	return NULL;
1422 
1423 }
1424 
1425 /**
1426  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1427  * @soc: DP soc handle
1428  * @nbuf: Buffer pointer
1429  *
1430  * unmap the chain of nbufs that belong to this RAW frame.
1431  *
1432  * Return: None
1433  */
1434 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1435 				    qdf_nbuf_t nbuf)
1436 {
1437 	qdf_nbuf_t cur_nbuf = nbuf;
1438 
1439 	do {
1440 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1441 					     QDF_DMA_TO_DEVICE,
1442 					     cur_nbuf->len);
1443 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1444 	} while (cur_nbuf);
1445 }
1446 
1447 #ifdef VDEV_PEER_PROTOCOL_COUNT
1448 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1449 					       qdf_nbuf_t nbuf)
1450 {
1451 	qdf_nbuf_t nbuf_local;
1452 	struct dp_vdev *vdev_local = vdev_hdl;
1453 
1454 	do {
1455 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1456 			break;
1457 		nbuf_local = nbuf;
1458 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1459 			 htt_cmn_pkt_type_raw))
1460 			break;
1461 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1462 			break;
1463 		else if (qdf_nbuf_is_tso((nbuf_local)))
1464 			break;
1465 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1466 						       (nbuf_local),
1467 						       NULL, 1, 0);
1468 	} while (0);
1469 }
1470 #endif
1471 
1472 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1473 /**
1474  * dp_tx_update_stats() - Update soc level tx stats
1475  * @soc: DP soc handle
1476  * @tx_desc: TX descriptor reference
1477  * @ring_id: TCL ring id
1478  *
1479  * Returns: none
1480  */
1481 void dp_tx_update_stats(struct dp_soc *soc,
1482 			struct dp_tx_desc_s *tx_desc,
1483 			uint8_t ring_id)
1484 {
1485 	uint32_t stats_len = 0;
1486 
1487 	if (tx_desc->frm_type == dp_tx_frm_tso)
1488 		stats_len  = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1489 	else
1490 		stats_len = qdf_nbuf_len(tx_desc->nbuf);
1491 
1492 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1493 }
1494 
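/**
 * dp_tx_attempt_coalescing() - Check with SWLM whether the TCL head pointer
 *                              update for this descriptor can be coalesced
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: TX descriptor to be enqueued
 * @tid: TID of the frame
 * @msdu_info: MSDU info of the frame
 * @ring_id: TCL ring id
 *
 * Return: non-zero to coalesce (defer) the head pointer update,
 *         0 to update the head pointer immediately
 */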
1495 int
1496 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1497 			 struct dp_tx_desc_s *tx_desc,
1498 			 uint8_t tid,
1499 			 struct dp_tx_msdu_info_s *msdu_info,
1500 			 uint8_t ring_id)
1501 {
1502 	struct dp_swlm *swlm = &soc->swlm;
1503 	union swlm_data swlm_query_data;
1504 	struct dp_swlm_tcl_data tcl_data;
1505 	QDF_STATUS status;
1506 	int ret;
1507 
1508 	if (!swlm->is_enabled)
1509 		return msdu_info->skip_hp_update;
1510 
1511 	tcl_data.nbuf = tx_desc->nbuf;
1512 	tcl_data.tid = tid;
1513 	tcl_data.ring_id = ring_id;
1514 	if (tx_desc->frm_type == dp_tx_frm_tso) {
1515 		tcl_data.pkt_len  =
1516 			tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1517 	} else {
1518 		tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
1519 	}
1520 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1521 	swlm_query_data.tcl_data = &tcl_data;
1522 
1523 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1524 	if (QDF_IS_STATUS_ERROR(status)) {
1525 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1526 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1527 		return 0;
1528 	}
1529 
1530 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1531 	if (ret) {
1532 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1533 	} else {
1534 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1535 	}
1536 
1537 	return ret;
1538 }
1539 
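/**
 * dp_tx_ring_access_end() - End TCL ring access after enqueuing a frame
 * @soc: DP soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: when non-zero, end access without updating the head pointer
 *            (the update is coalesced with a later enqueue); otherwise push
 *            the head pointer update to HW
 *
 * Return: none
 */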
1540 void
1541 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1542 		      int coalesce)
1543 {
1544 	if (coalesce)
1545 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1546 	else
1547 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1548 }
1549 
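/**
 * dp_tx_is_hp_update_required() - Decide whether the TCL head pointer update
 *                                 can be skipped for this segment
 * @i: index of the segment being enqueued
 * @msdu_info: MSDU info of the frame
 *
 * The update is skipped for all but the last segment of a multi-segment
 * frame, so a single head pointer update covers the whole burst.
 */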
1550 static inline void
1551 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1552 {
1553 	if (((i + 1) < msdu_info->num_seg))
1554 		msdu_info->skip_hp_update = 1;
1555 	else
1556 		msdu_info->skip_hp_update = 0;
1557 }
1558 
1559 static inline void
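/**
 * dp_flush_tcp_hp() - Flush the TCL head pointer for the given ring
 * @soc: DP soc handle
 * @ring_id: TCL ring id
 *
 * Forces a head pointer update (no coalescing) so that descriptors already
 * queued on the ring are handed to HW.
 */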
1560 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1561 {
1562 	hal_ring_handle_t hal_ring_hdl =
1563 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1564 
1565 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1566 		dp_err("Fillmore: SRNG access start failed");
1567 		return;
1568 	}
1569 
1570 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1571 }
1572 
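/**
 * dp_tx_check_and_flush_hp() - Flush the TCL head pointer on enqueue failure
 * @soc: DP soc handle
 * @status: status of the hardware enqueue attempt
 * @msdu_info: MSDU info of the frame
 *
 * If the enqueue failed and the head pointer update was not being deferred
 * for a later segment, flush the ring so that previously coalesced
 * descriptors are not left pending.
 */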
1573 static inline void
1574 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1575 			 QDF_STATUS status,
1576 			 struct dp_tx_msdu_info_s *msdu_info)
1577 {
1578 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1579 		dp_flush_tcp_hp(soc,
1580 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1581 	}
1582 }
1583 #else
1584 static inline void
1585 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1586 {
1587 }
1588 
1589 static inline void
1590 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1591 			 QDF_STATUS status,
1592 			 struct dp_tx_msdu_info_s *msdu_info)
1593 {
1594 }
1595 #endif
1596 
1597 #ifdef FEATURE_RUNTIME_PM
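/**
 * dp_get_rtpm_tput_policy_requirement() - Check the runtime PM tput policy
 * @soc: DP soc handle
 *
 * Return: non-zero when the high throughput flag is set and the HIF runtime
 *         PM state is at or below HIF_RTPM_STATE_ON, i.e. runtime get/put
 *         should be avoided on the TX path
 */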
1598 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1599 {
1600 	int ret;
1601 
1602 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1603 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1604 	return ret;
1605 }
1606 /**
1607  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1608  * @soc: Datapath soc handle
1609  * @hal_ring_hdl: HAL ring handle
1610  * @coalesce: Coalesce the current write or not
1611  *
1612  * Wrapper for HAL ring access end for data transmission for
1613  * FEATURE_RUNTIME_PM
1614  *
1615  * Returns: none
1616  */
1617 void
1618 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1619 			      hal_ring_handle_t hal_ring_hdl,
1620 			      int coalesce)
1621 {
1622 	int ret;
1623 
1624 	/*
1625 	 * Avoid runtime get and put APIs under high throughput scenarios.
1626 	 */
1627 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1628 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1629 		return;
1630 	}
1631 
1632 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1633 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1634 		if (hif_system_pm_state_check(soc->hif_handle)) {
1635 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1636 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1637 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1638 		} else {
1639 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1640 		}
1641 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1642 	} else {
1643 		dp_runtime_get(soc);
1644 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1645 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1646 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1647 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1648 		dp_runtime_put(soc);
1649 	}
1650 }
1651 #else
1652 
1653 #ifdef DP_POWER_SAVE
1654 void
1655 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1656 			      hal_ring_handle_t hal_ring_hdl,
1657 			      int coalesce)
1658 {
1659 	if (hif_system_pm_state_check(soc->hif_handle)) {
1660 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1661 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1662 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1663 	} else {
1664 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1665 	}
1666 }
1667 #endif
1668 
1669 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1670 {
1671 	return 0;
1672 }
1673 #endif
1674 
1675 /**
1676  * dp_tx_get_tid() - Obtain TID to be used for this frame
1677  * @vdev: DP vdev handle
1678  * @nbuf: skb
1679  * @msdu_info: MSDU info in which the extracted TID is stored
1680  * Extract the DSCP or PCP information from the frame and map it to a TID.
1681  *
1682  * Return: void
1683  */
1684 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1685 			  struct dp_tx_msdu_info_s *msdu_info)
1686 {
1687 	uint8_t tos = 0, dscp_tid_override = 0;
1688 	uint8_t *hdr_ptr, *L3datap;
1689 	uint8_t is_mcast = 0;
1690 	qdf_ether_header_t *eh = NULL;
1691 	qdf_ethervlan_header_t *evh = NULL;
1692 	uint16_t   ether_type;
1693 	qdf_llc_t *llcHdr;
1694 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1695 
1696 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1697 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1698 		eh = (qdf_ether_header_t *)nbuf->data;
1699 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1700 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1701 	} else {
1702 		qdf_dot3_qosframe_t *qos_wh =
1703 			(qdf_dot3_qosframe_t *) nbuf->data;
1704 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1705 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1706 		return;
1707 	}
1708 
1709 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1710 	ether_type = eh->ether_type;
1711 
1712 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1713 	/*
1714 	 * Check if packet is dot3 or eth2 type.
1715 	 */
1716 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1717 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1718 				sizeof(*llcHdr));
1719 
1720 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1721 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1722 				sizeof(*llcHdr);
1723 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1724 					+ sizeof(*llcHdr) +
1725 					sizeof(qdf_net_vlanhdr_t));
1726 		} else {
1727 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1728 				sizeof(*llcHdr);
1729 		}
1730 	} else {
1731 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1732 			evh = (qdf_ethervlan_header_t *) eh;
1733 			ether_type = evh->ether_type;
1734 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * Find priority from IP TOS DSCP field
1740 	 */
1741 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1742 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1743 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1744 			/* Only for unicast frames */
1745 			if (!is_mcast) {
1746 				/* send it on VO queue */
1747 				msdu_info->tid = DP_VO_TID;
1748 			}
1749 		} else {
1750 			/*
1751 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1752 			 * from TOS byte.
1753 			 */
1754 			tos = ip->ip_tos;
1755 			dscp_tid_override = 1;
1756 
1757 		}
1758 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1759 		/* TODO
1760 		 * use flowlabel
1761 		 *igmpmld cases to be handled in phase 2
1762 		 */
1763 		unsigned long ver_pri_flowlabel;
1764 		unsigned long pri;
1765 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1766 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1767 			DP_IPV6_PRIORITY_SHIFT;
1768 		tos = pri;
1769 		dscp_tid_override = 1;
1770 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1771 		msdu_info->tid = DP_VO_TID;
1772 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1773 		/* Only for unicast frames */
1774 		if (!is_mcast) {
1775 			/* send ucast arp on VO queue */
1776 			msdu_info->tid = DP_VO_TID;
1777 		}
1778 	}
1779 
1780 	/*
1781 	 * Assign all MCAST packets to BE
1782 	 */
1783 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1784 		if (is_mcast) {
1785 			tos = 0;
1786 			dscp_tid_override = 1;
1787 		}
1788 	}
1789 
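	/*
	 * 'tos' holds the IPv4 ToS byte or IPv6 traffic class at this point;
	 * dropping the low DP_IP_DSCP_SHIFT bits (the ECN field) leaves the
	 * DSCP value used to index the per-vdev DSCP-TID map.
	 */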
1790 	if (dscp_tid_override == 1) {
1791 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1792 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1793 	}
1794 
1795 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1796 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1797 
1798 	return;
1799 }
1800 
1801 /**
1802  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1803  * @vdev: DP vdev handle
1804  * @nbuf: skb
 * @msdu_info: msdu_info structure in which the classified TID is stored
1805  *
1806  * Software based TID classification is required when more than 2 DSCP-TID
1807  * mapping tables are needed.
1808  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1809  *
1810  * Return: void
1811  */
1812 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1813 				      struct dp_tx_msdu_info_s *msdu_info)
1814 {
1815 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1816 
1817 	/*
1818 	 * skip_sw_tid_classification flag will be set in the below cases:
1819 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1820 	 * 2. hlos_tid_override enabled for vdev
1821 	 * 3. mesh mode enabled for vdev
1822 	 */
1823 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1824 		/* Update tid in msdu_info from skb priority */
1825 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1826 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1827 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1828 
1829 			if (tid == DP_TX_INVALID_QOS_TAG)
1830 				return;
1831 
1832 			msdu_info->tid = tid;
1833 			return;
1834 		}
1835 		return;
1836 	}
1837 
1838 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1839 }
1840 
1841 #ifdef FEATURE_WLAN_TDLS
1842 /**
1843  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1844  * @soc: datapath SOC
1845  * @vdev: datapath vdev
1846  * @tx_desc: TX descriptor
1847  *
1848  * Return: None
1849  */
1850 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1851 				    struct dp_vdev *vdev,
1852 				    struct dp_tx_desc_s *tx_desc)
1853 {
1854 	if (vdev) {
1855 		if (vdev->is_tdls_frame) {
1856 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1857 			vdev->is_tdls_frame = false;
1858 		}
1859 	}
1860 }
1861 
1862 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1863 {
1864 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1865 
1866 	switch (soc->arch_id) {
1867 	case CDP_ARCH_TYPE_LI:
1868 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1869 		break;
1870 
1871 	case CDP_ARCH_TYPE_BE:
1872 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1873 		break;
1874 
1875 	default:
1876 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1877 		QDF_BUG(0);
1878 	}
1879 
1880 	return tx_status;
1881 }
1882 
1883 /**
1884  * dp_non_std_htt_tx_comp_free_buff() - Free the non-std tx packet buffer
1885  * @soc: dp_soc handle
1886  * @tx_desc: TX descriptor
1888  *
1889  * Return: None
1890  */
1891 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1892 					 struct dp_tx_desc_s *tx_desc)
1893 {
1894 	uint8_t tx_status = 0;
1895 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1896 
1897 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1898 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1899 						     DP_MOD_ID_TDLS);
1900 
1901 	if (qdf_unlikely(!vdev)) {
1902 		dp_err_rl("vdev is null!");
1903 		goto error;
1904 	}
1905 
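	/*
	 * Extract the HTT completion status written by FW into the WBM
	 * descriptor and hand the TDLS frame back to the registered
	 * non-standard data callback; the nbuf is freed here only when no
	 * callback is registered or the vdev lookup fails.
	 */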
1906 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1907 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1908 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1909 
1910 	if (vdev->tx_non_std_data_callback.func) {
1911 		qdf_nbuf_set_next(nbuf, NULL);
1912 		vdev->tx_non_std_data_callback.func(
1913 				vdev->tx_non_std_data_callback.ctxt,
1914 				nbuf, tx_status);
1915 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1916 		return;
1917 	} else {
1918 		dp_err_rl("callback func is null");
1919 	}
1920 
1921 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1922 error:
1923 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1924 	qdf_nbuf_free(nbuf);
1925 }
1926 
1927 /**
1928  * dp_tx_msdu_single_map() - do nbuf map
1929  * @vdev: DP vdev handle
1930  * @tx_desc: DP TX descriptor pointer
1931  * @nbuf: skb pointer
1932  *
1933  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1934  * operation done in other component.
1935  *
1936  * Return: QDF_STATUS
1937  */
1938 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1939 					       struct dp_tx_desc_s *tx_desc,
1940 					       qdf_nbuf_t nbuf)
1941 {
1942 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1943 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1944 						  nbuf,
1945 						  QDF_DMA_TO_DEVICE,
1946 						  nbuf->len);
1947 	else
1948 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1949 					   QDF_DMA_TO_DEVICE);
1950 }
1951 #else
1952 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1953 					   struct dp_vdev *vdev,
1954 					   struct dp_tx_desc_s *tx_desc)
1955 {
1956 }
1957 
1958 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1959 						struct dp_tx_desc_s *tx_desc)
1960 {
1961 }
1962 
1963 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1964 					       struct dp_tx_desc_s *tx_desc,
1965 					       qdf_nbuf_t nbuf)
1966 {
1967 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1968 					  nbuf,
1969 					  QDF_DMA_TO_DEVICE,
1970 					  nbuf->len);
1971 }
1972 #endif
1973 
1974 static inline
1975 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1976 				      struct dp_tx_desc_s *tx_desc,
1977 				      qdf_nbuf_t nbuf)
1978 {
1979 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1980 
1981 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1982 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1983 		return 0;
1984 
1985 	return qdf_nbuf_mapped_paddr_get(nbuf);
1986 }
1987 
1988 static inline
1989 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1990 {
1991 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1992 					   desc->nbuf,
1993 					   desc->dma_addr,
1994 					   QDF_DMA_TO_DEVICE,
1995 					   desc->length);
1996 }
1997 
1998 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
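/*
 * With QCA_DP_TX_NBUF_NO_MAP_UNMAP, simple (linear, non-TSO/SG) frames skip
 * the regular DMA map/unmap: the payload is cache-cleaned and the physical
 * address is derived directly, which assumes the platform allows DMA to such
 * addresses without an IOMMU mapping (hence the !BUILD_X86 guard).
 */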
1999 static inline
2000 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2001 			      struct dp_tx_desc_s *tx_desc,
2002 			      qdf_nbuf_t nbuf)
2003 {
2004 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2005 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2006 					 (void *)(nbuf->data + nbuf->len));
2007 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2008 	} else {
2009 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2010 	}
2011 }
2012 
2013 static inline
2014 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2015 		      struct dp_tx_desc_s *desc)
2016 {
2017 	if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)))
2018 		return dp_tx_nbuf_unmap_regular(soc, desc);
2019 }
2020 #else
2021 static inline
2022 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2023 			      struct dp_tx_desc_s *tx_desc,
2024 			      qdf_nbuf_t nbuf)
2025 {
2026 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2027 }
2028 
2029 static inline
2030 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2031 		      struct dp_tx_desc_s *desc)
2032 {
2033 	return dp_tx_nbuf_unmap_regular(soc, desc);
2034 }
2035 #endif
2036 
2037 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2038 static inline
2039 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2040 {
2041 	dp_tx_nbuf_unmap(soc, desc);
2042 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2043 }
2044 
2045 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2046 {
2047 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2048 		dp_tx_nbuf_unmap(soc, desc);
2049 }
2050 #else
2051 static inline
2052 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2053 {
2054 }
2055 
2056 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2057 {
2058 	dp_tx_nbuf_unmap(soc, desc);
2059 }
2060 #endif
2061 
2062 #ifdef MESH_MODE_SUPPORT
2063 /**
2064  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2065  * @soc: datapath SOC
2066  * @vdev: datapath vdev
2067  * @tx_desc: TX descriptor
2068  *
2069  * Return: None
2070  */
2071 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2072 					   struct dp_vdev *vdev,
2073 					   struct dp_tx_desc_s *tx_desc)
2074 {
2075 	if (qdf_unlikely(vdev->mesh_vdev))
2076 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2077 }
2078 
2079 /**
2080  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2081  * @soc: dp_soc handle
2082  * @tx_desc: TX descriptor
2083  * @delayed_free: delay the nbuf free
2084  *
2085  * Return: nbuf to be freed late
2086  */
2087 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2088 						   struct dp_tx_desc_s *tx_desc,
2089 						   bool delayed_free)
2090 {
2091 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2092 	struct dp_vdev *vdev = NULL;
2093 
2094 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2095 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2096 		if (vdev)
2097 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2098 
2099 		if (delayed_free)
2100 			return nbuf;
2101 
2102 		qdf_nbuf_free(nbuf);
2103 	} else {
2104 		if (vdev && vdev->osif_tx_free_ext) {
2105 			vdev->osif_tx_free_ext((nbuf));
2106 		} else {
2107 			if (delayed_free)
2108 				return nbuf;
2109 
2110 			qdf_nbuf_free(nbuf);
2111 		}
2112 	}
2113 
2114 	if (vdev)
2115 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2116 
2117 	return NULL;
2118 }
2119 #else
2120 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2121 					   struct dp_vdev *vdev,
2122 					   struct dp_tx_desc_s *tx_desc)
2123 {
2124 }
2125 
2126 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2127 						   struct dp_tx_desc_s *tx_desc,
2128 						   bool delayed_free)
2129 {
2130 	return NULL;
2131 }
2132 #endif
2133 
2134 /**
2135  * dp_tx_frame_is_drop() - checks if the packet is loopback
2136  * @vdev: DP vdev handle
2137  * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
2138  *
2139  * Return: 1 if frame needs to be dropped else 0
2140  */
2141 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2142 {
2143 	struct dp_pdev *pdev = NULL;
2144 	struct dp_ast_entry *src_ast_entry = NULL;
2145 	struct dp_ast_entry *dst_ast_entry = NULL;
2146 	struct dp_soc *soc = NULL;
2147 
2148 	qdf_assert(vdev);
2149 	pdev = vdev->pdev;
2150 	qdf_assert(pdev);
2151 	soc = pdev->soc;
2152 
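	/*
	 * If both the source and destination MAC resolve to AST entries that
	 * belong to the same peer, the frame would loop back to its sender,
	 * so flag it for drop.
	 */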
2153 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2154 				(soc, dstmac, vdev->pdev->pdev_id);
2155 
2156 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2157 				(soc, srcmac, vdev->pdev->pdev_id);
2158 	if (dst_ast_entry && src_ast_entry) {
2159 		if (dst_ast_entry->peer_id ==
2160 				src_ast_entry->peer_id)
2161 			return 1;
2162 	}
2163 
2164 	return 0;
2165 }
2166 
2167 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2168 	defined(WLAN_MCAST_MLO)
2169 /* MLO peer id for reinject*/
2170 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2171 /* MLO vdev id inc offset */
2172 #define DP_MLO_VDEV_ID_OFFSET 0x80
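/*
 * For MLO multicast reinjection, dp_tx_update_mcast_param() below switches
 * the TCL metadata to the global-sequence-number based type and offsets the
 * vdev id by DP_MLO_VDEV_ID_OFFSET, presumably so the target can distinguish
 * reinjected MLO multicast from regular per-vdev traffic.
 */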
2173 
2174 static inline void
2175 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2176 {
2177 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2178 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2179 		qdf_atomic_inc(&soc->num_tx_exception);
2180 	}
2181 }
2182 
2183 static inline void
2184 dp_tx_update_mcast_param(uint16_t peer_id,
2185 			 uint16_t *htt_tcl_metadata,
2186 			 struct dp_vdev *vdev,
2187 			 struct dp_tx_msdu_info_s *msdu_info)
2188 {
2189 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2190 		*htt_tcl_metadata = 0;
2191 		DP_TX_TCL_METADATA_TYPE_SET(
2192 				*htt_tcl_metadata,
2193 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2194 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2195 						    msdu_info->gsn);
2196 
2197 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2198 		if (qdf_unlikely(vdev->nawds_enabled))
2199 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2200 							*htt_tcl_metadata, 1);
2201 	} else {
2202 		msdu_info->vdev_id = vdev->vdev_id;
2203 	}
2204 }
2205 #else
2206 static inline void
2207 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2208 {
2209 }
2210 
2211 static inline void
2212 dp_tx_update_mcast_param(uint16_t peer_id,
2213 			 uint16_t *htt_tcl_metadata,
2214 			 struct dp_vdev *vdev,
2215 			 struct dp_tx_msdu_info_s *msdu_info)
2216 {
2217 }
2218 #endif
2219 /**
2220  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2221  * @vdev: DP vdev handle
2222  * @nbuf: skb
2223  * @msdu_info: MSDU info (carries the TID, fw metadata and Tx queue for this frame)
2226  * @peer_id: peer_id of the peer in case of NAWDS frames
2227  * @tx_exc_metadata: Handle that holds exception path metadata
2228  *
2229  * Return: NULL on success,
2230  *         nbuf when it fails to send
2231  */
2232 qdf_nbuf_t
2233 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2234 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2235 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2236 {
2237 	struct dp_pdev *pdev = vdev->pdev;
2238 	struct dp_soc *soc = pdev->soc;
2239 	struct dp_tx_desc_s *tx_desc;
2240 	QDF_STATUS status;
2241 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2242 	uint16_t htt_tcl_metadata = 0;
2243 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2244 	uint8_t tid = msdu_info->tid;
2245 	struct cdp_tid_tx_stats *tid_stats = NULL;
2246 	qdf_dma_addr_t paddr;
2247 
2248 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2249 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2250 			msdu_info, tx_exc_metadata);
2251 	if (!tx_desc) {
2252 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
2253 			  vdev, tx_q->desc_pool_id);
2254 		drop_code = TX_DESC_ERR;
2255 		goto fail_return;
2256 	}
2257 
2258 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2259 
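	/*
	 * Choose the TCL metadata: DP_INVALID_PEER keeps the vdev-based
	 * metadata but marks the frame as host inspected, any other valid
	 * peer_id (e.g. NAWDS/reinject) switches to peer-based metadata,
	 * and HTT_INVALID_PEER falls back to the plain vdev metadata.
	 */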
2260 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2261 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2262 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2263 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2264 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2265 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2266 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2267 					       peer_id);
2268 		dp_tx_bypass_reinjection(soc, tx_desc);
2269 	} else
2270 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2271 
2272 	if (msdu_info->exception_fw)
2273 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2274 
2275 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2276 					 !pdev->enhanced_stats_en);
2277 
2278 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2279 
2280 	paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2281 	if (!paddr) {
2282 		/* Handle failure */
2283 		dp_err("qdf_nbuf_map failed");
2284 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2285 		drop_code = TX_DMA_MAP_ERR;
2286 		goto release_desc;
2287 	}
2288 
2289 	tx_desc->dma_addr = paddr;
2290 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2291 			       tx_desc->id, DP_TX_DESC_MAP);
2292 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2293 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2294 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2295 					     htt_tcl_metadata,
2296 					     tx_exc_metadata, msdu_info);
2297 
2298 	if (status != QDF_STATUS_SUCCESS) {
2299 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2300 			     tx_desc, tx_q->ring_id);
2301 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2302 				       tx_desc->id, DP_TX_DESC_UNMAP);
2303 		dp_tx_nbuf_unmap(soc, tx_desc);
2304 		drop_code = TX_HW_ENQUEUE;
2305 		goto release_desc;
2306 	}
2307 
2308 	return NULL;
2309 
2310 release_desc:
2311 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2312 
2313 fail_return:
2314 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2315 	tid_stats = &pdev->stats.tid_stats.
2316 		    tid_tx_stats[tx_q->ring_id][tid];
2317 	tid_stats->swdrop_cnt[drop_code]++;
2318 	return nbuf;
2319 }
2320 
2321 /**
2322  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2323  * @soc: Soc handle
2324  * @desc: software Tx descriptor to be processed
2325  * @delayed_free: defer freeing of nbuf
2326  *
2327  * Return: nbuf to be freed later
2328  */
2329 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2330 			       bool delayed_free)
2331 {
2332 	qdf_nbuf_t nbuf = desc->nbuf;
2333 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2334 
2335 	/* nbuf already freed in vdev detach path */
2336 	if (!nbuf)
2337 		return NULL;
2338 
2339 	/* If it is TDLS mgmt, don't unmap or free the frame */
2340 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2341 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2342 		return NULL;
2343 	}
2344 
2345 	/* 0 : MSDU buffer, 1 : MLE */
2346 	if (desc->msdu_ext_desc) {
2347 		/* TSO free */
2348 		if (hal_tx_ext_desc_get_tso_enable(
2349 					desc->msdu_ext_desc->vaddr)) {
2350 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2351 					       desc->id, DP_TX_COMP_MSDU_EXT);
2352 			dp_tx_tso_seg_history_add(soc,
2353 						  desc->msdu_ext_desc->tso_desc,
2354 						  desc->nbuf, desc->id, type);
2355 			/* unmap each TSO seg before freeing the nbuf */
2356 			dp_tx_tso_unmap_segment(soc,
2357 						desc->msdu_ext_desc->tso_desc,
2358 						desc->msdu_ext_desc->
2359 						tso_num_desc);
2360 			goto nbuf_free;
2361 		}
2362 
2363 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2364 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2365 			qdf_dma_addr_t iova;
2366 			uint32_t frag_len;
2367 			uint32_t i;
2368 
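			/*
			 * For SG frames, unmap the linear head first and then
			 * walk the extension descriptor to unmap each fragment
			 * mapped at enqueue time; a zero iova/len marks the end
			 * of the valid fragments.
			 */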
2369 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2370 						     QDF_DMA_TO_DEVICE,
2371 						     qdf_nbuf_headlen(nbuf));
2372 
2373 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2374 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2375 							      &iova,
2376 							      &frag_len);
2377 				if (!iova || !frag_len)
2378 					break;
2379 
2380 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2381 						   QDF_DMA_TO_DEVICE);
2382 			}
2383 
2384 			goto nbuf_free;
2385 		}
2386 	}
2387 	/* If it's an ME frame, don't unmap the cloned nbufs */
2388 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2389 		goto nbuf_free;
2390 
2391 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2392 	dp_tx_unmap(soc, desc);
2393 
2394 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2395 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2396 
2397 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2398 		return NULL;
2399 
2400 nbuf_free:
2401 	if (delayed_free)
2402 		return nbuf;
2403 
2404 	qdf_nbuf_free(nbuf);
2405 
2406 	return NULL;
2407 }
2408 
2409 /**
2410  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2411  * @soc: DP soc handle
2412  * @nbuf: skb
2413  * @msdu_info: MSDU info
2414  *
2415  * Return: None
2416  */
2417 static inline void
2418 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2419 		   struct dp_tx_msdu_info_s *msdu_info)
2420 {
2421 	uint32_t cur_idx;
2422 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2423 
2424 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2425 				     qdf_nbuf_headlen(nbuf));
2426 
2427 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2428 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2429 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2430 				    seg->frags[cur_idx].paddr_hi) << 32),
2431 				   seg->frags[cur_idx].len,
2432 				   QDF_DMA_TO_DEVICE);
2433 }
2434 
2435 /**
2436  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2437  * @vdev: DP vdev handle
2438  * @nbuf: skb
2439  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2440  *
2441  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2442  *
2443  * Return: NULL on success,
2444  *         nbuf when it fails to send
2445  */
2446 #if QDF_LOCK_STATS
2447 noinline
2448 #else
2449 #endif
2450 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2451 				    struct dp_tx_msdu_info_s *msdu_info)
2452 {
2453 	uint32_t i;
2454 	struct dp_pdev *pdev = vdev->pdev;
2455 	struct dp_soc *soc = pdev->soc;
2456 	struct dp_tx_desc_s *tx_desc;
2457 	bool is_cce_classified = false;
2458 	QDF_STATUS status;
2459 	uint16_t htt_tcl_metadata = 0;
2460 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2461 	struct cdp_tid_tx_stats *tid_stats = NULL;
2462 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2463 
2464 	if (msdu_info->frm_type == dp_tx_frm_me)
2465 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2466 
2467 	i = 0;
2468 	/* Print statement to track i and num_seg */
2469 	/*
2470 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2471 	 * descriptors using information in msdu_info
2472 	 */
2473 	while (i < msdu_info->num_seg) {
2474 		/*
2475 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2476 		 * descriptor
2477 		 */
2478 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2479 				tx_q->desc_pool_id);
2480 
2481 		if (!tx_desc) {
2482 			if (msdu_info->frm_type == dp_tx_frm_me) {
2483 				prep_desc_fail++;
2484 				dp_tx_me_free_buf(pdev,
2485 					(void *)(msdu_info->u.sg_info
2486 						.curr_seg->frags[0].vaddr));
2487 				if (prep_desc_fail == msdu_info->num_seg) {
2488 					/*
2489 					 * Unmap is needed only if descriptor
2490 					 * preparation failed for all segments.
2491 					 */
2492 					qdf_nbuf_unmap(soc->osdev,
2493 						       msdu_info->u.sg_info.
2494 						       curr_seg->nbuf,
2495 						       QDF_DMA_TO_DEVICE);
2496 				}
2497 				/*
2498 				 * Free the nbuf for the current segment
2499 				 * and make it point to the next in the list.
2500 				 * For ME, there are as many segments as there
2501 				 * are clients.
2502 				 */
2503 				qdf_nbuf_free(msdu_info->u.sg_info
2504 					      .curr_seg->nbuf);
2505 				if (msdu_info->u.sg_info.curr_seg->next) {
2506 					msdu_info->u.sg_info.curr_seg =
2507 						msdu_info->u.sg_info
2508 						.curr_seg->next;
2509 					nbuf = msdu_info->u.sg_info
2510 					       .curr_seg->nbuf;
2511 				}
2512 				i++;
2513 				continue;
2514 			}
2515 
2516 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2517 				dp_tx_tso_seg_history_add(
2518 						soc,
2519 						msdu_info->u.tso_info.curr_seg,
2520 						nbuf, 0, DP_TX_DESC_UNMAP);
2521 				dp_tx_tso_unmap_segment(soc,
2522 							msdu_info->u.tso_info.
2523 							curr_seg,
2524 							msdu_info->u.tso_info.
2525 							tso_num_seg_list);
2526 
2527 				if (msdu_info->u.tso_info.curr_seg->next) {
2528 					msdu_info->u.tso_info.curr_seg =
2529 					msdu_info->u.tso_info.curr_seg->next;
2530 					i++;
2531 					continue;
2532 				}
2533 			}
2534 
2535 			if (msdu_info->frm_type == dp_tx_frm_sg)
2536 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2537 
2538 			goto done;
2539 		}
2540 
2541 		if (msdu_info->frm_type == dp_tx_frm_me) {
2542 			tx_desc->msdu_ext_desc->me_buffer =
2543 				(struct dp_tx_me_buf_t *)msdu_info->
2544 				u.sg_info.curr_seg->frags[0].vaddr;
2545 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2546 		}
2547 
2548 		if (is_cce_classified)
2549 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2550 
2551 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2552 		if (msdu_info->exception_fw) {
2553 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2554 		}
2555 
2556 		dp_tx_is_hp_update_required(i, msdu_info);
2557 
2558 		/*
2559 		 * For frames with multiple segments (TSO, ME), jump to next
2560 		 * segment.
2561 		 */
2562 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2563 			if (msdu_info->u.tso_info.curr_seg->next) {
2564 				msdu_info->u.tso_info.curr_seg =
2565 					msdu_info->u.tso_info.curr_seg->next;
2566 
2567 				/*
2568 				 * If this is a jumbo nbuf, then increment the
2569 				 * number of nbuf users for each additional
2570 				 * segment of the msdu. This will ensure that
2571 				 * the skb is freed only after receiving tx
2572 				 * completion for all segments of an nbuf
2573 				 */
2574 				qdf_nbuf_inc_users(nbuf);
2575 
2576 				/* Check with MCL if this is needed */
2577 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2578 				 */
2579 			}
2580 		}
2581 
2582 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2583 					 &htt_tcl_metadata,
2584 					 vdev,
2585 					 msdu_info);
2586 		/*
2587 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2588 		 */
2589 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2590 						     htt_tcl_metadata,
2591 						     NULL, msdu_info);
2592 
2593 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2594 
2595 		if (status != QDF_STATUS_SUCCESS) {
2596 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2597 				   tx_desc, tx_q->ring_id);
2598 
2599 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2600 			tid_stats = &pdev->stats.tid_stats.
2601 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2602 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2603 
2604 			if (msdu_info->frm_type == dp_tx_frm_me) {
2605 				hw_enq_fail++;
2606 				if (hw_enq_fail == msdu_info->num_seg) {
2607 					/*
2608 					 * Unmap is needed only if enqueue
2609 					 * failed for all segments.
2610 					 */
2611 					qdf_nbuf_unmap(soc->osdev,
2612 						       msdu_info->u.sg_info.
2613 						       curr_seg->nbuf,
2614 						       QDF_DMA_TO_DEVICE);
2615 				}
2616 				/*
2617 				 * Free the nbuf for the current segment
2618 				 * and make it point to the next in the list.
2619 				 * For ME, there are as many segments as there
2620 				 * are clients.
2621 				 */
2622 				qdf_nbuf_free(msdu_info->u.sg_info
2623 					      .curr_seg->nbuf);
2624 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2625 				if (msdu_info->u.sg_info.curr_seg->next) {
2626 					msdu_info->u.sg_info.curr_seg =
2627 						msdu_info->u.sg_info
2628 						.curr_seg->next;
2629 					nbuf = msdu_info->u.sg_info
2630 					       .curr_seg->nbuf;
2631 				} else
2632 					break;
2633 				i++;
2634 				continue;
2635 			}
2636 
2637 			/*
2638 			 * For TSO frames, the nbuf users increment done for
2639 			 * the current segment has to be reverted, since the
2640 			 * hw enqueue for this segment failed
2641 			 */
2642 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2643 			    msdu_info->u.tso_info.curr_seg) {
2644 				/*
2645 				 * unmap and free current,
2646 				 * retransmit remaining segments
2647 				 */
2648 				dp_tx_comp_free_buf(soc, tx_desc, false);
2649 				i++;
2650 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2651 				continue;
2652 			}
2653 
2654 			if (msdu_info->frm_type == dp_tx_frm_sg)
2655 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2656 
2657 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2658 			goto done;
2659 		}
2660 
2661 		/*
2662 		 * TODO
2663 		 * if tso_info structure can be modified to have curr_seg
2664 		 * as first element, following 2 blocks of code (for TSO and SG)
2665 		 * can be combined into 1
2666 		 */
2667 
2668 		/*
2669 		 * For Multicast-Unicast converted packets,
2670 		 * each converted frame (for a client) is represented as
2671 		 * 1 segment
2672 		 */
2673 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2674 				(msdu_info->frm_type == dp_tx_frm_me)) {
2675 			if (msdu_info->u.sg_info.curr_seg->next) {
2676 				msdu_info->u.sg_info.curr_seg =
2677 					msdu_info->u.sg_info.curr_seg->next;
2678 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2679 			} else
2680 				break;
2681 		}
2682 		i++;
2683 	}
2684 
2685 	nbuf = NULL;
2686 
2687 done:
2688 	return nbuf;
2689 }
2690 
2691 /**
2692  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2693  *                     for SG frames
2694  * @vdev: DP vdev handle
2695  * @nbuf: skb
2696  * @seg_info: Pointer to Segment info Descriptor to be prepared
2697  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2698  *
2699  * Return: NULL on success,
2700  *         nbuf when it fails to send
2701  */
2702 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2703 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2704 {
2705 	uint32_t cur_frag, nr_frags, i;
2706 	qdf_dma_addr_t paddr;
2707 	struct dp_tx_sg_info_s *sg_info;
2708 
2709 	sg_info = &msdu_info->u.sg_info;
2710 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2711 
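	/*
	 * seg_info->frags[0] describes the mapped linear headroom of the skb;
	 * each page fragment is mapped separately and recorded in
	 * frags[1..nr_frags], so the extension descriptor can carry the
	 * complete scatter list for this MSDU.
	 */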
2712 	if (QDF_STATUS_SUCCESS !=
2713 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2714 					   QDF_DMA_TO_DEVICE,
2715 					   qdf_nbuf_headlen(nbuf))) {
2716 		dp_tx_err("dma map error");
2717 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2718 		qdf_nbuf_free(nbuf);
2719 		return NULL;
2720 	}
2721 
2722 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2723 	seg_info->frags[0].paddr_lo = paddr;
2724 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2725 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2726 	seg_info->frags[0].vaddr = (void *) nbuf;
2727 
2728 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2729 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2730 							    nbuf, 0,
2731 							    QDF_DMA_TO_DEVICE,
2732 							    cur_frag)) {
2733 			dp_tx_err("frag dma map error");
2734 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2735 			goto map_err;
2736 		}
2737 
2738 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2739 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2740 		seg_info->frags[cur_frag + 1].paddr_hi =
2741 			((uint64_t) paddr) >> 32;
2742 		seg_info->frags[cur_frag + 1].len =
2743 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2744 	}
2745 
2746 	seg_info->frag_cnt = (cur_frag + 1);
2747 	seg_info->total_len = qdf_nbuf_len(nbuf);
2748 	seg_info->next = NULL;
2749 
2750 	sg_info->curr_seg = seg_info;
2751 
2752 	msdu_info->frm_type = dp_tx_frm_sg;
2753 	msdu_info->num_seg = 1;
2754 
2755 	return nbuf;
2756 map_err:
2757 	/* restore paddr into nbuf before calling unmap */
2758 	qdf_nbuf_mapped_paddr_set(nbuf,
2759 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2760 				  ((uint64_t)
2761 				  seg_info->frags[0].paddr_hi) << 32));
2762 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2763 				     QDF_DMA_TO_DEVICE,
2764 				     seg_info->frags[0].len);
2765 	for (i = 1; i <= cur_frag; i++) {
2766 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2767 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2768 				   seg_info->frags[i].paddr_hi) << 32),
2769 				   seg_info->frags[i].len,
2770 				   QDF_DMA_TO_DEVICE);
2771 	}
2772 	qdf_nbuf_free(nbuf);
2773 	return NULL;
2774 }
2775 
2776 /**
2777  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2778  * @vdev: DP vdev handle
2779  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2780  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2781  *
2782  * Return: None
2784  */
2785 static
2786 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2787 				    struct dp_tx_msdu_info_s *msdu_info,
2788 				    uint16_t ppdu_cookie)
2789 {
2790 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2791 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2792 
2793 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2794 
2795 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2796 				(msdu_info->meta_data[5], 1);
2797 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2798 				(msdu_info->meta_data[5], 1);
2799 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2800 				(msdu_info->meta_data[6], ppdu_cookie);
2801 
2802 	msdu_info->exception_fw = 1;
2803 	msdu_info->is_tx_sniffer = 1;
2804 }
2805 
2806 #ifdef MESH_MODE_SUPPORT
2807 
2808 /**
2809  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2810  *				and prepare msdu_info for mesh frames.
2811  * @vdev: DP vdev handle
2812  * @nbuf: skb
2813  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2814  *
2815  * Return: NULL on failure,
2816  *         nbuf when extracted successfully
2817  */
2818 static
2819 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2820 				struct dp_tx_msdu_info_s *msdu_info)
2821 {
2822 	struct meta_hdr_s *mhdr;
2823 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2824 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2825 
2826 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2827 
2828 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2829 		msdu_info->exception_fw = 0;
2830 		goto remove_meta_hdr;
2831 	}
2832 
2833 	msdu_info->exception_fw = 1;
2834 
2835 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2836 
2837 	meta_data->host_tx_desc_pool = 1;
2838 	meta_data->update_peer_cache = 1;
2839 	meta_data->learning_frame = 1;
2840 
2841 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2842 		meta_data->power = mhdr->power;
2843 
2844 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2845 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2846 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2847 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2848 
2849 		meta_data->dyn_bw = 1;
2850 
2851 		meta_data->valid_pwr = 1;
2852 		meta_data->valid_mcs_mask = 1;
2853 		meta_data->valid_nss_mask = 1;
2854 		meta_data->valid_preamble_type  = 1;
2855 		meta_data->valid_retries = 1;
2856 		meta_data->valid_bw_info = 1;
2857 	}
2858 
2859 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2860 		meta_data->encrypt_type = 0;
2861 		meta_data->valid_encrypt_type = 1;
2862 		meta_data->learning_frame = 0;
2863 	}
2864 
2865 	meta_data->valid_key_flags = 1;
2866 	meta_data->key_flags = (mhdr->keyix & 0x3);
2867 
2868 remove_meta_hdr:
2869 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2870 		dp_tx_err("qdf_nbuf_pull_head failed");
2871 		qdf_nbuf_free(nbuf);
2872 		return NULL;
2873 	}
2874 
2875 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2876 
2877 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2878 		   " tid %d to_fw %d",
2879 		   msdu_info->meta_data[0],
2880 		   msdu_info->meta_data[1],
2881 		   msdu_info->meta_data[2],
2882 		   msdu_info->meta_data[3],
2883 		   msdu_info->meta_data[4],
2884 		   msdu_info->meta_data[5],
2885 		   msdu_info->tid, msdu_info->exception_fw);
2886 
2887 	return nbuf;
2888 }
2889 #else
2890 static
2891 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2892 				struct dp_tx_msdu_info_s *msdu_info)
2893 {
2894 	return nbuf;
2895 }
2896 
2897 #endif
2898 
2899 /**
2900  * dp_check_exc_metadata() - Checks if parameters are valid
2901  * @tx_exc: holds all exception path parameters
2902  *
2903  * Return: true when all the parameters are valid, else false
2904  *
2905  */
2906 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2907 {
2908 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2909 			    HTT_INVALID_TID);
2910 	bool invalid_encap_type =
2911 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2912 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2913 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2914 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2915 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2916 			       tx_exc->ppdu_cookie == 0);
2917 
2918 	if (tx_exc->is_intrabss_fwd)
2919 		return true;
2920 
2921 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2922 	    invalid_cookie) {
2923 		return false;
2924 	}
2925 
2926 	return true;
2927 }
2928 
2929 #ifdef ATH_SUPPORT_IQUE
2930 /**
2931  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2932  * @vdev: vdev handle
2933  * @nbuf: skb
2934  *
2935  * Return: true on success,
2936  *         false on failure
2937  */
2938 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2939 {
2940 	qdf_ether_header_t *eh;
2941 
2942 	/* Mcast to Ucast Conversion*/
2943 	if (qdf_likely(!vdev->mcast_enhancement_en))
2944 		return true;
2945 
2946 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2947 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2948 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2949 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2950 		qdf_nbuf_set_next(nbuf, NULL);
2951 
2952 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2953 				 qdf_nbuf_len(nbuf));
2954 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2955 				QDF_STATUS_SUCCESS) {
2956 			return false;
2957 		}
2958 
2959 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2960 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2961 					QDF_STATUS_SUCCESS) {
2962 				return false;
2963 			}
2964 		}
2965 	}
2966 
2967 	return true;
2968 }
2969 #else
2970 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2971 {
2972 	return true;
2973 }
2974 #endif
2975 
2976 /**
2977  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2978  * @nbuf: qdf_nbuf_t
2979  * @vdev: struct dp_vdev *
2980  *
2981  * Allow the packet for processing only if it is destined to a peer
2982  * client connected to the same vap. Drop the packet if the client is
2983  * connected to a different vap.
2984  *
2985  * Return: QDF_STATUS
2986  */
2987 static inline QDF_STATUS
2988 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2989 {
2990 	struct dp_ast_entry *dst_ast_entry = NULL;
2991 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2992 
2993 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2994 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2995 		return QDF_STATUS_SUCCESS;
2996 
2997 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2998 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2999 							eh->ether_dhost,
3000 							vdev->vdev_id);
3001 
3002 	/* If there is no ast entry, return failure */
3003 	if (qdf_unlikely(!dst_ast_entry)) {
3004 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3005 		return QDF_STATUS_E_FAILURE;
3006 	}
3007 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3008 
3009 	return QDF_STATUS_SUCCESS;
3010 }
3011 
3012 /**
3013  * dp_tx_nawds_handler() - NAWDS handler
3014  *
3015  * @soc: DP soc handle
3016  * @vdev: DP vdev handle
3017  * @msdu_info: msdu_info required to create HTT metadata
3018  * @nbuf: skb
 * @sa_peer_id: peer id of the frame's source address
3019  *
3020  * This API transfers the multicast frames with the peer id
3021  * to NAWDS-enabled peers.
3022  *
3023  * Return: none
3024  */
3025 
3026 static inline
3027 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3028 			 struct dp_tx_msdu_info_s *msdu_info,
3029 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3030 {
3031 	struct dp_peer *peer = NULL;
3032 	qdf_nbuf_t nbuf_clone = NULL;
3033 	uint16_t peer_id = DP_INVALID_PEER;
3034 	struct dp_txrx_peer *txrx_peer;
3035 
3036 	/* This check avoids forwarding a packet whose source is present
3037 	 * in the ast table but still doesn't have a valid peer id.
3038 	 */
3039 	if (sa_peer_id == HTT_INVALID_PEER)
3040 		return;
3041 
3042 	qdf_spin_lock_bh(&vdev->peer_list_lock);
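	/*
	 * Walk the vdev peer list and unicast a clone of the multicast frame
	 * to every non-BSS, NAWDS-enabled primary link peer, using peer-based
	 * TCL metadata so the target delivers it to that specific peer.
	 */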
3043 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3044 		txrx_peer = dp_get_txrx_peer(peer);
3045 		if (!txrx_peer)
3046 			continue;
3047 
3048 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3049 			peer_id = peer->peer_id;
3050 
3051 			if (!dp_peer_is_primary_link_peer(peer))
3052 				continue;
3053 
3054 			/* Multicast packets need to be
3055 			 * dropped in case of intra bss forwarding
3056 			 */
3057 			if (sa_peer_id == txrx_peer->peer_id) {
3058 				dp_tx_debug("multicast packet");
3059 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3060 							  tx.nawds_mcast_drop,
3061 							  1);
3062 				continue;
3063 			}
3064 
3065 			nbuf_clone = qdf_nbuf_clone(nbuf);
3066 
3067 			if (!nbuf_clone) {
3068 				QDF_TRACE(QDF_MODULE_ID_DP,
3069 					  QDF_TRACE_LEVEL_ERROR,
3070 					  FL("nbuf clone failed"));
3071 				break;
3072 			}
3073 
3074 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3075 							    msdu_info, peer_id,
3076 							    NULL);
3077 
3078 			if (nbuf_clone) {
3079 				dp_tx_debug("pkt send failed");
3080 				qdf_nbuf_free(nbuf_clone);
3081 			} else {
3082 				if (peer_id != DP_INVALID_PEER)
3083 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3084 								      tx.nawds_mcast,
3085 								      1, qdf_nbuf_len(nbuf));
3086 			}
3087 		}
3088 	}
3089 
3090 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3091 }
3092 
3093 /**
3094  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
3095  * @soc: DP soc handle
3096  * @vdev_id: id of DP vdev handle
3097  * @nbuf: skb
3098  * @tx_exc_metadata: Handle that holds exception path meta data
3099  *
3100  * Entry point for Core Tx layer (DP_TX) invoked from
3101  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3102  *
3103  * Return: NULL on success,
3104  *         nbuf when it fails to send
3105  */
3106 qdf_nbuf_t
3107 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3108 		     qdf_nbuf_t nbuf,
3109 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3110 {
3111 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3112 	qdf_ether_header_t *eh = NULL;
3113 	struct dp_tx_msdu_info_s msdu_info;
3114 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3115 						     DP_MOD_ID_TX_EXCEPTION);
3116 
3117 	if (qdf_unlikely(!vdev))
3118 		goto fail;
3119 
3120 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3121 
3122 	if (!tx_exc_metadata)
3123 		goto fail;
3124 
3125 	msdu_info.tid = tx_exc_metadata->tid;
3126 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3127 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3128 			 QDF_MAC_ADDR_REF(nbuf->data));
3129 
3130 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3131 
3132 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3133 		dp_tx_err("Invalid parameters in exception path");
3134 		goto fail;
3135 	}
3136 
3137 	/* Basic sanity checks for unsupported packets */
3138 
3139 	/* MESH mode */
3140 	if (qdf_unlikely(vdev->mesh_vdev)) {
3141 		dp_tx_err("Mesh mode is not supported in exception path");
3142 		goto fail;
3143 	}
3144 
3145 	/*
3146 	 * Classify the frame and call corresponding
3147 	 * "prepare" function which extracts the segment (TSO)
3148 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3149 	 * into MSDU_INFO structure which is later used to fill
3150 	 * SW and HW descriptors.
3151 	 */
3152 	if (qdf_nbuf_is_tso(nbuf)) {
3153 		dp_verbose_debug("TSO frame %pK", vdev);
3154 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3155 				 qdf_nbuf_len(nbuf));
3156 
3157 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3158 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3159 					 qdf_nbuf_len(nbuf));
3160 			goto fail;
3161 		}
3162 
3163 		goto send_multiple;
3164 	}
3165 
3166 	/* SG */
3167 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3168 		struct dp_tx_seg_info_s seg_info = {0};
3169 
3170 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3171 		if (!nbuf)
3172 			goto fail;
3173 
3174 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3175 
3176 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3177 				 qdf_nbuf_len(nbuf));
3178 
3179 		goto send_multiple;
3180 	}
3181 
3182 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3183 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3184 				 qdf_nbuf_len(nbuf));
3185 
3186 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3187 					       tx_exc_metadata->ppdu_cookie);
3188 	}
3189 
3190 	/*
3191 	 * Get HW Queue to use for this frame.
3192 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3193 	 * dedicated for data and 1 for command.
3194 	 * "queue_id" maps to one hardware ring.
3195 	 *  With each ring, we also associate a unique Tx descriptor pool
3196 	 *  to minimize lock contention for these resources.
3197 	 */
3198 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3199 
3200 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3201 		if (qdf_unlikely(vdev->nawds_enabled)) {
3202 			/*
3203 			 * This is a multicast packet
3204 			 */
3205 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3206 					    tx_exc_metadata->peer_id);
3207 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3208 					 1, qdf_nbuf_len(nbuf));
3209 		}
3210 
3211 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3212 					      DP_INVALID_PEER, NULL);
3213 	} else {
3214 		/*
3215 		 * Check exception descriptors
3216 		 */
3217 		if (dp_tx_exception_limit_check(vdev))
3218 			goto fail;
3219 
3220 		/*  Single linear frame */
3221 		/*
3222 		 * If nbuf is a simple linear frame, use send_single function to
3223 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3224 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3225 		 */
3226 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3227 					      tx_exc_metadata->peer_id,
3228 					      tx_exc_metadata);
3229 	}
3230 
3231 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3232 	return nbuf;
3233 
3234 send_multiple:
3235 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3236 
3237 fail:
3238 	if (vdev)
3239 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3240 	dp_verbose_debug("pkt send failed");
3241 	return nbuf;
3242 }
3243 
3244 /**
3245  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3246  *      in exception path in special case to avoid regular exception path check.
3247  * @soc: DP soc handle
3248  * @vdev_id: id of DP vdev handle
3249  * @nbuf: skb
3250  * @tx_exc_metadata: Handle that holds exception path meta data
3251  *
3252  * Entry point for Core Tx layer (DP_TX) invoked from
3253  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3254  *
3255  * Return: NULL on success,
3256  *         nbuf when it fails to send
3257  */
3258 qdf_nbuf_t
3259 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3260 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3261 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3262 {
3263 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3264 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3265 						     DP_MOD_ID_TX_EXCEPTION);
3266 
3267 	if (qdf_unlikely(!vdev))
3268 		goto fail;
3269 
3270 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3271 			== QDF_STATUS_E_FAILURE)) {
3272 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3273 		goto fail;
3274 	}
3275 
3276 	/* Unref the vdev as the reference will again be taken inside dp_tx_send_exception() */
3277 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3278 
3279 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3280 
3281 fail:
3282 	if (vdev)
3283 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3284 	dp_verbose_debug("pkt send failed");
3285 	return nbuf;
3286 }
3287 
3288 /**
3289  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3290  * @soc: DP soc handle
3291  * @vdev_id: DP vdev handle
3292  * @nbuf: skb
3293  *
3294  * Entry point for Core Tx layer (DP_TX) invoked from
3295  * hard_start_xmit in OSIF/HDD
3296  *
3297  * Return: NULL on success,
3298  *         nbuf when it fails to send
3299  */
3300 #ifdef MESH_MODE_SUPPORT
3301 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3302 			   qdf_nbuf_t nbuf)
3303 {
3304 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3305 	struct meta_hdr_s *mhdr;
3306 	qdf_nbuf_t nbuf_mesh = NULL;
3307 	qdf_nbuf_t nbuf_clone = NULL;
3308 	struct dp_vdev *vdev;
3309 	uint8_t no_enc_frame = 0;
3310 
3311 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3312 	if (!nbuf_mesh) {
3313 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3314 				"qdf_nbuf_unshare failed");
3315 		return nbuf;
3316 	}
3317 
3318 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3319 	if (!vdev) {
3320 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3321 				"vdev is NULL for vdev_id %d", vdev_id);
3322 		return nbuf;
3323 	}
3324 
3325 	nbuf = nbuf_mesh;
3326 
3327 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3328 
3329 	if ((vdev->sec_type != cdp_sec_type_none) &&
3330 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3331 		no_enc_frame = 1;
3332 
3333 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3334 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3335 
3336 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3337 		       !no_enc_frame) {
3338 		nbuf_clone = qdf_nbuf_clone(nbuf);
3339 		if (!nbuf_clone) {
3340 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3341 				"qdf_nbuf_clone failed");
3342 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3343 			return nbuf;
3344 		}
3345 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3346 	}
3347 
3348 	if (nbuf_clone) {
3349 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3350 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3351 		} else {
3352 			qdf_nbuf_free(nbuf_clone);
3353 		}
3354 	}
3355 
3356 	if (no_enc_frame)
3357 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3358 	else
3359 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3360 
3361 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3362 	if ((!nbuf) && no_enc_frame) {
3363 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3364 	}
3365 
3366 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3367 	return nbuf;
3368 }
3369 
3370 #else
3371 
3372 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3373 			   qdf_nbuf_t nbuf)
3374 {
3375 	return dp_tx_send(soc, vdev_id, nbuf);
3376 }
3377 
3378 #endif
3379 
3380 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3381 static inline
3382 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3383 {
3384 	if (nbuf) {
3385 		qdf_prefetch(&nbuf->len);
3386 		qdf_prefetch(&nbuf->data);
3387 	}
3388 }
3389 #else
3390 static inline
3391 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3392 {
3393 }
3394 #endif
3395 
3396 #ifdef DP_UMAC_HW_RESET_SUPPORT
3397 /*
3398  * dp_tx_drop() - Drop the frame on a given VAP
3399  * @soc: DP soc handle
3400  * @vdev_id: id of DP vdev handle
3401  * @nbuf: skb
3402  *
3403  * Drop all the incoming packets
3404  *
3405  * Return: nbuf
3406  *
3407  */
3408 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3409 		      qdf_nbuf_t nbuf)
3410 {
3411 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3412 	struct dp_vdev *vdev = NULL;
3413 
3414 	vdev = soc->vdev_id_map[vdev_id];
3415 	if (qdf_unlikely(!vdev))
3416 		return nbuf;
3417 
3418 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3419 	return nbuf;
3420 }
3421 
3422 /*
3423  * dp_tx_exc_drop() - Drop the frame on a given VAP
3424  * @soc: DP soc handle
3425  * @vdev_id: id of DP vdev handle
3426  * @nbuf: skb
3427  * @tx_exc_metadata: Handle that holds exception path meta data
3428  *
3429  * Drop all the incoming packets
3430  *
3431  * Return: nbuf
3432  *
3433  */
3434 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3435 			  qdf_nbuf_t nbuf,
3436 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3437 {
3438 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3439 }
3440 #endif
3441 
3442 /*
3443  * dp_tx_send() - Transmit a frame on a given VAP
3444  * @soc: DP soc handle
3445  * @vdev_id: id of DP vdev handle
3446  * @nbuf: skb
3447  *
3448  * Entry point for Core Tx layer (DP_TX) invoked from
3449  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3450  * cases
3451  *
3452  * Return: NULL on success,
3453  *         nbuf when it fails to send
3454  */
3455 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3456 		      qdf_nbuf_t nbuf)
3457 {
3458 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3459 	uint16_t peer_id = HTT_INVALID_PEER;
3460 	/*
3461 	 * doing a memzero causes additional function call overhead,
3462 	 * so use static stack initialization instead
3463 	 */
3464 	struct dp_tx_msdu_info_s msdu_info = {0};
3465 	struct dp_vdev *vdev = NULL;
3466 	qdf_nbuf_t end_nbuf = NULL;
3467 
3468 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3469 		return nbuf;
3470 
3471 	/*
3472 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3473 	 * it in the per-packet path.
3474 	 *
3475 	 * As in this path vdev memory is already protected with netdev
3476 	 * tx lock
3477 	 */
3478 	vdev = soc->vdev_id_map[vdev_id];
3479 	if (qdf_unlikely(!vdev))
3480 		return nbuf;
3481 
3482 	/*
3483 	 * Set Default Host TID value to invalid TID
3484 	 * (TID override disabled)
3485 	 */
3486 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3487 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
3488 
3489 	if (qdf_unlikely(vdev->mesh_vdev)) {
3490 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3491 								&msdu_info);
3492 		if (!nbuf_mesh) {
3493 			dp_verbose_debug("Extracting mesh metadata failed");
3494 			return nbuf;
3495 		}
3496 		nbuf = nbuf_mesh;
3497 	}
3498 
3499 	/*
3500 	 * Get HW Queue to use for this frame.
3501 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3502 	 * dedicated for data and 1 for command.
3503 	 * "queue_id" maps to one hardware ring.
3504 	 *  With each ring, we also associate a unique Tx descriptor pool
3505 	 *  to minimize lock contention for these resources.
3506 	 */
3507 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3508 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3509 		     1);
3510 
3511 	/*
3512 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3513 	 *  Table 1 - Default DSCP-TID mapping table
3514 	 *  Table 2 - 1 DSCP-TID override table
3515 	 *
3516 	 * If we need a different DSCP-TID mapping for this vap,
3517 	 * call tid_classify to extract DSCP/ToS from frame and
3518 	 * map to a TID and store in msdu_info. This is later used
3519 	 * to fill in TCL Input descriptor (per-packet TID override).
3520 	 */
3521 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3522 
3523 	/*
3524 	 * Classify the frame and call corresponding
3525 	 * "prepare" function which extracts the segment (TSO)
3526 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3527 	 * into MSDU_INFO structure which is later used to fill
3528 	 * SW and HW descriptors.
3529 	 */
3530 	if (qdf_nbuf_is_tso(nbuf)) {
3531 		dp_verbose_debug("TSO frame %pK", vdev);
3532 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3533 				 qdf_nbuf_len(nbuf));
3534 
3535 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3536 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3537 					 qdf_nbuf_len(nbuf));
3538 			return nbuf;
3539 		}
3540 
3541 		goto send_multiple;
3542 	}
3543 
3544 	/* SG */
3545 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3546 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3547 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3548 				return nbuf;
3549 		} else {
3550 			struct dp_tx_seg_info_s seg_info = {0};
3551 
3552 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3553 						&msdu_info);
3554 			if (!nbuf)
3555 				return NULL;
3556 
3557 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3558 
3559 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3560 					 qdf_nbuf_len(nbuf));
3561 
3562 			goto send_multiple;
3563 		}
3564 	}
3565 
3566 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3567 		return NULL;
3568 
3569 	/* RAW */
3570 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3571 		struct dp_tx_seg_info_s seg_info = {0};
3572 
3573 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3574 		if (!nbuf)
3575 			return NULL;
3576 
3577 		dp_verbose_debug("Raw frame %pK", vdev);
3578 
3579 		goto send_multiple;
3580 
3581 	}
3582 
3583 	if (qdf_unlikely(vdev->nawds_enabled)) {
3584 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3585 					  qdf_nbuf_data(nbuf);
3586 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3587 			uint16_t sa_peer_id = DP_INVALID_PEER;
3588 
3589 			if (!soc->ast_offload_support) {
3590 				struct dp_ast_entry *ast_entry = NULL;
3591 
3592 				qdf_spin_lock_bh(&soc->ast_lock);
3593 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3594 					(soc,
3595 					 (uint8_t *)(eh->ether_shost),
3596 					 vdev->pdev->pdev_id);
3597 				if (ast_entry)
3598 					sa_peer_id = ast_entry->peer_id;
3599 				qdf_spin_unlock_bh(&soc->ast_lock);
3600 			}
3601 
3602 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3603 					    sa_peer_id);
3604 		}
3605 		peer_id = DP_INVALID_PEER;
3606 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3607 				 1, qdf_nbuf_len(nbuf));
3608 	}
3609 
3610 	/*  Single linear frame */
3611 	/*
3612 	 * If nbuf is a simple linear frame, use send_single function to
3613 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3614 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3615 	 */
3616 	dp_tx_prefetch_nbuf_data(nbuf);
3617 
3618 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3619 					      peer_id, end_nbuf);
3620 	return nbuf;
3621 
3622 send_multiple:
3623 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3624 
3625 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3626 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3627 
3628 	return nbuf;
3629 }
3630 
3631 /**
3632  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
3633  *      case to avoid the check in the per-packet path.
3634  * @soc_hdl: DP soc handle
3635  * @vdev_id: id of DP vdev handle
3636  * @nbuf: skb
3637  *
3638  * Entry point for Core Tx layer (DP_TX) invoked from
3639  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3640  * with a special condition to avoid the per-packet check in dp_tx_send
3641  *
3642  * Return: NULL on success,
3643  *         nbuf when it fails to send
3644  */
3645 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3646 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3647 {
3648 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3649 	struct dp_vdev *vdev = NULL;
3650 
3651 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3652 		return nbuf;
3653 
3654 	/*
3655 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3656 	 * it in the per-packet path.
3657 	 *
3658 	 * In this path the vdev memory is already protected by the
3659 	 * netdev tx lock.
3660 	 */
3661 	vdev = soc->vdev_id_map[vdev_id];
3662 	if (qdf_unlikely(!vdev))
3663 		return nbuf;
3664 
3665 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3666 			== QDF_STATUS_E_FAILURE)) {
3667 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3668 		return nbuf;
3669 	}
3670 
3671 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3672 }
3673 
3674 #ifdef UMAC_SUPPORT_PROXY_ARP
3675 /**
3676  * dp_tx_proxy_arp() - Tx proxy arp handler
3677  * @vdev: datapath vdev handle
3678  * @nbuf: sk buffer
3679  *
3680  * Return: status
3681  */
3682 static inline
3683 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3684 {
3685 	if (vdev->osif_proxy_arp)
3686 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3687 
3688 	/*
3689 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3690 	 * osif_proxy_arp has a valid function pointer assigned
3691 	 * to it
3692 	 */
3693 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3694 
3695 	return QDF_STATUS_NOT_INITIALIZED;
3696 }
3697 #else
3698 /**
3699  * dp_tx_proxy_arp() - Tx proxy arp handler
3700  * @vdev: datapath vdev handle
3701  * @nbuf: sk buffer
3702  *
3703  * This function always returns QDF_STATUS_SUCCESS when
3704  * UMAC_SUPPORT_PROXY_ARP is not defined.
3705  *
3706  * Return: status
3707  */
3708 static inline
3709 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3710 {
3711 	return QDF_STATUS_SUCCESS;
3712 }
3713 #endif
3714 
3715 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3716 #ifdef WLAN_MCAST_MLO
3717 static bool
3718 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3719 		       struct dp_tx_desc_s *tx_desc,
3720 		       qdf_nbuf_t nbuf,
3721 		       uint8_t reinject_reason)
3722 {
3723 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3724 		if (soc->arch_ops.dp_tx_mcast_handler)
3725 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3726 
3727 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3728 		return true;
3729 	}
3730 
3731 	return false;
3732 }
3733 #else /* WLAN_MCAST_MLO */
3734 static inline bool
3735 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3736 		       struct dp_tx_desc_s *tx_desc,
3737 		       qdf_nbuf_t nbuf,
3738 		       uint8_t reinject_reason)
3739 {
3740 	return false;
3741 }
3742 #endif /* WLAN_MCAST_MLO */
3743 #else
3744 static inline bool
3745 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3746 		       struct dp_tx_desc_s *tx_desc,
3747 		       qdf_nbuf_t nbuf,
3748 		       uint8_t reinject_reason)
3749 {
3750 	return false;
3751 }
3752 #endif
3753 
3754 /**
3755  * dp_tx_reinject_handler() - Tx Reinject Handler
3756  * @soc: datapath soc handle
3757  * @vdev: datapath vdev handle
3758  * @tx_desc: software descriptor head pointer
3759  * @status: Tx completion status from HTT descriptor
3760  * @reinject_reason: reinject reason from HTT descriptor
3761  *
3762  * This function reinjects frames back to Target.
3763  * Todo - Host queue needs to be added
3764  *
3765  * Return: none
3766  */
3767 void dp_tx_reinject_handler(struct dp_soc *soc,
3768 			    struct dp_vdev *vdev,
3769 			    struct dp_tx_desc_s *tx_desc,
3770 			    uint8_t *status,
3771 			    uint8_t reinject_reason)
3772 {
3773 	struct dp_peer *peer = NULL;
3774 	uint32_t peer_id = HTT_INVALID_PEER;
3775 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3776 	qdf_nbuf_t nbuf_copy = NULL;
3777 	struct dp_tx_msdu_info_s msdu_info;
3778 #ifdef WDS_VENDOR_EXTENSION
3779 	int is_mcast = 0, is_ucast = 0;
3780 	int num_peers_3addr = 0;
3781 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3782 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3783 #endif
3784 	struct dp_txrx_peer *txrx_peer;
3785 
3786 	qdf_assert(vdev);
3787 
3788 	dp_tx_debug("Tx reinject path");
3789 
3790 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3791 			qdf_nbuf_len(tx_desc->nbuf));
3792 
3793 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3794 		return;
3795 
3796 #ifdef WDS_VENDOR_EXTENSION
3797 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3798 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3799 	} else {
3800 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3801 	}
3802 	is_ucast = !is_mcast;
3803 
3804 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3805 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3806 		txrx_peer = dp_get_txrx_peer(peer);
3807 
3808 		if (!txrx_peer || txrx_peer->bss_peer)
3809 			continue;
3810 
3811 		/* Detect wds peers that use 3-addr framing for mcast.
3812 		 * If there are any, the bss_peer is used to send the
3813 		 * mcast frame using the 3-addr format. All wds-enabled
3814 		 * peers that use 4-addr framing for mcast frames will
3815 		 * be duplicated and sent as 4-addr frames below.
3816 		 */
3817 		if (!txrx_peer->wds_enabled ||
3818 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3819 			num_peers_3addr = 1;
3820 			break;
3821 		}
3822 	}
3823 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3824 #endif
3825 
3826 	if (qdf_unlikely(vdev->mesh_vdev)) {
3827 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3828 	} else {
3829 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3830 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3831 			txrx_peer = dp_get_txrx_peer(peer);
3832 			if (!txrx_peer)
3833 				continue;
3834 
3835 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3836 #ifdef WDS_VENDOR_EXTENSION
3837 			/*
3838 			 * . if 3-addr STA, then send on BSS Peer
3839 			 * . if Peer WDS enabled and accept 4-addr mcast,
3840 			 * send mcast on that peer only
3841 			 * . if Peer WDS enabled and accept 4-addr ucast,
3842 			 * send ucast on that peer only
3843 			 */
3844 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3845 			 (txrx_peer->wds_enabled &&
3846 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3847 			 (is_ucast &&
3848 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3849 #else
3850 			(txrx_peer->bss_peer &&
3851 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3852 #endif
3853 				peer_id = DP_INVALID_PEER;
3854 
3855 				nbuf_copy = qdf_nbuf_copy(nbuf);
3856 
3857 				if (!nbuf_copy) {
3858 					dp_tx_debug("nbuf copy failed");
3859 					break;
3860 				}
3861 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3862 				dp_tx_get_queue(vdev, nbuf,
3863 						&msdu_info.tx_queue);
3864 
3865 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3866 						nbuf_copy,
3867 						&msdu_info,
3868 						peer_id,
3869 						NULL);
3870 
3871 				if (nbuf_copy) {
3872 					dp_tx_debug("pkt send failed");
3873 					qdf_nbuf_free(nbuf_copy);
3874 				}
3875 			}
3876 		}
3877 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3878 	}
3879 
3880 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE,
3881 				     nbuf->len);
3882 	qdf_nbuf_free(nbuf);
3883 
3884 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3885 }
3886 
3887 /**
3888  * dp_tx_inspect_handler() - Tx Inspect Handler
3889  * @soc: datapath soc handle
3890  * @vdev: datapath vdev handle
3891  * @tx_desc: software descriptor head pointer
3892  * @status: Tx completion status from HTT descriptor
3893  *
3894  * Handles Tx frames sent back to Host for inspection
3895  * (ProxyARP)
3896  *
3897  * Return: none
3898  */
3899 void dp_tx_inspect_handler(struct dp_soc *soc,
3900 			   struct dp_vdev *vdev,
3901 			   struct dp_tx_desc_s *tx_desc,
3902 			   uint8_t *status)
3903 {
3904 
3905 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3906 			"%s Tx inspect path",
3907 			__func__);
3908 
3909 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3910 			 qdf_nbuf_len(tx_desc->nbuf));
3911 
3912 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3913 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3914 }
3915 
3916 #ifdef MESH_MODE_SUPPORT
3917 /**
3918  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3919  *                                         in mesh meta header
3920  * @tx_desc: software descriptor head pointer
3921  * @ts: pointer to tx completion stats
3922  * Return: none
3923  */
3924 static
3925 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3926 		struct hal_tx_completion_status *ts)
3927 {
3928 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3929 
3930 	if (!tx_desc->msdu_ext_desc) {
3931 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3932 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3933 				"netbuf %pK offset %d",
3934 				netbuf, tx_desc->pkt_offset);
3935 			return;
3936 		}
3937 	}
3938 }
3939 
3940 #else
3941 static
3942 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3943 		struct hal_tx_completion_status *ts)
3944 {
3945 }
3946 
3947 #endif
3948 
3949 #ifdef CONFIG_SAWF
3950 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3951 					 struct dp_vdev *vdev,
3952 					 struct dp_txrx_peer *txrx_peer,
3953 					 struct dp_tx_desc_s *tx_desc,
3954 					 struct hal_tx_completion_status *ts,
3955 					 uint8_t tid)
3956 {
3957 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
3958 					   ts, tid);
3959 }
3960 
3961 #else
3962 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3963 					 struct dp_vdev *vdev,
3964 					 struct dp_txrx_peer *txrx_peer,
3965 					 struct dp_tx_desc_s *tx_desc,
3966 					 struct hal_tx_completion_status *ts,
3967 					 uint8_t tid)
3968 {
3969 }
3970 
3971 #endif
3972 
3973 #ifdef QCA_PEER_EXT_STATS
3974 /*
3975  * dp_tx_compute_tid_delay() - Compute per TID delay
3976  * @stats: Per TID delay stats
3977  * @tx_desc: Software Tx descriptor
3978  *
3979  * Compute the software enqueue and hw enqueue delays and
3980  * update the respective histograms
3981  *
3982  * Return: void
3983  */
3984 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3985 				    struct dp_tx_desc_s *tx_desc)
3986 {
3987 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3988 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3989 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3990 
3991 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3992 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3993 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
3994 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3995 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3996 					 timestamp_hw_enqueue);
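	/*
	 * Worked example (hypothetical timestamps): ingress at t = 1000 ms,
	 * HW enqueue at t = 1002 ms and completion processed at t = 1010 ms
	 * gives sw_enqueue_delay = 2 ms and fwhw_transmit_delay = 8 ms.
	 */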
3997 
3998 	/*
3999 	 * Update the Tx software enqueue delay and the HW enqueue-to-completion delay.
4000 	 */
4001 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4002 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4003 }
4004 
4005 /*
4006  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4007  * @txrx_peer: DP peer context
4008  * @tx_desc: Tx software descriptor
4009  * @tid: Transmission ID
4010  * @ring_id: Tx completion ring id (CPU context ID)
4011  *
4012  * Update the peer extended stats. These are enhanced delay stats
4013  * maintained at the per-MSDU level.
4014  *
4015  * Return: void
4016  */
4017 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4018 					  struct dp_tx_desc_s *tx_desc,
4019 					  uint8_t tid, uint8_t ring_id)
4020 {
4021 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4022 	struct dp_soc *soc = NULL;
4023 	struct dp_peer_delay_stats *delay_stats = NULL;
4024 
4025 	soc = pdev->soc;
4026 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4027 		return;
4028 
4029 	delay_stats = txrx_peer->delay_stats;
4030 
4031 	qdf_assert(delay_stats);
4032 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4033 
4034 	/*
4035 	 * For packets without a valid TID, use the last data TID bucket
4036 	 */
4037 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4038 		tid = CDP_MAX_DATA_TIDS - 1;
4039 
4040 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4041 				tx_desc);
4042 }
4043 #else
4044 static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4045 						 struct dp_tx_desc_s *tx_desc,
4046 						 uint8_t tid, uint8_t ring_id)
4047 {
4048 }
4049 #endif
4050 
4051 #ifdef HW_TX_DELAY_STATS_ENABLE
4052 /**
4053  * dp_update_tx_delay_stats() - update the delay stats
4054  * @vdev: vdev handle
4055  * @delay: delay in ms or us based on the flag delay_in_us
4056  * @tid: tid value
4057  * @mode: type of tx delay mode
4058  * @ring_id: ring number
4059  * @delay_in_us: flag to indicate whether the delay is in ms or us
4060  *
4061  * Return: none
4062  */
4063 static inline
4064 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4065 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4066 {
4067 	struct cdp_tid_tx_stats *tstats =
4068 		&vdev->stats.tid_tx_stats[ring_id][tid];
4069 
4070 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4071 			      delay_in_us);
4072 }
4073 #else
4074 static inline
4075 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4076 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4077 {
4078 	struct cdp_tid_tx_stats *tstats =
4079 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4080 
4081 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4082 			      delay_in_us);
4083 }
4084 #endif
4085 
4086 /**
4087  * dp_tx_compute_delay() - Compute Tx path delays from the descriptor
4088  *				timestamps and update the delay stats
4089  *
4090  * @vdev: vdev handle
4091  * @tx_desc: tx descriptor
4092  * @tid: tid value
4093  * @ring_id: TCL or WBM ring number for transmit path
4094  * Return: none
4095  */
4096 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4097 			 uint8_t tid, uint8_t ring_id)
4098 {
4099 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4100 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4101 	uint32_t fwhw_transmit_delay_us;
4102 
4103 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4104 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4105 		return;
4106 
4107 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4108 		fwhw_transmit_delay_us =
4109 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4110 			qdf_ktime_to_us(tx_desc->timestamp);
4111 
4112 		/*
4113 		 * Delay between packet enqueued to HW and Tx completion in us
4114 		 */
4115 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4116 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4117 					 ring_id, true);
4118 		/*
4119 		 * For MCL, only enqueue to completion delay is required
4120 		 * so return if the vdev flag is enabled.
4121 		 */
4122 		return;
4123 	}
4124 
4125 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4126 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4127 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4128 					 timestamp_hw_enqueue);
4129 
4130 	/*
4131 	 * Delay between packet enqueued to HW and Tx completion in ms
4132 	 */
4133 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4134 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4135 				 false);
4136 
4137 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4138 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4139 	interframe_delay = (uint32_t)(timestamp_ingress -
4140 				      vdev->prev_tx_enq_tstamp);
4141 
4142 	/*
4143 	 * Delay in software enqueue
4144 	 */
4145 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4146 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4147 				 false);
4148 
4149 	/*
4150 	 * Update interframe delay stats calculated at hardstart receive point.
4151 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4152 	 * interframe delay will not be calculated correctly for the 1st frame.
4153 	 * On the other hand, this helps in avoiding an extra per-packet check
4154 	 * of !vdev->prev_tx_enq_tstamp.
4155 	 */
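	/*
	 * Illustrative example (hypothetical timestamps): if the previous
	 * frame hit hardstart at t = 90 ms and this frame at t = 100 ms,
	 * interframe_delay = 10 ms. For the very first frame the previous
	 * timestamp is 0, so that single sample is knowingly inaccurate.
	 */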
4156 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4157 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4158 				 false);
4159 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4160 }
4161 
4162 #ifdef DISABLE_DP_STATS
4163 static
4164 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4165 				   struct dp_txrx_peer *txrx_peer)
4166 {
4167 }
4168 #else
4169 static inline void
4170 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4171 {
4172 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4173 
4174 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4175 	if (subtype != QDF_PROTO_INVALID)
4176 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4177 					  1);
4178 }
4179 #endif
4180 
4181 #ifndef QCA_ENHANCED_STATS_SUPPORT
4182 #ifdef DP_PEER_EXTENDED_API
4183 static inline uint8_t
4184 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4185 {
4186 	return txrx_peer->mpdu_retry_threshold;
4187 }
4188 #else
4189 static inline uint8_t
4190 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4191 {
4192 	return 0;
4193 }
4194 #endif
4195 
4196 /**
4197  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4198  *
4199  * @ts: Tx completion status
4200  * @txrx_peer: datapath txrx_peer handle
4201  *
4202  * Return: void
4203  */
4204 static inline void
4205 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4206 			     struct dp_txrx_peer *txrx_peer)
4207 {
4208 	uint8_t mcs, pkt_type, dst_mcs_idx;
4209 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4210 
4211 	mcs = ts->mcs;
4212 	pkt_type = ts->pkt_type;
4213 	/* do HW to SW pkt type conversion */
4214 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4215 		    hal_2_dp_pkt_type_map[pkt_type]);
4216 
4217 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4218 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4219 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4220 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4221 				       1);
4222 
4223 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4224 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4225 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4226 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4227 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4228 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4229 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4230 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4231 	if (ts->first_msdu) {
4232 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4233 					ts->transmit_cnt > 1);
4234 
4235 		if (!retry_threshold)
4236 			return;
4237 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4238 					qdf_do_div(ts->transmit_cnt,
4239 						   retry_threshold),
4240 					ts->transmit_cnt > retry_threshold);
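		/*
		 * Worked example (hypothetical values): with a retry
		 * threshold of 12 and transmit_cnt = 30, the condition
		 * 30 > 12 holds and mpdu_success_with_retries is bumped
		 * by 30 / 12 = 2 for this MPDU.
		 */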
4241 	}
4242 }
4243 #else
4244 static inline void
4245 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4246 			     struct dp_txrx_peer *txrx_peer)
4247 {
4248 }
4249 #endif
4250 
4251 /**
4252  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4253  *				per wbm ring
4254  *
4255  * @tx_desc: software descriptor head pointer
4256  * @ts: Tx completion status
4257  * @txrx_peer: txrx peer handle
4258  * @ring_id: ring number
4259  *
4260  * Return: None
4261  */
4262 static inline void
4263 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4264 			struct hal_tx_completion_status *ts,
4265 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4266 {
4267 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4268 	uint8_t tid = ts->tid;
4269 	uint32_t length;
4270 	struct cdp_tid_tx_stats *tid_stats;
4271 
4272 	if (!pdev)
4273 		return;
4274 
4275 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4276 		tid = CDP_MAX_DATA_TIDS - 1;
4277 
4278 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4279 
4280 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4281 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4282 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4283 		return;
4284 	}
4285 
4286 	length = qdf_nbuf_len(tx_desc->nbuf);
4287 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4288 
4289 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4290 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4291 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4292 
4293 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4294 		tid_stats->tqm_status_cnt[ts->status]++;
4295 	}
4296 
4297 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4298 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4299 					   ts->transmit_cnt > 1);
4300 
4301 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4302 					   1, ts->transmit_cnt > 2);
4303 
4304 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4305 
4306 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4307 					   ts->msdu_part_of_amsdu);
4308 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4309 					   !ts->msdu_part_of_amsdu);
4310 
4311 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4312 							qdf_system_ticks();
4313 
4314 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4315 
4316 		return;
4317 	}
4318 
4319 	/*
4320 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4321 	 * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to a
4322 	 * hw limitation, there are no completions for failed cases.
4323 	 * Hence tx_failed is updated from the data path. Please note that
4324 	 * if tx_failed is fixed to be from ppdu, then this has to be
4325 	 * removed
4326 	 */
4327 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4328 
4329 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4330 				   ts->transmit_cnt > DP_RETRY_COUNT);
4331 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4332 
4333 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4334 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4335 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4336 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4337 					      length);
4338 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4339 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4340 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4341 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4342 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4343 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4344 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4345 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4346 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4347 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4348 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4349 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4350 					  tx.dropped.fw_rem_queue_disable, 1);
4351 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4352 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4353 					  tx.dropped.fw_rem_no_match, 1);
4354 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4355 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4356 					  tx.dropped.drop_threshold, 1);
4357 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4358 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4359 					  tx.dropped.drop_link_desc_na, 1);
4360 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4361 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4362 					  tx.dropped.invalid_drop, 1);
4363 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4364 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4365 					  tx.dropped.mcast_vdev_drop, 1);
4366 	} else {
4367 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4368 	}
4369 }
4370 
4371 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4372 /**
4373  * dp_tx_flow_pool_lock() - take flow pool lock
4374  * @soc: core txrx main context
4375  * @tx_desc: tx desc
4376  *
4377  * Return: None
4378  */
4379 static inline
4380 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4381 			  struct dp_tx_desc_s *tx_desc)
4382 {
4383 	struct dp_tx_desc_pool_s *pool;
4384 	uint8_t desc_pool_id;
4385 
4386 	desc_pool_id = tx_desc->pool_id;
4387 	pool = &soc->tx_desc[desc_pool_id];
4388 
4389 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4390 }
4391 
4392 /**
4393  * dp_tx_flow_pool_unlock() - release flow pool lock
4394  * @soc: core txrx main context
4395  * @tx_desc: tx desc
4396  *
4397  * Return: None
4398  */
4399 static inline
4400 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4401 			    struct dp_tx_desc_s *tx_desc)
4402 {
4403 	struct dp_tx_desc_pool_s *pool;
4404 	uint8_t desc_pool_id;
4405 
4406 	desc_pool_id = tx_desc->pool_id;
4407 	pool = &soc->tx_desc[desc_pool_id];
4408 
4409 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4410 }
4411 #else
4412 static inline
4413 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4414 {
4415 }
4416 
4417 static inline
4418 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4419 {
4420 }
4421 #endif
4422 
4423 /**
4424  * dp_tx_notify_completion() - Notify tx completion for this desc
4425  * @soc: core txrx main context
4426  * @vdev: datapath vdev handle
4427  * @tx_desc: tx desc
4428  * @netbuf:  buffer
4429  * @status: tx status
4430  *
4431  * Return: none
4432  */
4433 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4434 					   struct dp_vdev *vdev,
4435 					   struct dp_tx_desc_s *tx_desc,
4436 					   qdf_nbuf_t netbuf,
4437 					   uint8_t status)
4438 {
4439 	void *osif_dev;
4440 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4441 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4442 
4443 	qdf_assert(tx_desc);
4444 
4445 	if (!vdev ||
4446 	    !vdev->osif_vdev) {
4447 		return;
4448 	}
4449 
4450 	osif_dev = vdev->osif_vdev;
4451 	tx_compl_cbk = vdev->tx_comp;
4452 
4453 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4454 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4455 
4456 	if (tx_compl_cbk)
4457 		tx_compl_cbk(netbuf, osif_dev, flag);
4458 }
4459 
4460 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @txrx_peer: datapath txrx_peer handle
4462  * @tid: tid value
4463  * @txdesc_ts: timestamp from txdesc
4464  * @ppdu_id: ppdu id
4465  *
4466  * Return: none
4467  */
4468 #ifdef FEATURE_PERPKT_INFO
4469 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4470 					       struct dp_txrx_peer *txrx_peer,
4471 					       uint8_t tid,
4472 					       uint64_t txdesc_ts,
4473 					       uint32_t ppdu_id)
4474 {
4475 	uint64_t delta_ms;
4476 	struct cdp_tx_sojourn_stats *sojourn_stats;
4477 	struct dp_peer *primary_link_peer = NULL;
4478 	struct dp_soc *link_peer_soc = NULL;
4479 
4480 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4481 		return;
4482 
4483 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4484 			 tid >= CDP_DATA_TID_MAX))
4485 		return;
4486 
4487 	if (qdf_unlikely(!pdev->sojourn_buf))
4488 		return;
4489 
4490 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4491 							   txrx_peer->peer_id,
4492 							   DP_MOD_ID_TX_COMP);
4493 
4494 	if (qdf_unlikely(!primary_link_peer))
4495 		return;
4496 
4497 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4498 		qdf_nbuf_data(pdev->sojourn_buf);
4499 
4500 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4501 	sojourn_stats->cookie = (void *)
4502 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4503 							  primary_link_peer);
4504 
4505 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4506 				txdesc_ts;
4507 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4508 			    delta_ms);
4509 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4510 	sojourn_stats->num_msdus[tid] = 1;
4511 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4512 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4513 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4514 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4515 			     WDI_NO_VAL, pdev->pdev_id);
4516 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4517 	sojourn_stats->num_msdus[tid] = 0;
4518 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4519 
4520 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4521 }
4522 #else
4523 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4524 					       struct dp_txrx_peer *txrx_peer,
4525 					       uint8_t tid,
4526 					       uint64_t txdesc_ts,
4527 					       uint32_t ppdu_id)
4528 {
4529 }
4530 #endif
4531 
4532 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4533 /**
4534  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4535  * @soc: dp_soc handle
4536  * @desc: Tx Descriptor
4537  * @ts: HAL Tx completion descriptor contents
4538  *
4539  * This function is used to send tx completion to packet capture
4540  */
4541 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4542 				       struct dp_tx_desc_s *desc,
4543 				       struct hal_tx_completion_status *ts)
4544 {
4545 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4546 			     desc, ts->peer_id,
4547 			     WDI_NO_VAL, desc->pdev->pdev_id);
4548 }
4549 #endif
4550 
4551 /**
4552  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4553  * @soc: DP Soc handle
4554  * @desc: software Tx descriptor
4555  * @ts: Tx completion status from HAL/HTT descriptor
 * @txrx_peer: datapath txrx_peer handle
4556  *
4557  * Return: none
4558  */
4559 void
4560 dp_tx_comp_process_desc(struct dp_soc *soc,
4561 			struct dp_tx_desc_s *desc,
4562 			struct hal_tx_completion_status *ts,
4563 			struct dp_txrx_peer *txrx_peer)
4564 {
4565 	uint64_t time_latency = 0;
4566 	uint16_t peer_id = DP_INVALID_PEER_ID;
4567 
4568 	/*
4569 	 * m_copy/tx_capture modes are not supported for
4570 	 * scatter gather packets
4571 	 */
4572 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4573 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4574 				qdf_ktime_to_ms(desc->timestamp));
4575 	}
4576 
4577 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4578 
4579 	if (dp_tx_pkt_tracepoints_enabled())
4580 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4581 				    desc->msdu_ext_desc ?
4582 				    desc->msdu_ext_desc->tso_desc : NULL,
4583 				    qdf_ktime_to_ms(desc->timestamp));
4584 
4585 	if (!(desc->msdu_ext_desc)) {
4586 		dp_tx_enh_unmap(soc, desc);
4587 		if (txrx_peer)
4588 			peer_id = txrx_peer->peer_id;
4589 
4590 		if (QDF_STATUS_SUCCESS ==
4591 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4592 			return;
4593 		}
4594 
4595 		if (QDF_STATUS_SUCCESS ==
4596 		    dp_get_completion_indication_for_stack(soc,
4597 							   desc->pdev,
4598 							   txrx_peer, ts,
4599 							   desc->nbuf,
4600 							   time_latency)) {
4601 			dp_send_completion_to_stack(soc,
4602 						    desc->pdev,
4603 						    ts->peer_id,
4604 						    ts->ppdu_id,
4605 						    desc->nbuf);
4606 			return;
4607 		}
4608 	}
4609 
4610 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4611 	dp_tx_comp_free_buf(soc, desc, false);
4612 }
4613 
4614 #ifdef DISABLE_DP_STATS
4615 /**
4616  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4617  * @soc: core txrx main context
 * @vdev: datapath vdev handle
4618  * @tx_desc: tx desc
4619  * @status: tx status
4620  *
4621  * Return: none
4622  */
4623 static inline
4624 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4625 				     struct dp_vdev *vdev,
4626 				     struct dp_tx_desc_s *tx_desc,
4627 				     uint8_t status)
4628 {
4629 }
4630 #else
4631 static inline
4632 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4633 				     struct dp_vdev *vdev,
4634 				     struct dp_tx_desc_s *tx_desc,
4635 				     uint8_t status)
4636 {
4637 	void *osif_dev;
4638 	ol_txrx_stats_rx_fp stats_cbk;
4639 	uint8_t pkt_type;
4640 
4641 	qdf_assert(tx_desc);
4642 
4643 	if (!vdev ||
4644 	    !vdev->osif_vdev ||
4645 	    !vdev->stats_cb)
4646 		return;
4647 
4648 	osif_dev = vdev->osif_vdev;
4649 	stats_cbk = vdev->stats_cb;
4650 
4651 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4652 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4653 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4654 			  &pkt_type);
4655 }
4656 #endif
4657 
4658 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
4659 QDF_STATUS
4660 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
4661 			  uint32_t delta_tsf,
4662 			  uint32_t *delay_us)
4663 {
4664 	uint32_t buffer_ts;
4665 	uint32_t delay;
4666 
4667 	if (!delay_us)
4668 		return QDF_STATUS_E_INVAL;
4669 
4670 	/* If tx_rate_stats_info_valid is 0, the tsf is not valid; bail out */
4671 	if (!ts->valid)
4672 		return QDF_STATUS_E_INVAL;
4673 
4674 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4675 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4676 	 * valid up to 29 bits.
4677 	 */
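	/*
	 * Illustrative example (hypothetical values): if the 19-bit
	 * buffer_timestamp field reads 100, buffer_ts = 100 << 10 = 102400 us.
	 * With ts->tsf = 153600 us and delta_tsf = 1200 us, the masked delay
	 * is 153600 - 102400 - 1200 = 50000 us (~50 ms). Values above
	 * 0x1000000 us (~16.8 s) are treated as bogus below.
	 */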
4678 	buffer_ts = ts->buffer_timestamp << 10;
4679 
4680 	delay = ts->tsf - buffer_ts - delta_tsf;
4681 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
4682 	if (delay > 0x1000000) {
4683 		dp_info_rl("----------------------\n"
4684 			   "Tx completion status:\n"
4685 			   "----------------------\n"
4686 			   "release_src = %d\n"
4687 			   "ppdu_id = 0x%x\n"
4688 			   "release_reason = %d\n"
4689 			   "tsf = %u (0x%x)\n"
4690 			   "buffer_timestamp = %u (0x%x)\n"
4691 			   "delta_tsf = %u (0x%x)\n",
4692 			   ts->release_src, ts->ppdu_id, ts->status,
4693 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4694 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4695 		return QDF_STATUS_E_FAILURE;
4696 	}
4697 
4698 	*delay_us = delay;
4699 
4700 	return QDF_STATUS_SUCCESS;
4701 }
4702 
4703 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4704 		      uint32_t delta_tsf)
4705 {
4706 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4707 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4708 						     DP_MOD_ID_CDP);
4709 
4710 	if (!vdev) {
4711 		dp_err_rl("vdev %d does not exist", vdev_id);
4712 		return;
4713 	}
4714 
4715 	vdev->delta_tsf = delta_tsf;
4716 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4717 
4718 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4719 }
4720 #endif
4721 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4722 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4723 				      uint8_t vdev_id, bool enable)
4724 {
4725 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4726 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4727 						     DP_MOD_ID_CDP);
4728 
4729 	if (!vdev) {
4730 		dp_err_rl("vdev %d does not exist", vdev_id);
4731 		return QDF_STATUS_E_FAILURE;
4732 	}
4733 
4734 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4735 
4736 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4737 
4738 	return QDF_STATUS_SUCCESS;
4739 }
4740 
4741 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4742 			       uint32_t *val)
4743 {
4744 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4745 	struct dp_vdev *vdev;
4746 	uint32_t delay_accum;
4747 	uint32_t pkts_accum;
4748 
4749 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4750 	if (!vdev) {
4751 		dp_err_rl("vdev %d does not exist", vdev_id);
4752 		return QDF_STATUS_E_FAILURE;
4753 	}
4754 
4755 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4756 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4757 		return QDF_STATUS_E_FAILURE;
4758 	}
4759 
4760 	/* Average uplink delay based on current accumulated values */
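	/*
	 * e.g. (hypothetical numbers) delay_accum = 120 ms accumulated over
	 * pkts_accum = 40 completions yields an average uplink delay of 3 ms.
	 */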
4761 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4762 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4763 
4764 	*val = pkts_accum ? delay_accum / pkts_accum : 0;
4765 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4766 		 delay_accum, pkts_accum);
4767 
4768 	/* Reset accumulated values to 0 */
4769 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4770 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4771 
4772 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4773 
4774 	return QDF_STATUS_SUCCESS;
4775 }
4776 
4777 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4778 				      struct hal_tx_completion_status *ts)
4779 {
4780 	uint32_t ul_delay;
4781 
4782 	if (qdf_unlikely(!vdev)) {
4783 		dp_info_rl("vdev is null or delete in progress");
4784 		return;
4785 	}
4786 
4787 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4788 		return;
4789 
4790 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
4791 							  vdev->delta_tsf,
4792 							  &ul_delay)))
4793 		return;
4794 
4795 	ul_delay /= 1000; /* in unit of ms */
4796 
4797 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4798 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4799 }
4800 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4801 static inline
4802 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4803 			       struct hal_tx_completion_status *ts)
4804 {
4805 }
4806 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4807 
4808 /**
4809  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4810  * @soc: DP soc handle
4811  * @tx_desc: software descriptor head pointer
4812  * @ts: Tx completion status
4813  * @txrx_peer: txrx peer handle
4814  * @ring_id: ring number
4815  *
4816  * Return: none
4817  */
4818 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4819 				  struct dp_tx_desc_s *tx_desc,
4820 				  struct hal_tx_completion_status *ts,
4821 				  struct dp_txrx_peer *txrx_peer,
4822 				  uint8_t ring_id)
4823 {
4824 	uint32_t length;
4825 	qdf_ether_header_t *eh;
4826 	struct dp_vdev *vdev = NULL;
4827 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4828 	enum qdf_dp_tx_rx_status dp_status;
4829 
4830 	if (!nbuf) {
4831 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4832 		goto out;
4833 	}
4834 
4835 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4836 	length = qdf_nbuf_len(nbuf);
4837 
4838 	dp_status = dp_tx_hw_to_qdf(ts->status);
4839 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4840 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4841 				 QDF_TRACE_DEFAULT_PDEV_ID,
4842 				 qdf_nbuf_data_addr(nbuf),
4843 				 sizeof(qdf_nbuf_data(nbuf)),
4844 				 tx_desc->id, ts->status, dp_status));
4845 
4846 	dp_tx_comp_debug("-------------------- \n"
4847 			 "Tx Completion Stats: \n"
4848 			 "-------------------- \n"
4849 			 "ack_frame_rssi = %d \n"
4850 			 "first_msdu = %d \n"
4851 			 "last_msdu = %d \n"
4852 			 "msdu_part_of_amsdu = %d \n"
4853 			 "rate_stats valid = %d \n"
4854 			 "bw = %d \n"
4855 			 "pkt_type = %d \n"
4856 			 "stbc = %d \n"
4857 			 "ldpc = %d \n"
4858 			 "sgi = %d \n"
4859 			 "mcs = %d \n"
4860 			 "ofdma = %d \n"
4861 			 "tones_in_ru = %d \n"
4862 			 "tsf = %d \n"
4863 			 "ppdu_id = %d \n"
4864 			 "transmit_cnt = %d \n"
4865 			 "tid = %d \n"
4866 			 "peer_id = %d\n"
4867 			 "tx_status = %d\n",
4868 			 ts->ack_frame_rssi, ts->first_msdu,
4869 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4870 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4871 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4872 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4873 			 ts->transmit_cnt, ts->tid, ts->peer_id,
4874 			 ts->status);
4875 
4876 	/* Update SoC level stats */
4877 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4878 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4879 
4880 	if (!txrx_peer) {
4881 		dp_info_rl("peer is null or deletion in progress");
4882 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4883 		goto out;
4884 	}
4885 	vdev = txrx_peer->vdev;
4886 
4887 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4888 	dp_tx_update_uplink_delay(soc, vdev, ts);
4889 
4890 	/* check tx complete notification */
4891 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
4892 		dp_tx_notify_completion(soc, vdev, tx_desc,
4893 					nbuf, ts->status);
4894 
4895 	/* Update per-packet stats for mesh mode */
4896 	if (qdf_unlikely(vdev->mesh_vdev) &&
4897 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4898 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4899 
4900 	/* Update peer level stats */
4901 	if (qdf_unlikely(txrx_peer->bss_peer &&
4902 			 vdev->opmode == wlan_op_mode_ap)) {
4903 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4904 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
4905 						      length);
4906 
4907 			if (txrx_peer->vdev->tx_encap_type ==
4908 				htt_cmn_pkt_type_ethernet &&
4909 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4910 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4911 							      tx.bcast, 1,
4912 							      length);
4913 			}
4914 		}
4915 	} else {
4916 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
4917 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4918 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
4919 						      1, length);
4920 			if (qdf_unlikely(txrx_peer->in_twt)) {
4921 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4922 							      tx.tx_success_twt,
4923 							      1, length);
4924 			}
4925 		}
4926 	}
4927 
4928 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
4929 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
4930 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
4931 				     ts, ts->tid);
4932 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
4933 
4934 #ifdef QCA_SUPPORT_RDK_STATS
4935 	if (soc->peerstats_enabled)
4936 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
4937 					    qdf_ktime_to_ms(tx_desc->timestamp),
4938 					    ts->ppdu_id);
4939 #endif
4940 
4941 out:
4942 	return;
4943 }
4944 
4945 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
4946 	defined(QCA_ENHANCED_STATS_SUPPORT)
4947 /*
4948  * dp_tx_update_peer_basic_stats(): Update peer basic stats
4949  * @txrx_peer: Datapath txrx_peer handle
4950  * @length: Length of the packet
4951  * @tx_status: Tx status from TQM/FW
4952  * @update: enhanced flag value present in dp_pdev
4953  *
4954  * Return: none
4955  */
4956 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4957 				   uint32_t length, uint8_t tx_status,
4958 				   bool update)
4959 {
4960 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
4961 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4962 
4963 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4964 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4965 	}
4966 }
4967 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
4968 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4969 				   uint32_t length, uint8_t tx_status,
4970 				   bool update)
4971 {
4972 	if (!txrx_peer->hw_txrx_stats_en) {
4973 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4974 
4975 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4976 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4977 	}
4978 }
4979 
4980 #else
4981 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4982 				   uint32_t length, uint8_t tx_status,
4983 				   bool update)
4984 {
4985 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4986 
4987 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4988 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4989 }
4990 #endif
4991 
4992 /*
4993  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
4994  * @next: pointer to the next Tx SW descriptor
4995  *
4996  * Return: none
4997  */
4998 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
4999 static inline
5000 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5001 {
5002 	qdf_nbuf_t nbuf = NULL;
5003 
5004 	if (next)
5005 		nbuf = next->nbuf;
5006 	if (nbuf) {
5007 		/* prefetch skb->next and first few bytes of skb->cb */
5008 		qdf_prefetch(next->shinfo_addr);
5009 		qdf_prefetch(nbuf);
5010 		/* prefetch skb fields present in different cachelines */
5011 		qdf_prefetch(&nbuf->len);
5012 		qdf_prefetch(&nbuf->users);
5013 	}
5014 }
5015 #else
5016 static inline
5017 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5018 {
5019 }
5020 #endif
5021 
5022 /**
5023  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5024  * @soc: core txrx main context
5025  * @desc: software descriptor
5026  *
5027  * Return: true when packet is reinjected
5028  */
5029 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5030 	defined(WLAN_MCAST_MLO)
5031 static inline bool
5032 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5033 {
5034 	struct dp_vdev *vdev = NULL;
5035 
5036 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5037 		if (!soc->arch_ops.dp_tx_mcast_handler)
5038 			return false;
5039 
5040 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5041 					     DP_MOD_ID_REINJECT);
5042 
5043 		if (qdf_unlikely(!vdev)) {
5044 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
5045 					   desc->id);
5046 			return false;
5047 		}
5048 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5049 				 qdf_nbuf_len(desc->nbuf));
5050 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5051 		dp_tx_desc_release(desc, desc->pool_id);
5052 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5053 		return true;
5054 	}
5055 
5056 	return false;
5057 }
5058 #else
5059 static inline bool
5060 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5061 {
5062 	return false;
5063 }
5064 #endif
5065 
5066 /**
5067  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
5068  * @soc: core txrx main context
5069  * @comp_head: software descriptor head pointer
5070  * @ring_id: ring number
5071  *
5072  * This function will process batch of descriptors reaped by dp_tx_comp_handler
5073  * and release the software descriptors after processing is complete
5074  *
5075  * Return: none
5076  */
5077 static void
5078 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5079 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5080 {
5081 	struct dp_tx_desc_s *desc;
5082 	struct dp_tx_desc_s *next;
5083 	struct hal_tx_completion_status ts;
5084 	struct dp_txrx_peer *txrx_peer = NULL;
5085 	uint16_t peer_id = DP_INVALID_PEER;
5086 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5087 
5088 	desc = comp_head;
5089 
5090 	while (desc) {
5091 		next = desc->next;
5092 		dp_tx_prefetch_next_nbuf_data(next);
5093 
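		/*
		 * Cache the txrx_peer reference across a run of descriptors
		 * that share the same peer_id so that the reference lookup
		 * is not repeated for every descriptor.
		 */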
5094 		if (peer_id != desc->peer_id) {
5095 			if (txrx_peer)
5096 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5097 							  DP_MOD_ID_TX_COMP);
5098 			peer_id = desc->peer_id;
5099 			txrx_peer =
5100 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5101 							   &txrx_ref_handle,
5102 							   DP_MOD_ID_TX_COMP);
5103 		}
5104 
5105 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5106 			desc = next;
5107 			continue;
5108 		}
5109 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5110 			struct dp_pdev *pdev = desc->pdev;
5111 
5112 			if (qdf_likely(txrx_peer))
5113 				dp_tx_update_peer_basic_stats(txrx_peer,
5114 							      desc->length,
5115 							      desc->tx_status,
5116 							      false);
5117 			qdf_assert(pdev);
5118 			dp_tx_outstanding_dec(pdev);
5119 
5120 			/*
5121 			 * Calling a QDF wrapper here creates a significant
5122 			 * performance impact, so the wrapper call is avoided here.
5123 			 */
5124 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5125 					       desc->id, DP_TX_COMP_UNMAP);
5126 			dp_tx_nbuf_unmap(soc, desc);
5127 			qdf_nbuf_free_simple(desc->nbuf);
5128 			dp_tx_desc_free(soc, desc, desc->pool_id);
5129 			desc = next;
5130 			continue;
5131 		}
5132 
5133 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5134 
5135 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5136 					     ring_id);
5137 
5138 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5139 
5140 		dp_tx_desc_release(desc, desc->pool_id);
5141 		desc = next;
5142 	}
5143 	if (txrx_peer)
5144 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5145 }
5146 
5147 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5148 static inline
5149 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5150 				   int max_reap_limit)
5151 {
5152 	bool limit_hit = false;
5153 
5154 	limit_hit =
5155 		(num_reaped >= max_reap_limit) ? true : false;
5156 
5157 	if (limit_hit)
5158 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5159 
5160 	return limit_hit;
5161 }
5162 
5163 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5164 {
5165 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5166 }
5167 
5168 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5169 {
5170 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5171 
5172 	return cfg->tx_comp_loop_pkt_limit;
5173 }
5174 #else
5175 static inline
5176 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5177 				   int max_reap_limit)
5178 {
5179 	return false;
5180 }
5181 
5182 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5183 {
5184 	return false;
5185 }
5186 
5187 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5188 {
5189 	return 0;
5190 }
5191 #endif
5192 
5193 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5194 static inline int
5195 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5196 				  int *max_reap_limit)
5197 {
5198 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5199 							       max_reap_limit);
5200 }
5201 #else
5202 static inline int
5203 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5204 				  int *max_reap_limit)
5205 {
5206 	return 0;
5207 }
5208 #endif
5209 
5210 #ifdef DP_TX_TRACKING
5211 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5212 {
5213 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5214 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5215 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5216 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5217 	}
5218 }
5219 #endif
5220 
5221 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5222 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5223 			    uint32_t quota)
5224 {
5225 	void *tx_comp_hal_desc;
5226 	void *last_prefetched_hw_desc = NULL;
5227 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5228 	hal_soc_handle_t hal_soc;
5229 	uint8_t buffer_src;
5230 	struct dp_tx_desc_s *tx_desc = NULL;
5231 	struct dp_tx_desc_s *head_desc = NULL;
5232 	struct dp_tx_desc_s *tail_desc = NULL;
5233 	uint32_t num_processed = 0;
5234 	uint32_t count;
5235 	uint32_t num_avail_for_reap = 0;
5236 	bool force_break = false;
5237 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5238 	int max_reap_limit, ring_near_full;
5239 
5240 	DP_HIST_INIT();
5241 
5242 more_data:
5243 
5244 	hal_soc = soc->hal_soc;
5245 	/* Re-initialize local variables to be re-used */
5246 	head_desc = NULL;
5247 	tail_desc = NULL;
5248 	count = 0;
5249 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5250 
5251 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5252 							   &max_reap_limit);
5253 
5254 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5255 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5256 		return 0;
5257 	}
5258 
5259 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
5260 
5261 	if (num_avail_for_reap >= quota)
5262 		num_avail_for_reap = quota;
5263 
5264 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5265 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5266 							    hal_ring_hdl,
5267 							    num_avail_for_reap);
5268 
5269 	/* Find head descriptor from completion ring */
5270 	while (qdf_likely(num_avail_for_reap--)) {
5271 
5272 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5273 		if (qdf_unlikely(!tx_comp_hal_desc))
5274 			break;
5275 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5276 							   tx_comp_hal_desc);
5277 
5278 		/* If this buffer was not released by TQM or FW, then it is not
5279 		 * a Tx completion indication; log the error and skip it */
5280 		if (qdf_unlikely(buffer_src !=
5281 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5282 				 (qdf_unlikely(buffer_src !=
5283 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5284 			uint8_t wbm_internal_error;
5285 
5286 			dp_err_rl(
5287 				"Tx comp release_src != TQM | FW but from %d",
5288 				buffer_src);
5289 			hal_dump_comp_desc(tx_comp_hal_desc);
5290 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5291 
5292 			/* When WBM sees NULL buffer_addr_info in any of
5293 			 * ingress rings it sends an error indication,
5294 			 * with wbm_internal_error=1, to a specific ring.
5295 			 * The WBM2SW ring used to indicate these errors is
5296 			 * fixed in HW, and that ring is being used as Tx
5297 			 * completion ring. These errors are not related to
5298 			 * Tx completions, and should just be ignored
5299 			 */
5300 			wbm_internal_error = hal_get_wbm_internal_error(
5301 							hal_soc,
5302 							tx_comp_hal_desc);
5303 
5304 			if (wbm_internal_error) {
5305 				dp_err_rl("Tx comp wbm_internal_error!!");
5306 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5307 
5308 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5309 								buffer_src)
5310 					dp_handle_wbm_internal_error(
5311 						soc,
5312 						tx_comp_hal_desc,
5313 						hal_tx_comp_get_buffer_type(
5314 							tx_comp_hal_desc));
5315 
5316 			} else {
5317 				dp_err_rl("Tx comp wbm_internal_error false");
5318 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5319 			}
5320 			continue;
5321 		}
5322 
5323 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5324 							       tx_comp_hal_desc,
5325 							       &tx_desc);
5326 		if (!tx_desc) {
5327 			dp_err("unable to retrieve tx_desc!");
5328 			QDF_BUG(0);
5329 			continue;
5330 		}
5331 		tx_desc->buffer_src = buffer_src;
5332 		/*
5333 		 * If the release source is FW, process the HTT status
5334 		 */
5335 		if (qdf_unlikely(buffer_src ==
5336 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5337 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5338 
5339 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5340 					htt_tx_status);
5341 			/* Collect hw completion contents */
5342 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5343 					      &tx_desc->comp, 1);
5344 			soc->arch_ops.dp_tx_process_htt_completion(
5345 							soc,
5346 							tx_desc,
5347 							htt_tx_status,
5348 							ring_id);
5349 		} else {
5350 			tx_desc->tx_status =
5351 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5352 			tx_desc->buffer_src = buffer_src;
5353 			/*
5354 			 * If the fast completion mode is enabled, the extended
5355 			 * metadata from the descriptor is not copied
5356 			 */
5357 			if (qdf_likely(tx_desc->flags &
5358 						DP_TX_DESC_FLAG_SIMPLE))
5359 				goto add_to_pool;
5360 
5361 			/*
5362 			 * If the descriptor is already freed in vdev_detach,
5363 			 * continue to next descriptor
5364 			 */
5365 			if (qdf_unlikely
5366 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5367 				 !tx_desc->flags)) {
5368 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5369 						   tx_desc->id);
5370 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5371 				dp_tx_desc_check_corruption(tx_desc);
5372 				continue;
5373 			}
5374 
5375 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5376 				dp_tx_comp_info_rl("pdev in down state %d",
5377 						   tx_desc->id);
5378 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5379 				dp_tx_comp_free_buf(soc, tx_desc, false);
5380 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5381 				goto next_desc;
5382 			}
5383 
5384 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5385 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5386 				dp_tx_comp_alert("Txdesc invalid, flags = %x, id = %d",
5387 						 tx_desc->flags, tx_desc->id);
5388 				qdf_assert_always(0);
5389 			}
5390 
5391 			/* Collect hw completion contents */
5392 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5393 					      &tx_desc->comp, 1);
5394 add_to_pool:
5395 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5396 
5397 			/* First ring descriptor on the cycle */
5398 			if (!head_desc) {
5399 				head_desc = tx_desc;
5400 				tail_desc = tx_desc;
5401 			} else {
5402 				tail_desc->next = tx_desc;
5403 				tail_desc = tx_desc;
5404 			}
5405 			tx_desc->next = NULL;
5406 		}
5407 next_desc:
5408 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5409 
5410 		/*
5411 		 * Stop processing once the processed packet count exceeds
5412 		 * the given quota
5413 		 */
5414 
5415 		count++;
5416 
5417 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5418 					       num_avail_for_reap,
5419 					       hal_ring_hdl,
5420 					       &last_prefetched_hw_desc,
5421 					       &last_prefetched_sw_desc);
5422 
5423 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5424 			break;
5425 	}
5426 
5427 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5428 
5429 	/* Process the reaped descriptors */
5430 	if (head_desc)
5431 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5432 
5433 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5434 
5435 	/*
5436 	 * If we are processing in the near-full condition, there are 3 scenarios:
5437 	 * 1) Ring entries have reached a critical state
5438 	 * 2) Ring entries are still near the high threshold
5439 	 * 3) Ring entries are below the safe level
5440 	 *
5441 	 * One more loop will move the state to normal processing and yield
5442 	 */
5443 	if (ring_near_full)
5444 		goto more_data;
5445 
5446 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5447 
5448 		if (num_processed >= quota)
5449 			force_break = true;
5450 
5451 		if (!force_break &&
5452 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5453 						  hal_ring_hdl)) {
5454 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5455 			if (!hif_exec_should_yield(soc->hif_handle,
5456 						   int_ctx->dp_intr_id))
5457 				goto more_data;
5458 		}
5459 	}
5460 	DP_TX_HIST_STATS_PER_PDEV();
5461 
5462 	return num_processed;
5463 }
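
/*
 * Illustrative sketch (not part of the driver): the head/tail pattern
 * used in the reap loop above to chain completed descriptors into a
 * singly linked list before handing it to
 * dp_tx_comp_process_desc_list(). The struct and helper names below
 * are hypothetical and only mirror the idea.
 *
 *	struct ex_node {
 *		struct ex_node *next;
 *	};
 *
 *	static void ex_list_append(struct ex_node **head,
 *				   struct ex_node **tail,
 *				   struct ex_node *item)
 *	{
 *		item->next = NULL;
 *		if (!*head) {
 *			*head = item;		// first element starts the list
 *			*tail = item;
 *			return;
 *		}
 *		(*tail)->next = item;		// link after the current tail
 *		*tail = item;			// item becomes the new tail
 *	}
 */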
5464 
5465 #ifdef FEATURE_WLAN_TDLS
5466 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5467 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5468 {
5469 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5470 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5471 						     DP_MOD_ID_TDLS);
5472 
5473 	if (!vdev) {
5474 		dp_err("vdev handle for id %d is NULL", vdev_id);
5475 		return NULL;
5476 	}
5477 
5478 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5479 		vdev->is_tdls_frame = true;
5480 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5481 
5482 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5483 }
5484 #endif
5485 
5486 /**
5487  * dp_tx_vdev_attach() - attach vdev to dp tx
5488  * @vdev: virtual device instance
5489  *
5490  * Return: QDF_STATUS_SUCCESS: success
5491  *         QDF_STATUS_E_RESOURCES: Error return
5492  */
5493 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5494 {
5495 	int pdev_id;
5496 	/*
5497 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5498 	 */
5499 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5500 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5501 
5502 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5503 				       vdev->vdev_id);
5504 
5505 	pdev_id =
5506 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5507 						       vdev->pdev->pdev_id);
5508 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5509 
5510 	/*
5511 	 * Set HTT Extension Valid bit to 0 by default
5512 	 */
5513 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5514 
5515 	dp_tx_vdev_update_search_flags(vdev);
5516 
5517 	return QDF_STATUS_SUCCESS;
5518 }
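
/*
 * Illustrative sketch (not part of the driver): the general mask/shift
 * shape of a field-setter macro such as the DP_TX_TCL_METADATA_*_SET()
 * wrappers used in dp_tx_vdev_attach() above. The field layout here is
 * invented purely for illustration; the real offsets and widths come
 * from the HTT headers.
 *
 *	#define EX_FIELD_SHIFT	4
 *	#define EX_FIELD_MASK	(0xff << EX_FIELD_SHIFT)
 *
 *	#define EX_FIELD_SET(word, val) \
 *		((word) = ((word) & ~EX_FIELD_MASK) | \
 *			  (((val) << EX_FIELD_SHIFT) & EX_FIELD_MASK))
 *
 * Each setter clears its own field in the metadata word and ORs in the
 * new value, so successive setters (type, vdev_id, pdev_id, valid_htt)
 * compose without disturbing one another.
 */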
5519 
5520 #ifndef FEATURE_WDS
5521 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5522 {
5523 	return false;
5524 }
5525 #endif
5526 
5527 /**
5528  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5529  * @vdev: virtual device instance
5530  *
5531  * Return: void
5532  *
5533  */
5534 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5535 {
5536 	struct dp_soc *soc = vdev->pdev->soc;
5537 
5538 	/*
5539 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
5540 	 * for TDLS link
5541 	 *
5542 	 * Enable AddrY (SA based search) only for non-WDS STA and
5543 	 * ProxySTA VAP (in HKv1) modes.
5544 	 *
5545 	 * In all other VAP modes, only DA based search should be
5546 	 * enabled
5547 	 */
5548 	if (vdev->opmode == wlan_op_mode_sta &&
5549 	    vdev->tdls_link_connected)
5550 		vdev->hal_desc_addr_search_flags =
5551 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5552 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5553 		 !dp_tx_da_search_override(vdev))
5554 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5555 	else
5556 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5557 
5558 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
5559 		vdev->search_type = soc->sta_mode_search_policy;
5560 	else
5561 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5562 }
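
/*
 * Illustrative sketch (not part of the driver): the address-search
 * selection in dp_tx_vdev_update_search_flags() above, rewritten as a
 * pure helper for readability. The helper name and boolean parameters
 * are hypothetical.
 *
 *	static uint32_t ex_addr_search_flags(enum wlan_op_mode opmode,
 *					     bool tdls_link_connected,
 *					     bool da_search_override)
 *	{
 *		if (opmode == wlan_op_mode_sta && tdls_link_connected)
 *			return HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN;
 *		if (opmode == wlan_op_mode_sta && !da_search_override)
 *			return HAL_TX_DESC_ADDRY_EN;
 *		return HAL_TX_DESC_ADDRX_EN;
 *	}
 */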
5563 
5564 static inline bool
5565 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5566 			  struct dp_vdev *vdev,
5567 			  struct dp_tx_desc_s *tx_desc)
5568 {
5569 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
5570 		return false;
5571 
5572 	/*
5573 	 * If vdev is given, only check whether the desc's
5574 	 * vdev matches. If vdev is NULL, check whether the
5575 	 * desc's pdev matches.
5576 	 */
5577 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
5578 		(tx_desc->pdev == pdev);
5579 }
5580 
5581 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5582 /**
5583  * dp_tx_desc_flush() - release resources associated
5584  *                      with a TX Desc
5585  *
5586  * @pdev: Handle to DP pdev structure
5587  * @vdev: virtual device instance
5588  * NULL: no specific Vdev is required; check all allocated TX descs
5589  * on this pdev.
5590  * Non-NULL: only check the allocated TX descs associated with this Vdev.
5591  *
5592  * @force_free:
5593  * true: flush the TX desc.
5594  * false: only reset the Vdev in each allocated TX desc
5595  * that is associated with the current Vdev.
5596  *
5597  * This function will go through the TX desc pools to flush
5598  * the outstanding TX data or reset the Vdev to NULL in the associated TX
5599  * descs.
5600  */
5601 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5602 		      bool force_free)
5603 {
5604 	uint8_t i;
5605 	uint32_t j;
5606 	uint32_t num_desc, page_id, offset;
5607 	uint16_t num_desc_per_page;
5608 	struct dp_soc *soc = pdev->soc;
5609 	struct dp_tx_desc_s *tx_desc = NULL;
5610 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5611 
5612 	if (!vdev && !force_free) {
5613 		dp_err("Reset TX desc vdev, Vdev param is required!");
5614 		return;
5615 	}
5616 
5617 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
5618 		tx_desc_pool = &soc->tx_desc[i];
5619 		if (!(tx_desc_pool->pool_size) ||
5620 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
5621 		    !(tx_desc_pool->desc_pages.cacheable_pages))
5622 			continue;
5623 
5624 		/*
5625 		 * Add flow pool lock protection in case the pool is freed
5626 		 * when all tx_desc entries are recycled during TX completion
5627 		 * handling. This is not necessary for a force flush because:
5628 		 * a. a double lock would occur if dp_tx_desc_release is
5629 		 *    also trying to acquire it.
5630 		 * b. the dp interrupt has been disabled before the force TX
5631 		 *    desc flush in dp_pdev_deinit().
5632 		 */
5633 		if (!force_free)
5634 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
5635 		num_desc = tx_desc_pool->pool_size;
5636 		num_desc_per_page =
5637 			tx_desc_pool->desc_pages.num_element_per_page;
5638 		for (j = 0; j < num_desc; j++) {
5639 			page_id = j / num_desc_per_page;
5640 			offset = j % num_desc_per_page;
5641 
5642 			if (qdf_unlikely(!(tx_desc_pool->
5643 					 desc_pages.cacheable_pages)))
5644 				break;
5645 
5646 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5647 
5648 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5649 				/*
5650 				 * Free TX desc if force free is
5651 				 * required, otherwise only reset vdev
5652 				 * in this TX desc.
5653 				 */
5654 				if (force_free) {
5655 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5656 					dp_tx_comp_free_buf(soc, tx_desc,
5657 							    false);
5658 					dp_tx_desc_release(tx_desc, i);
5659 				} else {
5660 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5661 				}
5662 			}
5663 		}
5664 		if (!force_free)
5665 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5666 	}
5667 }
5668 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5669 /**
5670  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5671  *
5672  * @soc: Handle to DP soc structure
5673  * @tx_desc: pointer of one TX desc
5674  * @desc_pool_id: TX Desc pool id
5675  */
5676 static inline void
5677 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5678 		      uint8_t desc_pool_id)
5679 {
5680 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5681 
5682 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5683 
5684 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5685 }
5686 
5687 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5688 		      bool force_free)
5689 {
5690 	uint8_t i, num_pool;
5691 	uint32_t j;
5692 	uint32_t num_desc, page_id, offset;
5693 	uint16_t num_desc_per_page;
5694 	struct dp_soc *soc = pdev->soc;
5695 	struct dp_tx_desc_s *tx_desc = NULL;
5696 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5697 
5698 	if (!vdev && !force_free) {
5699 		dp_err("Reset TX desc vdev, Vdev param is required!");
5700 		return;
5701 	}
5702 
5703 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5704 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5705 
5706 	for (i = 0; i < num_pool; i++) {
5707 		tx_desc_pool = &soc->tx_desc[i];
5708 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5709 			continue;
5710 
5711 		num_desc_per_page =
5712 			tx_desc_pool->desc_pages.num_element_per_page;
5713 		for (j = 0; j < num_desc; j++) {
5714 			page_id = j / num_desc_per_page;
5715 			offset = j % num_desc_per_page;
5716 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5717 
5718 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5719 				if (force_free) {
5720 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5721 					dp_tx_comp_free_buf(soc, tx_desc,
5722 							    false);
5723 					dp_tx_desc_release(tx_desc, i);
5724 				} else {
5725 					dp_tx_desc_reset_vdev(soc, tx_desc,
5726 							      i);
5727 				}
5728 			}
5729 		}
5730 	}
5731 }
5732 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
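
/*
 * Illustrative sketch (not part of the driver): how the flat descriptor
 * index j is mapped to a (page_id, offset) pair in dp_tx_desc_flush()
 * above, shown as a standalone program with a hypothetical pool of 1024
 * descriptors and 256 descriptors per page.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint32_t num_desc = 1024;		// hypothetical pool size
 *		uint16_t num_desc_per_page = 256;	// hypothetical page capacity
 *		uint32_t j;
 *
 *		for (j = 0; j < num_desc; j += 300) {
 *			uint32_t page_id = j / num_desc_per_page;
 *			uint32_t offset = j % num_desc_per_page;
 *
 *			printf("desc %u -> page %u, offset %u\n",
 *			       j, page_id, offset);
 *		}
 *		return 0;
 *	}
 *
 * For example, descriptor 600 lands on page 2 at offset 88, since
 * 600 = 2 * 256 + 88.
 */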
5733 
5734 /**
5735  * dp_tx_vdev_detach() - detach vdev from dp tx
5736  * @vdev: virtual device instance
5737  *
5738  * Return: QDF_STATUS_SUCCESS: success
5739  *         QDF_STATUS_E_RESOURCES: Error return
5740  */
5741 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5742 {
5743 	struct dp_pdev *pdev = vdev->pdev;
5744 
5745 	/* Reset TX desc associated to this Vdev as NULL */
5746 	dp_tx_desc_flush(pdev, vdev, false);
5747 
5748 	return QDF_STATUS_SUCCESS;
5749 }
5750 
5751 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5752 /* Pools will be allocated dynamically */
5753 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5754 					   int num_desc)
5755 {
5756 	uint8_t i;
5757 
5758 	for (i = 0; i < num_pool; i++) {
5759 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5760 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5761 	}
5762 
5763 	return QDF_STATUS_SUCCESS;
5764 }
5765 
5766 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5767 					  uint32_t num_desc)
5768 {
5769 	return QDF_STATUS_SUCCESS;
5770 }
5771 
5772 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5773 {
5774 }
5775 
5776 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5777 {
5778 	uint8_t i;
5779 
5780 	for (i = 0; i < num_pool; i++)
5781 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5782 }
5783 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5784 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5785 					   uint32_t num_desc)
5786 {
5787 	uint8_t i, count;
5788 
5789 	/* Allocate software Tx descriptor pools */
5790 	for (i = 0; i < num_pool; i++) {
5791 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5792 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5793 				  FL("Tx Desc Pool alloc %d failed %pK"),
5794 				  i, soc);
5795 			goto fail;
5796 		}
5797 	}
5798 	return QDF_STATUS_SUCCESS;
5799 
5800 fail:
5801 	for (count = 0; count < i; count++)
5802 		dp_tx_desc_pool_free(soc, count);
5803 
5804 	return QDF_STATUS_E_NOMEM;
5805 }
5806 
5807 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5808 					  uint32_t num_desc)
5809 {
5810 	uint8_t i;
5811 	for (i = 0; i < num_pool; i++) {
5812 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5813 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5814 				  FL("Tx Desc Pool init %d failed %pK"),
5815 				  i, soc);
5816 			return QDF_STATUS_E_NOMEM;
5817 		}
5818 	}
5819 	return QDF_STATUS_SUCCESS;
5820 }
5821 
5822 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5823 {
5824 	uint8_t i;
5825 
5826 	for (i = 0; i < num_pool; i++)
5827 		dp_tx_desc_pool_deinit(soc, i);
5828 }
5829 
5830 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5831 {
5832 	uint8_t i;
5833 
5834 	for (i = 0; i < num_pool; i++)
5835 		dp_tx_desc_pool_free(soc, i);
5836 }
5837 
5838 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
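
/*
 * Illustrative note (not part of the driver): intended pairing of the
 * static pool helpers above, in the order they are invoked by the
 * dp_soc_tx_desc_sw_pools_*() wrappers later in this file:
 *
 *	dp_tx_alloc_static_pools(soc, num_pool, num_desc);	// allocate pool memory
 *	dp_tx_init_static_pools(soc, num_pool, num_desc);	// initialize pools
 *	...
 *	dp_tx_deinit_static_pools(soc, num_pool);		// de-initialize pools
 *	dp_tx_delete_static_pools(soc, num_pool);		// free pool memory
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2, the alloc/init steps only prepare the
 * flow-pool locks and state, since the pools themselves are created on
 * demand.
 */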
5839 
5840 /**
5841  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5842  * @soc: core txrx main context
5843  * @num_pool: number of pools
5844  *
5845  */
5846 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5847 {
5848 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5849 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5850 }
5851 
5852 /**
5853  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5854  * @soc: core txrx main context
5855  * @num_pool: number of pools
5856  *
5857  */
5858 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5859 {
5860 	dp_tx_tso_desc_pool_free(soc, num_pool);
5861 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5862 }
5863 
5864 /**
5865  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5866  * @soc: core txrx main context
5867  *
5868  * This function frees all tx related descriptors as below
5869  * 1. Regular TX descriptors (static pools)
5870  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5871  * 3. TSO descriptors
5872  *
5873  */
5874 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5875 {
5876 	uint8_t num_pool;
5877 
5878 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5879 
5880 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5881 	dp_tx_ext_desc_pool_free(soc, num_pool);
5882 	dp_tx_delete_static_pools(soc, num_pool);
5883 }
5884 
5885 /**
5886  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5887  * @soc: core txrx main context
5888  *
5889  * This function de-initializes all tx related descriptors as below
5890  * 1. Regular TX descriptors (static pools)
5891  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5892  * 3. TSO descriptors
5893  *
5894  */
5895 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5896 {
5897 	uint8_t num_pool;
5898 
5899 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5900 
5901 	dp_tx_flow_control_deinit(soc);
5902 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5903 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5904 	dp_tx_deinit_static_pools(soc, num_pool);
5905 }
5906 
5907 /**
5908  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
5909  * @soc: DP soc handle; @num_pool: Number of pools; @num_desc: Number of descriptors
5910  *
5911  * Reserve TSO descriptor buffers
5912  *
5913  * Return: QDF_STATUS_E_FAILURE on failure or
5914  * QDF_STATUS_SUCCESS on success
5915  */
5916 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5917 					 uint8_t num_pool,
5918 					 uint32_t num_desc)
5919 {
5920 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5921 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5922 		return QDF_STATUS_E_FAILURE;
5923 	}
5924 
5925 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5926 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5927 		       num_pool, soc);
5928 		return QDF_STATUS_E_FAILURE;
5929 	}
5930 	return QDF_STATUS_SUCCESS;
5931 }
5932 
5933 /**
5934  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5935  * @soc: DP soc handle
5936  * @num_pool: Number of pools
5937  * @num_desc: Number of descriptors
5938  *
5939  * Initialize TSO descriptor pools
5940  *
5941  * Return: QDF_STATUS_E_FAILURE on failure or
5942  * QDF_STATUS_SUCCESS on success
5943  */
5944 
5945 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5946 					uint8_t num_pool,
5947 					uint32_t num_desc)
5948 {
5949 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5950 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5951 		return QDF_STATUS_E_FAILURE;
5952 	}
5953 
5954 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5955 		dp_err("TSO Num of seg Pool init %d failed %pK",
5956 		       num_pool, soc);
5957 		return QDF_STATUS_E_FAILURE;
5958 	}
5959 	return QDF_STATUS_SUCCESS;
5960 }
5961 
5962 /**
5963  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5964  * @soc: core txrx main context
5965  *
5966  * This function allocates memory for following descriptor pools
5967  * 1. regular sw tx descriptor pools (static pools)
5968  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5969  * 3. TSO descriptor pools
5970  *
5971  * Return: QDF_STATUS_SUCCESS: success
5972  *         QDF_STATUS_E_RESOURCES: Error return
5973  */
5974 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5975 {
5976 	uint8_t num_pool;
5977 	uint32_t num_desc;
5978 	uint32_t num_ext_desc;
5979 
5980 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5981 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5982 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5983 
5984 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5985 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5986 		  __func__, num_pool, num_desc);
5987 
5988 	if ((num_pool > MAX_TXDESC_POOLS) ||
5989 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5990 		goto fail1;
5991 
5992 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5993 		goto fail1;
5994 
5995 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5996 		goto fail2;
5997 
5998 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5999 		return QDF_STATUS_SUCCESS;
6000 
6001 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6002 		goto fail3;
6003 
6004 	return QDF_STATUS_SUCCESS;
6005 
6006 fail3:
6007 	dp_tx_ext_desc_pool_free(soc, num_pool);
6008 fail2:
6009 	dp_tx_delete_static_pools(soc, num_pool);
6010 fail1:
6011 	return QDF_STATUS_E_RESOURCES;
6012 }
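
/*
 * Illustrative sketch (not part of the driver): the goto-based error
 * unwind used by dp_soc_tx_desc_sw_pools_alloc() above. Each failure
 * label releases only what was acquired before the failing step, in
 * reverse order. The step names below are hypothetical.
 *
 *	if (acquire_a())
 *		goto fail_a;
 *	if (acquire_b())
 *		goto fail_b;
 *	if (acquire_c())
 *		goto fail_c;
 *
 *	return QDF_STATUS_SUCCESS;
 *
 * fail_c:
 *	release_b();
 * fail_b:
 *	release_a();
 * fail_a:
 *	return QDF_STATUS_E_RESOURCES;
 */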
6013 
6014 /**
6015  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
6016  * @soc: core txrx main context
6017  *
6018  * This function initializes the following TX descriptor pools
6019  * 1. regular sw tx descriptor pools (static pools)
6020  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6021  * 3. TSO descriptor pools
6022  *
6023  * Return: QDF_STATUS_SUCCESS: success
6024  *	   QDF_STATUS_E_RESOURCES: Error return
6025  */
6026 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6027 {
6028 	uint8_t num_pool;
6029 	uint32_t num_desc;
6030 	uint32_t num_ext_desc;
6031 
6032 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6033 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6034 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6035 
6036 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6037 		goto fail1;
6038 
6039 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6040 		goto fail2;
6041 
6042 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6043 		return QDF_STATUS_SUCCESS;
6044 
6045 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6046 		goto fail3;
6047 
6048 	dp_tx_flow_control_init(soc);
6049 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6050 	return QDF_STATUS_SUCCESS;
6051 
6052 fail3:
6053 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6054 fail2:
6055 	dp_tx_deinit_static_pools(soc, num_pool);
6056 fail1:
6057 	return QDF_STATUS_E_RESOURCES;
6058 }
6059 
6060 /**
6061  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
6062  * @txrx_soc: dp soc handle
6063  *
6064  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6065  *			QDF_STATUS_E_FAILURE
6066  */
6067 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6068 {
6069 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6070 	uint8_t num_pool;
6071 	uint32_t num_desc;
6072 	uint32_t num_ext_desc;
6073 
6074 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6075 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6076 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6077 
6078 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6079 		return QDF_STATUS_E_FAILURE;
6080 
6081 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6082 		return QDF_STATUS_E_FAILURE;
6083 
6084 	return QDF_STATUS_SUCCESS;
6085 }
6086 
6087 /**
6088  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
6089  * @txrx_soc: dp soc handle
6090  *
6091  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6092  */
6093 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6094 {
6095 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6096 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6097 
6098 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6099 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6100 
6101 	return QDF_STATUS_SUCCESS;
6102 }
6103 
6104 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6105 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6106 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6107 			  qdf_nbuf_t nbuf)
6108 {
6109 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6110 		uint64_t tsf_time;
6111 
6112 		if (vdev->get_tsf_time) {
6113 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6114 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6115 		}
6116 	}
6117 }
6118 
6119 void dp_pkt_get_timestamp(uint64_t *time)
6120 {
6121 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6122 		*time = qdf_get_log_timestamp();
6123 }
6124 #endif
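
/*
 * Illustrative sketch (not part of the driver): dp_pkt_add_timestamp()
 * above depends on an OS-shim supplied vdev->get_tsf_time() callback to
 * translate a host timestamp into TSF time before tagging the nbuf. A
 * hypothetical shim callback could look like the following; the names
 * and the offset-based conversion are assumptions, not the real
 * implementation.
 *
 *	static void ex_get_tsf_time(ol_osif_vdev_handle osif_vdev,
 *				    uint64_t host_time, uint64_t *tsf_time)
 *	{
 *		// assumed: add a cached host-to-TSF delta for this vdev
 *		*tsf_time = host_time + ex_cached_tsf_delta;
 *	}
 */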
6125 
6126