xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject*/
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 #ifdef WLAN_PEER_JITTER
72 #define DP_AVG_JITTER_WEIGHT_DENOM 4
73 #define DP_AVG_DELAY_WEIGHT_DENOM 3
74 #endif
75 
76 #ifdef QCA_DP_TX_FW_METADATA_V2
77 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
78 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
82 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
85 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
86 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
87 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
88 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
89 #define DP_TCL_METADATA_TYPE_PEER_BASED \
90 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
91 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
92 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
93 #else
94 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
95 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
102 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
103 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
104 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
105 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
106 #define DP_TCL_METADATA_TYPE_PEER_BASED \
107 	HTT_TCL_METADATA_TYPE_PEER_BASED
108 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
109 	HTT_TCL_METADATA_TYPE_VDEV_BASED
110 #endif
111 
112 /*mapping between hal encrypt type and cdp_sec_type*/
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
258 
259 /**
260  * dp_is_tput_high() - Check if throughput is high
261  *
262  * @soc - core txrx main context
263  *
264  * The current function is based of the RTPM tput policy variable where RTPM is
265  * avoided based on throughput.
266  */
267 static inline int dp_is_tput_high(struct dp_soc *soc)
268 {
269 	return dp_get_rtpm_tput_policy_requirement(soc);
270 }
271 
272 #if defined(FEATURE_TSO)
273 /**
274  * dp_tx_tso_unmap_segment() - Unmap TSO segment
275  *
276  * @soc - core txrx main context
277  * @seg_desc - tso segment descriptor
278  * @num_seg_desc - tso number segment descriptor
279  */
280 static void dp_tx_tso_unmap_segment(
281 		struct dp_soc *soc,
282 		struct qdf_tso_seg_elem_t *seg_desc,
283 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
284 {
285 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
286 	if (qdf_unlikely(!seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else if (qdf_unlikely(!num_seg_desc)) {
291 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
292 			 __func__, __LINE__);
293 		qdf_assert(0);
294 	} else {
295 		bool is_last_seg;
296 		/* no tso segment left to do dma unmap */
297 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
298 			return;
299 
300 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
301 					true : false;
302 		qdf_nbuf_unmap_tso_segment(soc->osdev,
303 					   seg_desc, is_last_seg);
304 		num_seg_desc->num_seg.tso_cmn_num_seg--;
305 	}
306 }
307 
308 /**
309  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
310  *                            back to the freelist
311  *
312  * @soc - soc device handle
313  * @tx_desc - Tx software descriptor
314  */
315 static void dp_tx_tso_desc_release(struct dp_soc *soc,
316 				   struct dp_tx_desc_s *tx_desc)
317 {
318 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
319 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
320 		dp_tx_err("SO desc is NULL!");
321 		qdf_assert(0);
322 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
323 		dp_tx_err("TSO num desc is NULL!");
324 		qdf_assert(0);
325 	} else {
326 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
327 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
328 				msdu_ext_desc->tso_num_desc;
329 
330 		/* Add the tso num segment into the free list */
331 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
332 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
333 					    tx_desc->msdu_ext_desc->
334 					    tso_num_desc);
335 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
336 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
337 		}
338 
339 		/* Add the tso segment into the free list*/
340 		dp_tx_tso_desc_free(soc,
341 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
342 				    tso_desc);
343 		tx_desc->msdu_ext_desc->tso_desc = NULL;
344 	}
345 }
346 #else
347 static void dp_tx_tso_unmap_segment(
348 		struct dp_soc *soc,
349 		struct qdf_tso_seg_elem_t *seg_desc,
350 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
351 
352 {
353 }
354 
355 static void dp_tx_tso_desc_release(struct dp_soc *soc,
356 				   struct dp_tx_desc_s *tx_desc)
357 {
358 }
359 #endif
360 
361 /**
362  * dp_tx_desc_release() - Release Tx Descriptor
363  * @tx_desc : Tx Descriptor
364  * @desc_pool_id: Descriptor Pool ID
365  *
366  * Deallocate all resources attached to Tx descriptor and free the Tx
367  * descriptor.
368  *
369  * Return:
370  */
371 void
372 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
373 {
374 	struct dp_pdev *pdev = tx_desc->pdev;
375 	struct dp_soc *soc;
376 	uint8_t comp_status = 0;
377 
378 	qdf_assert(pdev);
379 
380 	soc = pdev->soc;
381 
382 	dp_tx_outstanding_dec(pdev);
383 
384 	if (tx_desc->msdu_ext_desc) {
385 		if (tx_desc->frm_type == dp_tx_frm_tso)
386 			dp_tx_tso_desc_release(soc, tx_desc);
387 
388 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
389 			dp_tx_me_free_buf(tx_desc->pdev,
390 					  tx_desc->msdu_ext_desc->me_buffer);
391 
392 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
393 	}
394 
395 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
396 		qdf_atomic_dec(&soc->num_tx_exception);
397 
398 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
399 				tx_desc->buffer_src)
400 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
401 							     soc->hal_soc);
402 	else
403 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
404 
405 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
406 		    tx_desc->id, comp_status,
407 		    qdf_atomic_read(&pdev->num_tx_outstanding));
408 
409 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
410 	return;
411 }
412 
413 /**
414  * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
415  * @vdev: DP vdev Handle
416  * @nbuf: skb
417  * @msdu_info: msdu_info required to create HTT metadata
418  *
419  * Prepares and fills HTT metadata in the frame pre-header for special frames
420  * that should be transmitted using varying transmit parameters.
421  * There are 2 VDEV modes that currently needs this special metadata -
422  *  1) Mesh Mode
423  *  2) DSRC Mode
424  *
425  * Return: HTT metadata size
426  *
427  */
428 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
429 					  struct dp_tx_msdu_info_s *msdu_info)
430 {
431 	uint32_t *meta_data = msdu_info->meta_data;
432 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
433 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
434 
435 	uint8_t htt_desc_size;
436 
437 	/* Size rounded of multiple of 8 bytes */
438 	uint8_t htt_desc_size_aligned;
439 
440 	uint8_t *hdr = NULL;
441 
442 	/*
443 	 * Metadata - HTT MSDU Extension header
444 	 */
445 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
446 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
447 
448 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
449 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
450 							   meta_data[0]) ||
451 	    msdu_info->exception_fw) {
452 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
453 				 htt_desc_size_aligned)) {
454 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
455 							 htt_desc_size_aligned);
456 			if (!nbuf) {
457 				/*
458 				 * qdf_nbuf_realloc_headroom won't do skb_clone
459 				 * as skb_realloc_headroom does. so, no free is
460 				 * needed here.
461 				 */
462 				DP_STATS_INC(vdev,
463 					     tx_i.dropped.headroom_insufficient,
464 					     1);
465 				qdf_print(" %s[%d] skb_realloc_headroom failed",
466 					  __func__, __LINE__);
467 				return 0;
468 			}
469 		}
470 		/* Fill and add HTT metaheader */
471 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
472 		if (!hdr) {
473 			dp_tx_err("Error in filling HTT metadata");
474 
475 			return 0;
476 		}
477 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
478 
479 	} else if (vdev->opmode == wlan_op_mode_ocb) {
480 		/* Todo - Add support for DSRC */
481 	}
482 
483 	return htt_desc_size_aligned;
484 }
485 
486 /**
487  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
488  * @tso_seg: TSO segment to process
489  * @ext_desc: Pointer to MSDU extension descriptor
490  *
491  * Return: void
492  */
493 #if defined(FEATURE_TSO)
494 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
495 		void *ext_desc)
496 {
497 	uint8_t num_frag;
498 	uint32_t tso_flags;
499 
500 	/*
501 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
502 	 * tcp_flag_mask
503 	 *
504 	 * Checksum enable flags are set in TCL descriptor and not in Extension
505 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
506 	 */
507 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
508 
509 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
510 
511 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
512 		tso_seg->tso_flags.ip_len);
513 
514 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
515 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
516 
517 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
518 		uint32_t lo = 0;
519 		uint32_t hi = 0;
520 
521 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
522 				  (tso_seg->tso_frags[num_frag].length));
523 
524 		qdf_dmaaddr_to_32s(
525 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
526 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
527 			tso_seg->tso_frags[num_frag].length);
528 	}
529 
530 	return;
531 }
532 #else
533 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
534 		void *ext_desc)
535 {
536 	return;
537 }
538 #endif
539 
540 #if defined(FEATURE_TSO)
541 /**
542  * dp_tx_free_tso_seg_list() - Loop through the tso segments
543  *                             allocated and free them
544  *
545  * @soc: soc handle
546  * @free_seg: list of tso segments
547  * @msdu_info: msdu descriptor
548  *
549  * Return - void
550  */
551 static void dp_tx_free_tso_seg_list(
552 		struct dp_soc *soc,
553 		struct qdf_tso_seg_elem_t *free_seg,
554 		struct dp_tx_msdu_info_s *msdu_info)
555 {
556 	struct qdf_tso_seg_elem_t *next_seg;
557 
558 	while (free_seg) {
559 		next_seg = free_seg->next;
560 		dp_tx_tso_desc_free(soc,
561 				    msdu_info->tx_queue.desc_pool_id,
562 				    free_seg);
563 		free_seg = next_seg;
564 	}
565 }
566 
567 /**
568  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
569  *                                 allocated and free them
570  *
571  * @soc:  soc handle
572  * @free_num_seg: list of tso number segments
573  * @msdu_info: msdu descriptor
574  * Return - void
575  */
576 static void dp_tx_free_tso_num_seg_list(
577 		struct dp_soc *soc,
578 		struct qdf_tso_num_seg_elem_t *free_num_seg,
579 		struct dp_tx_msdu_info_s *msdu_info)
580 {
581 	struct qdf_tso_num_seg_elem_t *next_num_seg;
582 
583 	while (free_num_seg) {
584 		next_num_seg = free_num_seg->next;
585 		dp_tso_num_seg_free(soc,
586 				    msdu_info->tx_queue.desc_pool_id,
587 				    free_num_seg);
588 		free_num_seg = next_num_seg;
589 	}
590 }
591 
592 /**
593  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
594  *                              do dma unmap for each segment
595  *
596  * @soc: soc handle
597  * @free_seg: list of tso segments
598  * @num_seg_desc: tso number segment descriptor
599  *
600  * Return - void
601  */
602 static void dp_tx_unmap_tso_seg_list(
603 		struct dp_soc *soc,
604 		struct qdf_tso_seg_elem_t *free_seg,
605 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
606 {
607 	struct qdf_tso_seg_elem_t *next_seg;
608 
609 	if (qdf_unlikely(!num_seg_desc)) {
610 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
611 		return;
612 	}
613 
614 	while (free_seg) {
615 		next_seg = free_seg->next;
616 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
617 		free_seg = next_seg;
618 	}
619 }
620 
621 #ifdef FEATURE_TSO_STATS
622 /**
623  * dp_tso_get_stats_idx: Retrieve the tso packet id
624  * @pdev - pdev handle
625  *
626  * Return: id
627  */
628 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
629 {
630 	uint32_t stats_idx;
631 
632 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
633 						% CDP_MAX_TSO_PACKETS);
634 	return stats_idx;
635 }
636 #else
637 static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
638 {
639 	return 0;
640 }
641 #endif /* FEATURE_TSO_STATS */
642 
643 /**
644  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
645  *				     free the tso segments descriptor and
646  *				     tso num segments descriptor
647  *
648  * @soc:  soc handle
649  * @msdu_info: msdu descriptor
650  * @tso_seg_unmap: flag to show if dma unmap is necessary
651  *
652  * Return - void
653  */
654 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
655 					  struct dp_tx_msdu_info_s *msdu_info,
656 					  bool tso_seg_unmap)
657 {
658 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
659 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
660 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
661 					tso_info->tso_num_seg_list;
662 
663 	/* do dma unmap for each segment */
664 	if (tso_seg_unmap)
665 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
666 
667 	/* free all tso number segment descriptor though looks only have 1 */
668 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
669 
670 	/* free all tso segment descriptor */
671 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
672 }
673 
674 /**
675  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
676  * @vdev: virtual device handle
677  * @msdu: network buffer
678  * @msdu_info: meta data associated with the msdu
679  *
680  * Return: QDF_STATUS_SUCCESS success
681  */
682 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
683 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
684 {
685 	struct qdf_tso_seg_elem_t *tso_seg;
686 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
687 	struct dp_soc *soc = vdev->pdev->soc;
688 	struct dp_pdev *pdev = vdev->pdev;
689 	struct qdf_tso_info_t *tso_info;
690 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
691 	tso_info = &msdu_info->u.tso_info;
692 	tso_info->curr_seg = NULL;
693 	tso_info->tso_seg_list = NULL;
694 	tso_info->num_segs = num_seg;
695 	msdu_info->frm_type = dp_tx_frm_tso;
696 	tso_info->tso_num_seg_list = NULL;
697 
698 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
699 
700 	while (num_seg) {
701 		tso_seg = dp_tx_tso_desc_alloc(
702 				soc, msdu_info->tx_queue.desc_pool_id);
703 		if (tso_seg) {
704 			tso_seg->next = tso_info->tso_seg_list;
705 			tso_info->tso_seg_list = tso_seg;
706 			num_seg--;
707 		} else {
708 			dp_err_rl("Failed to alloc tso seg desc");
709 			DP_STATS_INC_PKT(vdev->pdev,
710 					 tso_stats.tso_no_mem_dropped, 1,
711 					 qdf_nbuf_len(msdu));
712 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
713 
714 			return QDF_STATUS_E_NOMEM;
715 		}
716 	}
717 
718 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
719 
720 	tso_num_seg = dp_tso_num_seg_alloc(soc,
721 			msdu_info->tx_queue.desc_pool_id);
722 
723 	if (tso_num_seg) {
724 		tso_num_seg->next = tso_info->tso_num_seg_list;
725 		tso_info->tso_num_seg_list = tso_num_seg;
726 	} else {
727 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
728 			 __func__);
729 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
730 
731 		return QDF_STATUS_E_NOMEM;
732 	}
733 
734 	msdu_info->num_seg =
735 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
736 
737 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
738 			msdu_info->num_seg);
739 
740 	if (!(msdu_info->num_seg)) {
741 		/*
742 		 * Free allocated TSO seg desc and number seg desc,
743 		 * do unmap for segments if dma map has done.
744 		 */
745 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
746 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
747 
748 		return QDF_STATUS_E_INVAL;
749 	}
750 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
751 			      msdu, 0, DP_TX_DESC_MAP);
752 
753 	tso_info->curr_seg = tso_info->tso_seg_list;
754 
755 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
756 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
757 			     msdu, msdu_info->num_seg);
758 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
759 				    tso_info->msdu_stats_idx);
760 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
761 	return QDF_STATUS_SUCCESS;
762 }
763 #else
764 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
765 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
766 {
767 	return QDF_STATUS_E_NOMEM;
768 }
769 #endif
770 
771 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
772 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
773 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
774 
775 /**
776  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
777  * @vdev: DP Vdev handle
778  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
779  * @desc_pool_id: Descriptor Pool ID
780  *
781  * Return:
782  */
783 static
784 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
785 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
786 {
787 	uint8_t i;
788 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
789 	struct dp_tx_seg_info_s *seg_info;
790 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
791 	struct dp_soc *soc = vdev->pdev->soc;
792 
793 	/* Allocate an extension descriptor */
794 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
795 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
796 
797 	if (!msdu_ext_desc) {
798 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
799 		return NULL;
800 	}
801 
802 	if (msdu_info->exception_fw &&
803 			qdf_unlikely(vdev->mesh_vdev)) {
804 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
805 				&msdu_info->meta_data[0],
806 				sizeof(struct htt_tx_msdu_desc_ext2_t));
807 		qdf_atomic_inc(&soc->num_tx_exception);
808 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
809 	}
810 
811 	switch (msdu_info->frm_type) {
812 	case dp_tx_frm_sg:
813 	case dp_tx_frm_me:
814 	case dp_tx_frm_raw:
815 		seg_info = msdu_info->u.sg_info.curr_seg;
816 		/* Update the buffer pointers in MSDU Extension Descriptor */
817 		for (i = 0; i < seg_info->frag_cnt; i++) {
818 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
819 				seg_info->frags[i].paddr_lo,
820 				seg_info->frags[i].paddr_hi,
821 				seg_info->frags[i].len);
822 		}
823 
824 		break;
825 
826 	case dp_tx_frm_tso:
827 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
828 				&cached_ext_desc[0]);
829 		break;
830 
831 
832 	default:
833 		break;
834 	}
835 
836 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
837 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
838 
839 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
840 			msdu_ext_desc->vaddr);
841 
842 	return msdu_ext_desc;
843 }
844 
845 /**
846  * dp_tx_trace_pkt() - Trace TX packet at DP layer
847  *
848  * @skb: skb to be traced
849  * @msdu_id: msdu_id of the packet
850  * @vdev_id: vdev_id of the packet
851  *
852  * Return: None
853  */
854 #ifdef DP_DISABLE_TX_PKT_TRACE
855 static void dp_tx_trace_pkt(struct dp_soc *soc,
856 			    qdf_nbuf_t skb, uint16_t msdu_id,
857 			    uint8_t vdev_id)
858 {
859 }
860 #else
861 static void dp_tx_trace_pkt(struct dp_soc *soc,
862 			    qdf_nbuf_t skb, uint16_t msdu_id,
863 			    uint8_t vdev_id)
864 {
865 	if (dp_is_tput_high(soc))
866 		return;
867 
868 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
869 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
870 	DPTRACE(qdf_dp_trace_ptr(skb,
871 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
872 				 QDF_TRACE_DEFAULT_PDEV_ID,
873 				 qdf_nbuf_data_addr(skb),
874 				 sizeof(qdf_nbuf_data(skb)),
875 				 msdu_id, vdev_id, 0));
876 
877 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
878 
879 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
880 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
881 				      msdu_id, QDF_TX));
882 }
883 #endif
884 
885 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
886 /**
887  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
888  *				      exception by the upper layer (OS_IF)
889  * @soc: DP soc handle
890  * @nbuf: packet to be transmitted
891  *
892  * Returns: 1 if the packet is marked as exception,
893  *	    0, if the packet is not marked as exception.
894  */
895 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
896 						 qdf_nbuf_t nbuf)
897 {
898 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
899 }
900 #else
901 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
902 						 qdf_nbuf_t nbuf)
903 {
904 	return 0;
905 }
906 #endif
907 
908 #ifdef DP_TRAFFIC_END_INDICATION
909 /**
910  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
911  *                                          as indication to fw to inform that
912  *                                          data stream has ended
913  * @vdev: DP vdev handle
914  * @nbuf: original buffer from network stack
915  *
916  * Return: NULL on failure,
917  *         nbuf on success
918  */
919 static inline qdf_nbuf_t
920 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
921 				     qdf_nbuf_t nbuf)
922 {
923 	/* Packet length should be enough to copy upto L3 header */
924 	uint8_t end_nbuf_len = 64;
925 	uint8_t htt_desc_size_aligned;
926 	uint8_t htt_desc_size;
927 	qdf_nbuf_t end_nbuf;
928 
929 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
930 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
931 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
932 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
933 
934 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
935 		if (!end_nbuf) {
936 			end_nbuf = qdf_nbuf_alloc(NULL,
937 						  (htt_desc_size_aligned +
938 						  end_nbuf_len),
939 						  htt_desc_size_aligned,
940 						  8, false);
941 			if (!end_nbuf) {
942 				dp_err("Packet allocation failed");
943 				goto out;
944 			}
945 		} else {
946 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
947 		}
948 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
949 			     end_nbuf_len);
950 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
951 
952 		return end_nbuf;
953 	}
954 out:
955 	return NULL;
956 }
957 
958 /**
959  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
960  *                                           via exception path.
961  * @vdev: DP vdev handle
962  * @end_nbuf: skb to send as indication
963  * @msdu_info: msdu_info of original nbuf
964  * @peer_id: peer id
965  *
966  * Return: None
967  */
968 static inline void
969 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
970 				      qdf_nbuf_t end_nbuf,
971 				      struct dp_tx_msdu_info_s *msdu_info,
972 				      uint16_t peer_id)
973 {
974 	struct dp_tx_msdu_info_s e_msdu_info = {0};
975 	qdf_nbuf_t nbuf;
976 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
977 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
978 	e_msdu_info.tx_queue = msdu_info->tx_queue;
979 	e_msdu_info.tid = msdu_info->tid;
980 	e_msdu_info.exception_fw = 1;
981 	desc_ext->host_tx_desc_pool = 1;
982 	desc_ext->traffic_end_indication = 1;
983 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
984 				      peer_id, NULL);
985 	if (nbuf) {
986 		dp_err("Traffic end indication packet tx failed");
987 		qdf_nbuf_free(nbuf);
988 	}
989 }
990 
991 /**
992  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
993  *                                                mark it traffic end indication
994  *                                                packet.
995  * @tx_desc: Tx descriptor pointer
996  * @msdu_info: msdu_info structure pointer
997  *
998  * Return: None
999  */
1000 static inline void
1001 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1002 					   struct dp_tx_msdu_info_s *msdu_info)
1003 {
1004 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1005 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1006 
1007 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1008 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1009 }
1010 
1011 /**
1012  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
1013  *                                              freeing which are associated
1014  *                                              with traffic end indication
1015  *                                              flagged descriptor.
1016  * @soc: dp soc handle
1017  * @desc: Tx descriptor pointer
1018  * @nbuf: buffer pointer
1019  *
1020  * Return: True if packet gets enqueued else false
1021  */
1022 static bool
1023 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1024 					 struct dp_tx_desc_s *desc,
1025 					 qdf_nbuf_t nbuf)
1026 {
1027 	struct dp_vdev *vdev = NULL;
1028 
1029 	if (qdf_unlikely((desc->flags &
1030 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1031 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1032 					     DP_MOD_ID_TX_COMP);
1033 		if (vdev) {
1034 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1035 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1036 			return true;
1037 		}
1038 	}
1039 	return false;
1040 }
1041 
1042 /**
1043  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1044  *                                             enable/disable status
1045  * @vdev: dp vdev handle
1046  *
1047  * Return: True if feature is enable else false
1048  */
1049 static inline bool
1050 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1051 {
1052 	return qdf_unlikely(vdev->traffic_end_ind_en);
1053 }
1054 
1055 static inline qdf_nbuf_t
1056 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1057 			       struct dp_tx_msdu_info_s *msdu_info,
1058 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1059 {
1060 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1061 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1062 
1063 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1064 
1065 	if (qdf_unlikely(end_nbuf))
1066 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1067 						      msdu_info, peer_id);
1068 	return nbuf;
1069 }
1070 #else
1071 static inline qdf_nbuf_t
1072 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1073 				     qdf_nbuf_t nbuf)
1074 {
1075 	return NULL;
1076 }
1077 
1078 static inline void
1079 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1080 				      qdf_nbuf_t end_nbuf,
1081 				      struct dp_tx_msdu_info_s *msdu_info,
1082 				      uint16_t peer_id)
1083 {}
1084 
1085 static inline void
1086 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1087 					   struct dp_tx_msdu_info_s *msdu_info)
1088 {}
1089 
1090 static inline bool
1091 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1092 					 struct dp_tx_desc_s *desc,
1093 					 qdf_nbuf_t nbuf)
1094 {
1095 	return false;
1096 }
1097 
1098 static inline bool
1099 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1100 {
1101 	return false;
1102 }
1103 
1104 static inline qdf_nbuf_t
1105 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1106 			       struct dp_tx_msdu_info_s *msdu_info,
1107 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1108 {
1109 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1110 }
1111 #endif
1112 
1113 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1114 static bool
1115 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1116 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1117 {
1118 	if (soc->features.wds_ext_ast_override_enable &&
1119 	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
1120 		return true;
1121 
1122 	return false;
1123 }
1124 #else
1125 static bool
1126 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1127 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1128 {
1129 	return false;
1130 }
1131 #endif
1132 
1133 /**
1134  * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
1135  * @vdev: DP vdev handle
1136  * @nbuf: skb
1137  * @desc_pool_id: Descriptor pool ID
1138  * @meta_data: Metadata to the fw
1139  * @tx_exc_metadata: Handle that holds exception path metadata
1140  * Allocate and prepare Tx descriptor with msdu information.
1141  *
1142  * Return: Pointer to Tx Descriptor on success,
1143  *         NULL on failure
1144  */
1145 static
1146 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1147 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1148 		struct dp_tx_msdu_info_s *msdu_info,
1149 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1150 {
1151 	uint8_t align_pad;
1152 	uint8_t is_exception = 0;
1153 	uint8_t htt_hdr_size;
1154 	struct dp_tx_desc_s *tx_desc;
1155 	struct dp_pdev *pdev = vdev->pdev;
1156 	struct dp_soc *soc = pdev->soc;
1157 
1158 	if (dp_tx_limit_check(vdev, nbuf))
1159 		return NULL;
1160 
1161 	/* Allocate software Tx descriptor */
1162 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1163 
1164 	if (qdf_unlikely(!tx_desc)) {
1165 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1166 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1167 		return NULL;
1168 	}
1169 
1170 	dp_tx_outstanding_inc(pdev);
1171 
1172 	/* Initialize the SW tx descriptor */
1173 	tx_desc->nbuf = nbuf;
1174 	tx_desc->frm_type = dp_tx_frm_std;
1175 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1176 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1177 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1178 	tx_desc->vdev_id = vdev->vdev_id;
1179 	tx_desc->pdev = pdev;
1180 	tx_desc->msdu_ext_desc = NULL;
1181 	tx_desc->pkt_offset = 0;
1182 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1183 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1184 
1185 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1186 
1187 	if (qdf_unlikely(vdev->multipass_en)) {
1188 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1189 			goto failure;
1190 	}
1191 
1192 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1193 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1194 		is_exception = 1;
1195 
1196 	/* for BE chipsets if wds extension was enbled will not mark FW
1197 	 * in desc will mark ast index based search for ast index.
1198 	 */
1199 	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
1200 		return tx_desc;
1201 
1202 	/*
1203 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1204 	 * transmitted using varying transmit parameters (tx spec) which include
1205 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
1206 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1207 	 * These frames are sent as exception packets to firmware.
1208 	 *
1209 	 * HW requirement is that metadata should always point to a
1210 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1211 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1212 	 *  to get 8-byte aligned start address along with align_pad added
1213 	 *
1214 	 *  |-----------------------------|
1215 	 *  |                             |
1216 	 *  |-----------------------------| <-----Buffer Pointer Address given
1217 	 *  |                             |  ^    in HW descriptor (aligned)
1218 	 *  |       HTT Metadata          |  |
1219 	 *  |                             |  |
1220 	 *  |                             |  | Packet Offset given in descriptor
1221 	 *  |                             |  |
1222 	 *  |-----------------------------|  |
1223 	 *  |       Alignment Pad         |  v
1224 	 *  |-----------------------------| <----- Actual buffer start address
1225 	 *  |        SKB Data             |           (Unaligned)
1226 	 *  |                             |
1227 	 *  |                             |
1228 	 *  |                             |
1229 	 *  |                             |
1230 	 *  |                             |
1231 	 *  |-----------------------------|
1232 	 */
1233 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1234 				(vdev->opmode == wlan_op_mode_ocb) ||
1235 				(tx_exc_metadata &&
1236 				tx_exc_metadata->is_tx_sniffer)) {
1237 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1238 
1239 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1240 			DP_STATS_INC(vdev,
1241 				     tx_i.dropped.headroom_insufficient, 1);
1242 			goto failure;
1243 		}
1244 
1245 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1246 			dp_tx_err("qdf_nbuf_push_head failed");
1247 			goto failure;
1248 		}
1249 
1250 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1251 				msdu_info);
1252 		if (htt_hdr_size == 0)
1253 			goto failure;
1254 
1255 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1256 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1257 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1258 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1259 							   msdu_info);
1260 		is_exception = 1;
1261 		tx_desc->length -= tx_desc->pkt_offset;
1262 	}
1263 
1264 #if !TQM_BYPASS_WAR
1265 	if (is_exception || tx_exc_metadata)
1266 #endif
1267 	{
1268 		/* Temporary WAR due to TQM VP issues */
1269 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1270 		qdf_atomic_inc(&soc->num_tx_exception);
1271 	}
1272 
1273 	return tx_desc;
1274 
1275 failure:
1276 	dp_tx_desc_release(tx_desc, desc_pool_id);
1277 	return NULL;
1278 }
1279 
1280 /**
1281  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1282  * @vdev: DP vdev handle
1283  * @nbuf: skb
1284  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1285  * @desc_pool_id : Descriptor Pool ID
1286  *
1287  * Allocate and prepare Tx descriptor with msdu and fragment descritor
1288  * information. For frames with fragments, allocate and prepare
1289  * an MSDU extension descriptor
1290  *
1291  * Return: Pointer to Tx Descriptor on success,
1292  *         NULL on failure
1293  */
1294 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1295 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1296 		uint8_t desc_pool_id)
1297 {
1298 	struct dp_tx_desc_s *tx_desc;
1299 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1300 	struct dp_pdev *pdev = vdev->pdev;
1301 	struct dp_soc *soc = pdev->soc;
1302 
1303 	if (dp_tx_limit_check(vdev, nbuf))
1304 		return NULL;
1305 
1306 	/* Allocate software Tx descriptor */
1307 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1308 	if (!tx_desc) {
1309 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1310 		return NULL;
1311 	}
1312 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1313 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1314 
1315 	dp_tx_outstanding_inc(pdev);
1316 
1317 	/* Initialize the SW tx descriptor */
1318 	tx_desc->nbuf = nbuf;
1319 	tx_desc->frm_type = msdu_info->frm_type;
1320 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1321 	tx_desc->vdev_id = vdev->vdev_id;
1322 	tx_desc->pdev = pdev;
1323 	tx_desc->pkt_offset = 0;
1324 
1325 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1326 
1327 	/* Handle scattered frames - TSO/SG/ME */
1328 	/* Allocate and prepare an extension descriptor for scattered frames */
1329 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1330 	if (!msdu_ext_desc) {
1331 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1332 		goto failure;
1333 	}
1334 
1335 #if TQM_BYPASS_WAR
1336 	/* Temporary WAR due to TQM VP issues */
1337 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1338 	qdf_atomic_inc(&soc->num_tx_exception);
1339 #endif
1340 	if (qdf_unlikely(msdu_info->exception_fw))
1341 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1342 
1343 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1344 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1345 
1346 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1347 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1348 
1349 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1350 
1351 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1352 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1353 	else
1354 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1355 
1356 	return tx_desc;
1357 failure:
1358 	dp_tx_desc_release(tx_desc, desc_pool_id);
1359 	return NULL;
1360 }
1361 
1362 /**
1363  * dp_tx_prepare_raw() - Prepare RAW packet TX
1364  * @vdev: DP vdev handle
1365  * @nbuf: buffer pointer
1366  * @seg_info: Pointer to Segment info Descriptor to be prepared
1367  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1368  *     descriptor
1369  *
1370  * Return:
1371  */
1372 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1373 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1374 {
1375 	qdf_nbuf_t curr_nbuf = NULL;
1376 	uint16_t total_len = 0;
1377 	qdf_dma_addr_t paddr;
1378 	int32_t i;
1379 	int32_t mapped_buf_num = 0;
1380 
1381 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1382 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1383 
1384 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1385 
1386 	/* Continue only if frames are of DATA type */
1387 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1388 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1389 		dp_tx_debug("Pkt. recd is of not data type");
1390 		goto error;
1391 	}
1392 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1393 	if (vdev->raw_mode_war &&
1394 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1395 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1396 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1397 
1398 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1399 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1400 		/*
1401 		 * Number of nbuf's must not exceed the size of the frags
1402 		 * array in seg_info.
1403 		 */
1404 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1405 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1406 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1407 			goto error;
1408 		}
1409 		if (QDF_STATUS_SUCCESS !=
1410 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1411 						   curr_nbuf,
1412 						   QDF_DMA_TO_DEVICE,
1413 						   curr_nbuf->len)) {
1414 			dp_tx_err("%s dma map error ", __func__);
1415 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1416 			goto error;
1417 		}
1418 		/* Update the count of mapped nbuf's */
1419 		mapped_buf_num++;
1420 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1421 		seg_info->frags[i].paddr_lo = paddr;
1422 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1423 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1424 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1425 		total_len += qdf_nbuf_len(curr_nbuf);
1426 	}
1427 
1428 	seg_info->frag_cnt = i;
1429 	seg_info->total_len = total_len;
1430 	seg_info->next = NULL;
1431 
1432 	sg_info->curr_seg = seg_info;
1433 
1434 	msdu_info->frm_type = dp_tx_frm_raw;
1435 	msdu_info->num_seg = 1;
1436 
1437 	return nbuf;
1438 
1439 error:
1440 	i = 0;
1441 	while (nbuf) {
1442 		curr_nbuf = nbuf;
1443 		if (i < mapped_buf_num) {
1444 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1445 						     QDF_DMA_TO_DEVICE,
1446 						     curr_nbuf->len);
1447 			i++;
1448 		}
1449 		nbuf = qdf_nbuf_next(nbuf);
1450 		qdf_nbuf_free(curr_nbuf);
1451 	}
1452 	return NULL;
1453 
1454 }
1455 
1456 /**
1457  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1458  * @soc: DP soc handle
1459  * @nbuf: Buffer pointer
1460  *
1461  * unmap the chain of nbufs that belong to this RAW frame.
1462  *
1463  * Return: None
1464  */
1465 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1466 				    qdf_nbuf_t nbuf)
1467 {
1468 	qdf_nbuf_t cur_nbuf = nbuf;
1469 
1470 	do {
1471 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1472 					     QDF_DMA_TO_DEVICE,
1473 					     cur_nbuf->len);
1474 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1475 	} while (cur_nbuf);
1476 }
1477 
1478 #ifdef VDEV_PEER_PROTOCOL_COUNT
1479 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1480 					       qdf_nbuf_t nbuf)
1481 {
1482 	qdf_nbuf_t nbuf_local;
1483 	struct dp_vdev *vdev_local = vdev_hdl;
1484 
1485 	do {
1486 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1487 			break;
1488 		nbuf_local = nbuf;
1489 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1490 			 htt_cmn_pkt_type_raw))
1491 			break;
1492 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1493 			break;
1494 		else if (qdf_nbuf_is_tso((nbuf_local)))
1495 			break;
1496 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1497 						       (nbuf_local),
1498 						       NULL, 1, 0);
1499 	} while (0);
1500 }
1501 #endif
1502 
1503 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1504 /**
1505  * dp_tx_update_stats() - Update soc level tx stats
1506  * @soc: DP soc handle
1507  * @tx_desc: TX descriptor reference
1508  * @ring_id: TCL ring id
1509  *
1510  * Returns: none
1511  */
1512 void dp_tx_update_stats(struct dp_soc *soc,
1513 			struct dp_tx_desc_s *tx_desc,
1514 			uint8_t ring_id)
1515 {
1516 	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
1517 
1518 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1519 }
1520 
1521 int
1522 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1523 			 struct dp_tx_desc_s *tx_desc,
1524 			 uint8_t tid,
1525 			 struct dp_tx_msdu_info_s *msdu_info,
1526 			 uint8_t ring_id)
1527 {
1528 	struct dp_swlm *swlm = &soc->swlm;
1529 	union swlm_data swlm_query_data;
1530 	struct dp_swlm_tcl_data tcl_data;
1531 	QDF_STATUS status;
1532 	int ret;
1533 
1534 	if (!swlm->is_enabled)
1535 		return msdu_info->skip_hp_update;
1536 
1537 	tcl_data.nbuf = tx_desc->nbuf;
1538 	tcl_data.tid = tid;
1539 	tcl_data.ring_id = ring_id;
1540 	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
1541 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1542 	swlm_query_data.tcl_data = &tcl_data;
1543 
1544 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1545 	if (QDF_IS_STATUS_ERROR(status)) {
1546 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1547 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1548 		return 0;
1549 	}
1550 
1551 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1552 	if (ret) {
1553 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1554 	} else {
1555 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1556 	}
1557 
1558 	return ret;
1559 }
1560 
1561 void
1562 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1563 		      int coalesce)
1564 {
1565 	if (coalesce)
1566 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1567 	else
1568 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1569 }
1570 
1571 static inline void
1572 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1573 {
1574 	if (((i + 1) < msdu_info->num_seg))
1575 		msdu_info->skip_hp_update = 1;
1576 	else
1577 		msdu_info->skip_hp_update = 0;
1578 }
1579 
1580 static inline void
1581 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1582 {
1583 	hal_ring_handle_t hal_ring_hdl =
1584 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1585 
1586 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1587 		dp_err("Fillmore: SRNG access start failed");
1588 		return;
1589 	}
1590 
1591 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1592 }
1593 
1594 static inline void
1595 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1596 			 QDF_STATUS status,
1597 			 struct dp_tx_msdu_info_s *msdu_info)
1598 {
1599 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1600 		dp_flush_tcp_hp(soc,
1601 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1602 	}
1603 }
1604 #else
1605 static inline void
1606 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1607 {
1608 }
1609 
1610 static inline void
1611 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1612 			 QDF_STATUS status,
1613 			 struct dp_tx_msdu_info_s *msdu_info)
1614 {
1615 }
1616 #endif
1617 
1618 #ifdef FEATURE_RUNTIME_PM
1619 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1620 {
1621 	int ret;
1622 
1623 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1624 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1625 	return ret;
1626 }
1627 /**
1628  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1629  * @soc: Datapath soc handle
1630  * @hal_ring_hdl: HAL ring handle
1631  * @coalesce: Coalesce the current write or not
1632  *
1633  * Wrapper for HAL ring access end for data transmission for
1634  * FEATURE_RUNTIME_PM
1635  *
1636  * Returns: none
1637  */
1638 void
1639 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1640 			      hal_ring_handle_t hal_ring_hdl,
1641 			      int coalesce)
1642 {
1643 	int ret;
1644 
1645 	/*
1646 	 * Avoid runtime get and put APIs under high throughput scenarios.
1647 	 */
1648 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1649 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1650 		return;
1651 	}
1652 
1653 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1654 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1655 		if (hif_system_pm_state_check(soc->hif_handle)) {
1656 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1657 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1658 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1659 		} else {
1660 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1661 		}
1662 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1663 	} else {
1664 		dp_runtime_get(soc);
1665 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1666 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1667 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1668 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1669 		dp_runtime_put(soc);
1670 	}
1671 }
1672 #else
1673 
1674 #ifdef DP_POWER_SAVE
1675 void
1676 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1677 			      hal_ring_handle_t hal_ring_hdl,
1678 			      int coalesce)
1679 {
1680 	if (hif_system_pm_state_check(soc->hif_handle)) {
1681 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1682 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1683 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1684 	} else {
1685 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1686 	}
1687 }
1688 #endif
1689 
1690 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1691 {
1692 	return 0;
1693 }
1694 #endif
1695 
1696 /**
1697  * dp_tx_get_tid() - Obtain TID to be used for this frame
1698  * @vdev: DP vdev handle
1699  * @nbuf: skb
1700  *
1701  * Extract the DSCP or PCP information from frame and map into TID value.
1702  *
1703  * Return: void
1704  */
1705 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1706 			  struct dp_tx_msdu_info_s *msdu_info)
1707 {
1708 	uint8_t tos = 0, dscp_tid_override = 0;
1709 	uint8_t *hdr_ptr, *L3datap;
1710 	uint8_t is_mcast = 0;
1711 	qdf_ether_header_t *eh = NULL;
1712 	qdf_ethervlan_header_t *evh = NULL;
1713 	uint16_t   ether_type;
1714 	qdf_llc_t *llcHdr;
1715 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1716 
1717 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1718 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1719 		eh = (qdf_ether_header_t *)nbuf->data;
1720 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1721 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1722 	} else {
1723 		qdf_dot3_qosframe_t *qos_wh =
1724 			(qdf_dot3_qosframe_t *) nbuf->data;
1725 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1726 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1727 		return;
1728 	}
1729 
1730 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1731 	ether_type = eh->ether_type;
1732 
1733 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1734 	/*
1735 	 * Check if packet is dot3 or eth2 type.
1736 	 */
1737 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1738 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1739 				sizeof(*llcHdr));
1740 
1741 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1742 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1743 				sizeof(*llcHdr);
1744 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1745 					+ sizeof(*llcHdr) +
1746 					sizeof(qdf_net_vlanhdr_t));
1747 		} else {
1748 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1749 				sizeof(*llcHdr);
1750 		}
1751 	} else {
1752 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1753 			evh = (qdf_ethervlan_header_t *) eh;
1754 			ether_type = evh->ether_type;
1755 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1756 		}
1757 	}
1758 
1759 	/*
1760 	 * Find priority from IP TOS DSCP field
1761 	 */
1762 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1763 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1764 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1765 			/* Only for unicast frames */
1766 			if (!is_mcast) {
1767 				/* send it on VO queue */
1768 				msdu_info->tid = DP_VO_TID;
1769 			}
1770 		} else {
1771 			/*
1772 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1773 			 * from TOS byte.
1774 			 */
1775 			tos = ip->ip_tos;
1776 			dscp_tid_override = 1;
1777 
1778 		}
1779 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1780 		/* TODO
1781 		 * use flowlabel
1782 		 *igmpmld cases to be handled in phase 2
1783 		 */
1784 		unsigned long ver_pri_flowlabel;
1785 		unsigned long pri;
1786 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1787 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1788 			DP_IPV6_PRIORITY_SHIFT;
1789 		tos = pri;
1790 		dscp_tid_override = 1;
1791 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1792 		msdu_info->tid = DP_VO_TID;
1793 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1794 		/* Only for unicast frames */
1795 		if (!is_mcast) {
1796 			/* send ucast arp on VO queue */
1797 			msdu_info->tid = DP_VO_TID;
1798 		}
1799 	}
1800 
1801 	/*
1802 	 * Assign all MCAST packets to BE
1803 	 */
1804 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1805 		if (is_mcast) {
1806 			tos = 0;
1807 			dscp_tid_override = 1;
1808 		}
1809 	}
1810 
1811 	if (dscp_tid_override == 1) {
1812 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1813 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1814 	}
1815 
1816 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1817 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1818 
1819 	return;
1820 }
1821 
1822 /**
1823  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1824  * @vdev: DP vdev handle
1825  * @nbuf: skb
1826  *
1827  * Software based TID classification is required when more than 2 DSCP-TID
1828  * mapping tables are needed.
1829  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1830  *
1831  * Return: void
1832  */
1833 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1834 				      struct dp_tx_msdu_info_s *msdu_info)
1835 {
1836 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1837 
1838 	/*
1839 	 * skip_sw_tid_classification flag will set in below cases-
1840 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1841 	 * 2. hlos_tid_override enabled for vdev
1842 	 * 3. mesh mode enabled for vdev
1843 	 */
1844 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1845 		/* Update tid in msdu_info from skb priority */
1846 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1847 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1848 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1849 
1850 			if (tid == DP_TX_INVALID_QOS_TAG)
1851 				return;
1852 
1853 			msdu_info->tid = tid;
1854 			return;
1855 		}
1856 		return;
1857 	}
1858 
1859 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1860 }
1861 
1862 #ifdef FEATURE_WLAN_TDLS
1863 /**
1864  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1865  * @soc: datapath SOC
1866  * @vdev: datapath vdev
1867  * @tx_desc: TX descriptor
1868  *
1869  * Return: None
1870  */
1871 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1872 				    struct dp_vdev *vdev,
1873 				    struct dp_tx_desc_s *tx_desc)
1874 {
1875 	if (vdev) {
1876 		if (vdev->is_tdls_frame) {
1877 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1878 			vdev->is_tdls_frame = false;
1879 		}
1880 	}
1881 }
1882 
1883 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1884 {
1885 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1886 
1887 	switch (soc->arch_id) {
1888 	case CDP_ARCH_TYPE_LI:
1889 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1890 		break;
1891 
1892 	case CDP_ARCH_TYPE_BE:
1893 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1894 		break;
1895 
1896 	default:
1897 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1898 		QDF_BUG(0);
1899 	}
1900 
1901 	return tx_status;
1902 }
1903 
1904 /**
1905  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1906  * @soc: dp_soc handle
1907  * @tx_desc: TX descriptor
1908  * @vdev: datapath vdev handle
1909  *
1910  * Return: None
1911  */
1912 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1913 					 struct dp_tx_desc_s *tx_desc)
1914 {
1915 	uint8_t tx_status = 0;
1916 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1917 
1918 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1919 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1920 						     DP_MOD_ID_TDLS);
1921 
1922 	if (qdf_unlikely(!vdev)) {
1923 		dp_err_rl("vdev is null!");
1924 		goto error;
1925 	}
1926 
1927 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1928 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1929 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1930 
1931 	if (vdev->tx_non_std_data_callback.func) {
1932 		qdf_nbuf_set_next(nbuf, NULL);
1933 		vdev->tx_non_std_data_callback.func(
1934 				vdev->tx_non_std_data_callback.ctxt,
1935 				nbuf, tx_status);
1936 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1937 		return;
1938 	} else {
1939 		dp_err_rl("callback func is null");
1940 	}
1941 
1942 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1943 error:
1944 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1945 	qdf_nbuf_free(nbuf);
1946 }
1947 
1948 /**
1949  * dp_tx_msdu_single_map() - do nbuf map
1950  * @vdev: DP vdev handle
1951  * @tx_desc: DP TX descriptor pointer
1952  * @nbuf: skb pointer
1953  *
1954  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1955  * operation done in other component.
1956  *
1957  * Return: QDF_STATUS
1958  */
1959 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1960 					       struct dp_tx_desc_s *tx_desc,
1961 					       qdf_nbuf_t nbuf)
1962 {
1963 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1964 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1965 						  nbuf,
1966 						  QDF_DMA_TO_DEVICE,
1967 						  nbuf->len);
1968 	else
1969 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1970 					   QDF_DMA_TO_DEVICE);
1971 }
1972 #else
1973 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1974 					   struct dp_vdev *vdev,
1975 					   struct dp_tx_desc_s *tx_desc)
1976 {
1977 }
1978 
1979 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1980 						struct dp_tx_desc_s *tx_desc)
1981 {
1982 }
1983 
1984 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1985 					       struct dp_tx_desc_s *tx_desc,
1986 					       qdf_nbuf_t nbuf)
1987 {
1988 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1989 					  nbuf,
1990 					  QDF_DMA_TO_DEVICE,
1991 					  nbuf->len);
1992 }
1993 #endif
1994 
1995 static inline
1996 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1997 				      struct dp_tx_desc_s *tx_desc,
1998 				      qdf_nbuf_t nbuf)
1999 {
2000 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2001 
2002 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
2003 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
2004 		return 0;
2005 
2006 	return qdf_nbuf_mapped_paddr_get(nbuf);
2007 }
2008 
2009 static inline
2010 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2011 {
2012 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
2013 					   desc->nbuf,
2014 					   desc->dma_addr,
2015 					   QDF_DMA_TO_DEVICE,
2016 					   desc->length);
2017 }
2018 
2019 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
2020 static inline bool
2021 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2022 {
2023 	struct net_device *ingress_dev;
2024 	skb_frag_t *frag;
2025 	uint16_t buf_len = 0;
2026 	uint16_t linear_data_len = 0;
2027 	uint8_t *payload_addr = NULL;
2028 
2029 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2030 
2031 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2032 		dev_put(ingress_dev);
2033 		frag = &(skb_shinfo(nbuf)->frags[0]);
2034 		buf_len = skb_frag_size(frag);
2035 		payload_addr = (uint8_t *)skb_frag_address(frag);
2036 		linear_data_len = skb_headlen(nbuf);
2037 
2038 		buf_len += linear_data_len;
2039 		payload_addr = payload_addr - linear_data_len;
2040 		memcpy(payload_addr, nbuf->data, linear_data_len);
2041 
2042 		msdu_info->frm_type = dp_tx_frm_rmnet;
2043 		msdu_info->buf_len = buf_len;
2044 		msdu_info->payload_addr = payload_addr;
2045 
2046 		return true;
2047 	}
2048 	dev_put(ingress_dev);
2049 	return false;
2050 }
2051 
2052 static inline
2053 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2054 				    struct dp_tx_desc_s *tx_desc)
2055 {
2056 	qdf_dma_addr_t paddr;
2057 
2058 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2059 	tx_desc->length  = msdu_info->buf_len;
2060 
2061 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2062 				 (void *)(msdu_info->payload_addr +
2063 					  msdu_info->buf_len));
2064 
2065 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2066 	return paddr;
2067 }
2068 #else
2069 static inline bool
2070 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2071 {
2072 	return false;
2073 }
2074 
2075 static inline
2076 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2077 				    struct dp_tx_desc_s *tx_desc)
2078 {
2079 	return 0;
2080 }
2081 #endif
2082 
2083 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2084 static inline
2085 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2086 			      struct dp_tx_desc_s *tx_desc,
2087 			      qdf_nbuf_t nbuf)
2088 {
2089 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2090 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2091 					 (void *)(nbuf->data + nbuf->len));
2092 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2093 	} else {
2094 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2095 	}
2096 }
2097 
2098 static inline
2099 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2100 		      struct dp_tx_desc_s *desc)
2101 {
2102 	if (qdf_unlikely(!(desc->flags &
2103 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2104 		return dp_tx_nbuf_unmap_regular(soc, desc);
2105 }
2106 #else
2107 static inline
2108 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2109 			      struct dp_tx_desc_s *tx_desc,
2110 			      qdf_nbuf_t nbuf)
2111 {
2112 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2113 }
2114 
2115 static inline
2116 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2117 		      struct dp_tx_desc_s *desc)
2118 {
2119 	return dp_tx_nbuf_unmap_regular(soc, desc);
2120 }
2121 #endif
2122 
2123 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2124 static inline
2125 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2126 {
2127 	dp_tx_nbuf_unmap(soc, desc);
2128 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2129 }
2130 
2131 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2132 {
2133 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2134 		dp_tx_nbuf_unmap(soc, desc);
2135 }
2136 #else
2137 static inline
2138 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2139 {
2140 }
2141 
2142 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2143 {
2144 	dp_tx_nbuf_unmap(soc, desc);
2145 }
2146 #endif
2147 
2148 #ifdef MESH_MODE_SUPPORT
2149 /**
2150  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2151  * @soc: datapath SOC
2152  * @vdev: datapath vdev
2153  * @tx_desc: TX descriptor
2154  *
2155  * Return: None
2156  */
2157 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2158 					   struct dp_vdev *vdev,
2159 					   struct dp_tx_desc_s *tx_desc)
2160 {
2161 	if (qdf_unlikely(vdev->mesh_vdev))
2162 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2163 }
2164 
2165 /**
2166  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2167  * @soc: dp_soc handle
2168  * @tx_desc: TX descriptor
2169  * @delayed_free: delay the nbuf free
2170  *
2171  * Return: nbuf to be freed late
2172  */
2173 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2174 						   struct dp_tx_desc_s *tx_desc,
2175 						   bool delayed_free)
2176 {
2177 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2178 	struct dp_vdev *vdev = NULL;
2179 
2180 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2181 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2182 		if (vdev)
2183 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2184 
2185 		if (delayed_free)
2186 			return nbuf;
2187 
2188 		qdf_nbuf_free(nbuf);
2189 	} else {
2190 		if (vdev && vdev->osif_tx_free_ext) {
2191 			vdev->osif_tx_free_ext((nbuf));
2192 		} else {
2193 			if (delayed_free)
2194 				return nbuf;
2195 
2196 			qdf_nbuf_free(nbuf);
2197 		}
2198 	}
2199 
2200 	if (vdev)
2201 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2202 
2203 	return NULL;
2204 }
2205 #else
2206 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2207 					   struct dp_vdev *vdev,
2208 					   struct dp_tx_desc_s *tx_desc)
2209 {
2210 }
2211 
2212 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2213 						   struct dp_tx_desc_s *tx_desc,
2214 						   bool delayed_free)
2215 {
2216 	return NULL;
2217 }
2218 #endif
2219 
2220 /**
2221  * dp_tx_frame_is_drop() - checks if the packet is loopback
2222  * @vdev: DP vdev handle
2223  * @nbuf: skb
2224  *
2225  * Return: 1 if frame needs to be dropped else 0
2226  */
2227 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2228 {
2229 	struct dp_pdev *pdev = NULL;
2230 	struct dp_ast_entry *src_ast_entry = NULL;
2231 	struct dp_ast_entry *dst_ast_entry = NULL;
2232 	struct dp_soc *soc = NULL;
2233 
2234 	qdf_assert(vdev);
2235 	pdev = vdev->pdev;
2236 	qdf_assert(pdev);
2237 	soc = pdev->soc;
2238 
2239 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2240 				(soc, dstmac, vdev->pdev->pdev_id);
2241 
2242 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2243 				(soc, srcmac, vdev->pdev->pdev_id);
2244 	if (dst_ast_entry && src_ast_entry) {
2245 		if (dst_ast_entry->peer_id ==
2246 				src_ast_entry->peer_id)
2247 			return 1;
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2254 	defined(WLAN_MCAST_MLO)
2255 /* MLO peer id for reinject*/
2256 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2257 /* MLO vdev id inc offset */
2258 #define DP_MLO_VDEV_ID_OFFSET 0x80
2259 
2260 #ifdef QCA_SUPPORT_WDS_EXTENDED
2261 static inline bool
2262 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2263 {
2264 	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
2265 		return true;
2266 
2267 	return false;
2268 }
2269 #else
2270 static inline bool
2271 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2272 {
2273 	return false;
2274 }
2275 #endif
2276 
2277 static inline void
2278 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2279 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2280 {
2281 	/* wds ext enabled will not set the TO_FW bit */
2282 	if (dp_tx_wds_ext_check(tx_exc_metadata))
2283 		return;
2284 
2285 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2286 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2287 		qdf_atomic_inc(&soc->num_tx_exception);
2288 	}
2289 }
2290 
2291 static inline void
2292 dp_tx_update_mcast_param(uint16_t peer_id,
2293 			 uint16_t *htt_tcl_metadata,
2294 			 struct dp_vdev *vdev,
2295 			 struct dp_tx_msdu_info_s *msdu_info)
2296 {
2297 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2298 		*htt_tcl_metadata = 0;
2299 		DP_TX_TCL_METADATA_TYPE_SET(
2300 				*htt_tcl_metadata,
2301 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2302 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2303 						    msdu_info->gsn);
2304 
2305 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2306 		if (qdf_unlikely(vdev->nawds_enabled ||
2307 				 dp_vdev_is_wds_ext_enabled(vdev)))
2308 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2309 							*htt_tcl_metadata, 1);
2310 	} else {
2311 		msdu_info->vdev_id = vdev->vdev_id;
2312 	}
2313 }
2314 #else
2315 static inline void
2316 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2317 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2318 {
2319 }
2320 
2321 static inline void
2322 dp_tx_update_mcast_param(uint16_t peer_id,
2323 			 uint16_t *htt_tcl_metadata,
2324 			 struct dp_vdev *vdev,
2325 			 struct dp_tx_msdu_info_s *msdu_info)
2326 {
2327 }
2328 #endif
2329 
2330 #ifdef DP_TX_SW_DROP_STATS_INC
2331 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2332 				 qdf_nbuf_t nbuf,
2333 				 enum cdp_tx_sw_drop drop_code)
2334 {
2335 	/* EAPOL Drop stats */
2336 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2337 		switch (drop_code) {
2338 		case TX_DESC_ERR:
2339 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2340 			break;
2341 		case TX_HAL_RING_ACCESS_ERR:
2342 			DP_STATS_INC(pdev,
2343 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2344 			break;
2345 		case TX_DMA_MAP_ERR:
2346 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2347 			break;
2348 		case TX_HW_ENQUEUE:
2349 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2350 			break;
2351 		case TX_SW_ENQUEUE:
2352 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2353 			break;
2354 		default:
2355 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2356 			break;
2357 		}
2358 	}
2359 }
2360 #else
2361 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2362 				 qdf_nbuf_t nbuf,
2363 				 enum cdp_tx_sw_drop drop_code)
2364 {
2365 }
2366 #endif
2367 
2368 /**
2369  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2370  * @vdev: DP vdev handle
2371  * @nbuf: skb
2372  * @tid: TID from HLOS for overriding default DSCP-TID mapping
2373  * @meta_data: Metadata to the fw
2374  * @tx_q: Tx queue to be used for this Tx frame
2375  * @peer_id: peer_id of the peer in case of NAWDS frames
2376  * @tx_exc_metadata: Handle that holds exception path metadata
2377  *
2378  * Return: NULL on success,
2379  *         nbuf when it fails to send
2380  */
2381 qdf_nbuf_t
2382 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2383 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2384 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2385 {
2386 	struct dp_pdev *pdev = vdev->pdev;
2387 	struct dp_soc *soc = pdev->soc;
2388 	struct dp_tx_desc_s *tx_desc;
2389 	QDF_STATUS status;
2390 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2391 	uint16_t htt_tcl_metadata = 0;
2392 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2393 	uint8_t tid = msdu_info->tid;
2394 	struct cdp_tid_tx_stats *tid_stats = NULL;
2395 	qdf_dma_addr_t paddr;
2396 
2397 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2398 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2399 			msdu_info, tx_exc_metadata);
2400 	if (!tx_desc) {
2401 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2402 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2403 		drop_code = TX_DESC_ERR;
2404 		goto fail_return;
2405 	}
2406 
2407 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2408 
2409 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2410 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2411 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2412 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2413 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2414 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2415 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2416 					       peer_id);
2417 		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
2418 	} else
2419 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2420 
2421 	if (msdu_info->exception_fw)
2422 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2423 
2424 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2425 					 !pdev->enhanced_stats_en);
2426 
2427 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2428 
2429 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
2430 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
2431 	else
2432 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2433 
2434 	if (!paddr) {
2435 		/* Handle failure */
2436 		dp_err("qdf_nbuf_map failed");
2437 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2438 		drop_code = TX_DMA_MAP_ERR;
2439 		goto release_desc;
2440 	}
2441 
2442 	tx_desc->dma_addr = paddr;
2443 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2444 			       tx_desc->id, DP_TX_DESC_MAP);
2445 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2446 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2447 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2448 					     htt_tcl_metadata,
2449 					     tx_exc_metadata, msdu_info);
2450 
2451 	if (status != QDF_STATUS_SUCCESS) {
2452 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2453 			     tx_desc, tx_q->ring_id);
2454 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2455 				       tx_desc->id, DP_TX_DESC_UNMAP);
2456 		dp_tx_nbuf_unmap(soc, tx_desc);
2457 		drop_code = TX_HW_ENQUEUE;
2458 		goto release_desc;
2459 	}
2460 
2461 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2462 	return NULL;
2463 
2464 release_desc:
2465 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2466 
2467 fail_return:
2468 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2469 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2470 	tid_stats = &pdev->stats.tid_stats.
2471 		    tid_tx_stats[tx_q->ring_id][tid];
2472 	tid_stats->swdrop_cnt[drop_code]++;
2473 	return nbuf;
2474 }
2475 
2476 /**
2477  * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
2478  * @soc: Soc handle
2479  * @desc: software Tx descriptor to be processed
2480  *
2481  * Return: 0 if Success
2482  */
2483 #ifdef FEATURE_WLAN_TDLS
2484 static inline int
2485 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2486 {
2487 	/* If it is TDLS mgmt, don't unmap or free the frame */
2488 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2489 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2490 		return 0;
2491 	}
2492 	return 1;
2493 }
2494 #else
2495 static inline int
2496 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2497 {
2498 	return 1;
2499 }
2500 #endif
2501 
2502 /**
2503  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2504  * @soc: Soc handle
2505  * @desc: software Tx descriptor to be processed
2506  * @delayed_free: defer freeing of nbuf
2507  *
2508  * Return: nbuf to be freed later
2509  */
2510 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2511 			       bool delayed_free)
2512 {
2513 	qdf_nbuf_t nbuf = desc->nbuf;
2514 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2515 
2516 	/* nbuf already freed in vdev detach path */
2517 	if (!nbuf)
2518 		return NULL;
2519 
2520 	if (!dp_tdls_tx_comp_free_buff(soc, desc))
2521 		return NULL;
2522 
2523 	/* 0 : MSDU buffer, 1 : MLE */
2524 	if (desc->msdu_ext_desc) {
2525 		/* TSO free */
2526 		if (hal_tx_ext_desc_get_tso_enable(
2527 					desc->msdu_ext_desc->vaddr)) {
2528 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2529 					       desc->id, DP_TX_COMP_MSDU_EXT);
2530 			dp_tx_tso_seg_history_add(soc,
2531 						  desc->msdu_ext_desc->tso_desc,
2532 						  desc->nbuf, desc->id, type);
2533 			/* unmap eash TSO seg before free the nbuf */
2534 			dp_tx_tso_unmap_segment(soc,
2535 						desc->msdu_ext_desc->tso_desc,
2536 						desc->msdu_ext_desc->
2537 						tso_num_desc);
2538 			goto nbuf_free;
2539 		}
2540 
2541 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2542 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2543 			qdf_dma_addr_t iova;
2544 			uint32_t frag_len;
2545 			uint32_t i;
2546 
2547 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2548 						     QDF_DMA_TO_DEVICE,
2549 						     qdf_nbuf_headlen(nbuf));
2550 
2551 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2552 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2553 							      &iova,
2554 							      &frag_len);
2555 				if (!iova || !frag_len)
2556 					break;
2557 
2558 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2559 						   QDF_DMA_TO_DEVICE);
2560 			}
2561 
2562 			goto nbuf_free;
2563 		}
2564 	}
2565 	/* If it's ME frame, dont unmap the cloned nbuf's */
2566 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2567 		goto nbuf_free;
2568 
2569 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2570 	dp_tx_unmap(soc, desc);
2571 
2572 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2573 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2574 
2575 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2576 		return NULL;
2577 
2578 nbuf_free:
2579 	if (delayed_free)
2580 		return nbuf;
2581 
2582 	qdf_nbuf_free(nbuf);
2583 
2584 	return NULL;
2585 }
2586 
2587 /**
2588  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2589  * @soc: DP soc handle
2590  * @nbuf: skb
2591  * @msdu_info: MSDU info
2592  *
2593  * Return: None
2594  */
2595 static inline void
2596 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2597 		   struct dp_tx_msdu_info_s *msdu_info)
2598 {
2599 	uint32_t cur_idx;
2600 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2601 
2602 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2603 				     qdf_nbuf_headlen(nbuf));
2604 
2605 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2606 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2607 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2608 				    seg->frags[cur_idx].paddr_hi) << 32),
2609 				   seg->frags[cur_idx].len,
2610 				   QDF_DMA_TO_DEVICE);
2611 }
2612 
2613 /**
2614  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2615  * @vdev: DP vdev handle
2616  * @nbuf: skb
2617  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2618  *
2619  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2620  *
2621  * Return: NULL on success,
2622  *         nbuf when it fails to send
2623  */
2624 #if QDF_LOCK_STATS
2625 noinline
2626 #else
2627 #endif
2628 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2629 				    struct dp_tx_msdu_info_s *msdu_info)
2630 {
2631 	uint32_t i;
2632 	struct dp_pdev *pdev = vdev->pdev;
2633 	struct dp_soc *soc = pdev->soc;
2634 	struct dp_tx_desc_s *tx_desc;
2635 	bool is_cce_classified = false;
2636 	QDF_STATUS status;
2637 	uint16_t htt_tcl_metadata = 0;
2638 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2639 	struct cdp_tid_tx_stats *tid_stats = NULL;
2640 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2641 
2642 	if (msdu_info->frm_type == dp_tx_frm_me)
2643 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2644 
2645 	i = 0;
2646 	/* Print statement to track i and num_seg */
2647 	/*
2648 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2649 	 * descriptors using information in msdu_info
2650 	 */
2651 	while (i < msdu_info->num_seg) {
2652 		/*
2653 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2654 		 * descriptor
2655 		 */
2656 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2657 				tx_q->desc_pool_id);
2658 
2659 		if (!tx_desc) {
2660 			if (msdu_info->frm_type == dp_tx_frm_me) {
2661 				prep_desc_fail++;
2662 				dp_tx_me_free_buf(pdev,
2663 					(void *)(msdu_info->u.sg_info
2664 						.curr_seg->frags[0].vaddr));
2665 				if (prep_desc_fail == msdu_info->num_seg) {
2666 					/*
2667 					 * Unmap is needed only if descriptor
2668 					 * preparation failed for all segments.
2669 					 */
2670 					qdf_nbuf_unmap(soc->osdev,
2671 						       msdu_info->u.sg_info.
2672 						       curr_seg->nbuf,
2673 						       QDF_DMA_TO_DEVICE);
2674 				}
2675 				/*
2676 				 * Free the nbuf for the current segment
2677 				 * and make it point to the next in the list.
2678 				 * For me, there are as many segments as there
2679 				 * are no of clients.
2680 				 */
2681 				qdf_nbuf_free(msdu_info->u.sg_info
2682 					      .curr_seg->nbuf);
2683 				if (msdu_info->u.sg_info.curr_seg->next) {
2684 					msdu_info->u.sg_info.curr_seg =
2685 						msdu_info->u.sg_info
2686 						.curr_seg->next;
2687 					nbuf = msdu_info->u.sg_info
2688 					       .curr_seg->nbuf;
2689 				}
2690 				i++;
2691 				continue;
2692 			}
2693 
2694 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2695 				dp_tx_tso_seg_history_add(
2696 						soc,
2697 						msdu_info->u.tso_info.curr_seg,
2698 						nbuf, 0, DP_TX_DESC_UNMAP);
2699 				dp_tx_tso_unmap_segment(soc,
2700 							msdu_info->u.tso_info.
2701 							curr_seg,
2702 							msdu_info->u.tso_info.
2703 							tso_num_seg_list);
2704 
2705 				if (msdu_info->u.tso_info.curr_seg->next) {
2706 					msdu_info->u.tso_info.curr_seg =
2707 					msdu_info->u.tso_info.curr_seg->next;
2708 					i++;
2709 					continue;
2710 				}
2711 			}
2712 
2713 			if (msdu_info->frm_type == dp_tx_frm_sg)
2714 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2715 
2716 			goto done;
2717 		}
2718 
2719 		if (msdu_info->frm_type == dp_tx_frm_me) {
2720 			tx_desc->msdu_ext_desc->me_buffer =
2721 				(struct dp_tx_me_buf_t *)msdu_info->
2722 				u.sg_info.curr_seg->frags[0].vaddr;
2723 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2724 		}
2725 
2726 		if (is_cce_classified)
2727 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2728 
2729 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2730 		if (msdu_info->exception_fw) {
2731 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2732 		}
2733 
2734 		dp_tx_is_hp_update_required(i, msdu_info);
2735 
2736 		/*
2737 		 * For frames with multiple segments (TSO, ME), jump to next
2738 		 * segment.
2739 		 */
2740 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2741 			if (msdu_info->u.tso_info.curr_seg->next) {
2742 				msdu_info->u.tso_info.curr_seg =
2743 					msdu_info->u.tso_info.curr_seg->next;
2744 
2745 				/*
2746 				 * If this is a jumbo nbuf, then increment the
2747 				 * number of nbuf users for each additional
2748 				 * segment of the msdu. This will ensure that
2749 				 * the skb is freed only after receiving tx
2750 				 * completion for all segments of an nbuf
2751 				 */
2752 				qdf_nbuf_inc_users(nbuf);
2753 
2754 				/* Check with MCL if this is needed */
2755 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2756 				 */
2757 			}
2758 		}
2759 
2760 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2761 					 &htt_tcl_metadata,
2762 					 vdev,
2763 					 msdu_info);
2764 		/*
2765 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2766 		 */
2767 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2768 						     htt_tcl_metadata,
2769 						     NULL, msdu_info);
2770 
2771 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2772 
2773 		if (status != QDF_STATUS_SUCCESS) {
2774 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2775 				   tx_desc, tx_q->ring_id);
2776 
2777 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2778 			tid_stats = &pdev->stats.tid_stats.
2779 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2780 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2781 
2782 			if (msdu_info->frm_type == dp_tx_frm_me) {
2783 				hw_enq_fail++;
2784 				if (hw_enq_fail == msdu_info->num_seg) {
2785 					/*
2786 					 * Unmap is needed only if enqueue
2787 					 * failed for all segments.
2788 					 */
2789 					qdf_nbuf_unmap(soc->osdev,
2790 						       msdu_info->u.sg_info.
2791 						       curr_seg->nbuf,
2792 						       QDF_DMA_TO_DEVICE);
2793 				}
2794 				/*
2795 				 * Free the nbuf for the current segment
2796 				 * and make it point to the next in the list.
2797 				 * For me, there are as many segments as there
2798 				 * are no of clients.
2799 				 */
2800 				qdf_nbuf_free(msdu_info->u.sg_info
2801 					      .curr_seg->nbuf);
2802 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2803 				if (msdu_info->u.sg_info.curr_seg->next) {
2804 					msdu_info->u.sg_info.curr_seg =
2805 						msdu_info->u.sg_info
2806 						.curr_seg->next;
2807 					nbuf = msdu_info->u.sg_info
2808 					       .curr_seg->nbuf;
2809 				} else
2810 					break;
2811 				i++;
2812 				continue;
2813 			}
2814 
2815 			/*
2816 			 * For TSO frames, the nbuf users increment done for
2817 			 * the current segment has to be reverted, since the
2818 			 * hw enqueue for this segment failed
2819 			 */
2820 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2821 			    msdu_info->u.tso_info.curr_seg) {
2822 				/*
2823 				 * unmap and free current,
2824 				 * retransmit remaining segments
2825 				 */
2826 				dp_tx_comp_free_buf(soc, tx_desc, false);
2827 				i++;
2828 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2829 				continue;
2830 			}
2831 
2832 			if (msdu_info->frm_type == dp_tx_frm_sg)
2833 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2834 
2835 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2836 			goto done;
2837 		}
2838 
2839 		/*
2840 		 * TODO
2841 		 * if tso_info structure can be modified to have curr_seg
2842 		 * as first element, following 2 blocks of code (for TSO and SG)
2843 		 * can be combined into 1
2844 		 */
2845 
2846 		/*
2847 		 * For Multicast-Unicast converted packets,
2848 		 * each converted frame (for a client) is represented as
2849 		 * 1 segment
2850 		 */
2851 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2852 				(msdu_info->frm_type == dp_tx_frm_me)) {
2853 			if (msdu_info->u.sg_info.curr_seg->next) {
2854 				msdu_info->u.sg_info.curr_seg =
2855 					msdu_info->u.sg_info.curr_seg->next;
2856 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2857 			} else
2858 				break;
2859 		}
2860 		i++;
2861 	}
2862 
2863 	nbuf = NULL;
2864 
2865 done:
2866 	return nbuf;
2867 }
2868 
2869 /**
2870  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2871  *                     for SG frames
2872  * @vdev: DP vdev handle
2873  * @nbuf: skb
2874  * @seg_info: Pointer to Segment info Descriptor to be prepared
2875  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2876  *
2877  * Return: NULL on success,
2878  *         nbuf when it fails to send
2879  */
2880 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2881 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2882 {
2883 	uint32_t cur_frag, nr_frags, i;
2884 	qdf_dma_addr_t paddr;
2885 	struct dp_tx_sg_info_s *sg_info;
2886 
2887 	sg_info = &msdu_info->u.sg_info;
2888 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2889 
2890 	if (QDF_STATUS_SUCCESS !=
2891 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2892 					   QDF_DMA_TO_DEVICE,
2893 					   qdf_nbuf_headlen(nbuf))) {
2894 		dp_tx_err("dma map error");
2895 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2896 		qdf_nbuf_free(nbuf);
2897 		return NULL;
2898 	}
2899 
2900 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2901 	seg_info->frags[0].paddr_lo = paddr;
2902 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2903 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2904 	seg_info->frags[0].vaddr = (void *) nbuf;
2905 
2906 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2907 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2908 							    nbuf, 0,
2909 							    QDF_DMA_TO_DEVICE,
2910 							    cur_frag)) {
2911 			dp_tx_err("frag dma map error");
2912 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2913 			goto map_err;
2914 		}
2915 
2916 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2917 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2918 		seg_info->frags[cur_frag + 1].paddr_hi =
2919 			((uint64_t) paddr) >> 32;
2920 		seg_info->frags[cur_frag + 1].len =
2921 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2922 	}
2923 
2924 	seg_info->frag_cnt = (cur_frag + 1);
2925 	seg_info->total_len = qdf_nbuf_len(nbuf);
2926 	seg_info->next = NULL;
2927 
2928 	sg_info->curr_seg = seg_info;
2929 
2930 	msdu_info->frm_type = dp_tx_frm_sg;
2931 	msdu_info->num_seg = 1;
2932 
2933 	return nbuf;
2934 map_err:
2935 	/* restore paddr into nbuf before calling unmap */
2936 	qdf_nbuf_mapped_paddr_set(nbuf,
2937 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2938 				  ((uint64_t)
2939 				  seg_info->frags[0].paddr_hi) << 32));
2940 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2941 				     QDF_DMA_TO_DEVICE,
2942 				     seg_info->frags[0].len);
2943 	for (i = 1; i <= cur_frag; i++) {
2944 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2945 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2946 				   seg_info->frags[i].paddr_hi) << 32),
2947 				   seg_info->frags[i].len,
2948 				   QDF_DMA_TO_DEVICE);
2949 	}
2950 	qdf_nbuf_free(nbuf);
2951 	return NULL;
2952 }
2953 
2954 /**
2955  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2956  * @vdev: DP vdev handle
2957  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2958  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2959  *
2960  * Return: NULL on failure,
2961  *         nbuf when extracted successfully
2962  */
2963 static
2964 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2965 				    struct dp_tx_msdu_info_s *msdu_info,
2966 				    uint16_t ppdu_cookie)
2967 {
2968 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2969 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2970 
2971 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2972 
2973 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2974 				(msdu_info->meta_data[5], 1);
2975 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2976 				(msdu_info->meta_data[5], 1);
2977 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2978 				(msdu_info->meta_data[6], ppdu_cookie);
2979 
2980 	msdu_info->exception_fw = 1;
2981 	msdu_info->is_tx_sniffer = 1;
2982 }
2983 
2984 #ifdef MESH_MODE_SUPPORT
2985 
2986 /**
2987  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2988 				and prepare msdu_info for mesh frames.
2989  * @vdev: DP vdev handle
2990  * @nbuf: skb
2991  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2992  *
2993  * Return: NULL on failure,
2994  *         nbuf when extracted successfully
2995  */
2996 static
2997 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2998 				struct dp_tx_msdu_info_s *msdu_info)
2999 {
3000 	struct meta_hdr_s *mhdr;
3001 	struct htt_tx_msdu_desc_ext2_t *meta_data =
3002 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
3003 
3004 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3005 
3006 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
3007 		msdu_info->exception_fw = 0;
3008 		goto remove_meta_hdr;
3009 	}
3010 
3011 	msdu_info->exception_fw = 1;
3012 
3013 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
3014 
3015 	meta_data->host_tx_desc_pool = 1;
3016 	meta_data->update_peer_cache = 1;
3017 	meta_data->learning_frame = 1;
3018 
3019 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
3020 		meta_data->power = mhdr->power;
3021 
3022 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
3023 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
3024 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
3025 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
3026 
3027 		meta_data->dyn_bw = 1;
3028 
3029 		meta_data->valid_pwr = 1;
3030 		meta_data->valid_mcs_mask = 1;
3031 		meta_data->valid_nss_mask = 1;
3032 		meta_data->valid_preamble_type  = 1;
3033 		meta_data->valid_retries = 1;
3034 		meta_data->valid_bw_info = 1;
3035 	}
3036 
3037 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
3038 		meta_data->encrypt_type = 0;
3039 		meta_data->valid_encrypt_type = 1;
3040 		meta_data->learning_frame = 0;
3041 	}
3042 
3043 	meta_data->valid_key_flags = 1;
3044 	meta_data->key_flags = (mhdr->keyix & 0x3);
3045 
3046 remove_meta_hdr:
3047 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
3048 		dp_tx_err("qdf_nbuf_pull_head failed");
3049 		qdf_nbuf_free(nbuf);
3050 		return NULL;
3051 	}
3052 
3053 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
3054 
3055 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
3056 		   " tid %d to_fw %d",
3057 		   msdu_info->meta_data[0],
3058 		   msdu_info->meta_data[1],
3059 		   msdu_info->meta_data[2],
3060 		   msdu_info->meta_data[3],
3061 		   msdu_info->meta_data[4],
3062 		   msdu_info->meta_data[5],
3063 		   msdu_info->tid, msdu_info->exception_fw);
3064 
3065 	return nbuf;
3066 }
3067 #else
3068 static
3069 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3070 				struct dp_tx_msdu_info_s *msdu_info)
3071 {
3072 	return nbuf;
3073 }
3074 
3075 #endif
3076 
3077 /**
3078  * dp_check_exc_metadata() - Checks if parameters are valid
3079  * @tx_exc - holds all exception path parameters
3080  *
3081  * Returns true when all the parameters are valid else false
3082  *
3083  */
3084 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3085 {
3086 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3087 			    HTT_INVALID_TID);
3088 	bool invalid_encap_type =
3089 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3090 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3091 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3092 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3093 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3094 			       tx_exc->ppdu_cookie == 0);
3095 
3096 	if (tx_exc->is_intrabss_fwd)
3097 		return true;
3098 
3099 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3100 	    invalid_cookie) {
3101 		return false;
3102 	}
3103 
3104 	return true;
3105 }
3106 
3107 #ifdef ATH_SUPPORT_IQUE
3108 /**
3109  * dp_tx_mcast_enhance() - Multicast enhancement on TX
3110  * @vdev: vdev handle
3111  * @nbuf: skb
3112  *
3113  * Return: true on success,
3114  *         false on failure
3115  */
3116 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3117 {
3118 	qdf_ether_header_t *eh;
3119 
3120 	/* Mcast to Ucast Conversion*/
3121 	if (qdf_likely(!vdev->mcast_enhancement_en))
3122 		return true;
3123 
3124 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3125 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3126 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3127 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3128 		qdf_nbuf_set_next(nbuf, NULL);
3129 
3130 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
3131 				 qdf_nbuf_len(nbuf));
3132 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3133 				QDF_STATUS_SUCCESS) {
3134 			return false;
3135 		}
3136 
3137 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3138 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3139 					QDF_STATUS_SUCCESS) {
3140 				return false;
3141 			}
3142 		}
3143 	}
3144 
3145 	return true;
3146 }
3147 #else
3148 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3149 {
3150 	return true;
3151 }
3152 #endif
3153 
3154 #ifdef QCA_SUPPORT_WDS_EXTENDED
3155 /**
3156  * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
3157  * @vdev: vdev handle
3158  * @nbuf: skb
3159  *
3160  * Return: true if frame is dropped, false otherwise
3161  */
3162 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3163 {
3164 	/* Drop tx mcast and WDS Extended feature check */
3165 	if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
3166 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3167 						qdf_nbuf_data(nbuf);
3168 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3169 			DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
3170 			return true;
3171 		}
3172 	}
3173 
3174 	return false;
3175 }
3176 #else
3177 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3178 {
3179 	return false;
3180 }
3181 #endif
3182 /**
3183  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3184  * @nbuf: qdf_nbuf_t
3185  * @vdev: struct dp_vdev *
3186  *
3187  * Allow packet for processing only if it is for peer client which is
3188  * connected with same vap. Drop packet if client is connected to
3189  * different vap.
3190  *
3191  * Return: QDF_STATUS
3192  */
3193 static inline QDF_STATUS
3194 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3195 {
3196 	struct dp_ast_entry *dst_ast_entry = NULL;
3197 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3198 
3199 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3200 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3201 		return QDF_STATUS_SUCCESS;
3202 
3203 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3204 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3205 							eh->ether_dhost,
3206 							vdev->vdev_id);
3207 
3208 	/* If there is no ast entry, return failure */
3209 	if (qdf_unlikely(!dst_ast_entry)) {
3210 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3211 		return QDF_STATUS_E_FAILURE;
3212 	}
3213 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3214 
3215 	return QDF_STATUS_SUCCESS;
3216 }
3217 
3218 /**
3219  * dp_tx_nawds_handler() - NAWDS handler
3220  *
3221  * @soc: DP soc handle
3222  * @vdev_id: id of DP vdev handle
3223  * @msdu_info: msdu_info required to create HTT metadata
3224  * @nbuf: skb
3225  *
3226  * This API transfers the multicast frames with the peer id
3227  * on NAWDS enabled peer.
3228 
3229  * Return: none
3230  */
3231 
3232 static inline
3233 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3234 			 struct dp_tx_msdu_info_s *msdu_info,
3235 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3236 {
3237 	struct dp_peer *peer = NULL;
3238 	qdf_nbuf_t nbuf_clone = NULL;
3239 	uint16_t peer_id = DP_INVALID_PEER;
3240 	struct dp_txrx_peer *txrx_peer;
3241 
3242 	/* This check avoids pkt forwarding which is entered
3243 	 * in the ast table but still doesn't have valid peerid.
3244 	 */
3245 	if (sa_peer_id == HTT_INVALID_PEER)
3246 		return;
3247 
3248 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3249 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3250 		txrx_peer = dp_get_txrx_peer(peer);
3251 		if (!txrx_peer)
3252 			continue;
3253 
3254 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3255 			peer_id = peer->peer_id;
3256 
3257 			if (!dp_peer_is_primary_link_peer(peer))
3258 				continue;
3259 
3260 			/* Multicast packets needs to be
3261 			 * dropped in case of intra bss forwarding
3262 			 */
3263 			if (sa_peer_id == txrx_peer->peer_id) {
3264 				dp_tx_debug("multicast packet");
3265 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3266 							  tx.nawds_mcast_drop,
3267 							  1);
3268 				continue;
3269 			}
3270 
3271 			nbuf_clone = qdf_nbuf_clone(nbuf);
3272 
3273 			if (!nbuf_clone) {
3274 				QDF_TRACE(QDF_MODULE_ID_DP,
3275 					  QDF_TRACE_LEVEL_ERROR,
3276 					  FL("nbuf clone failed"));
3277 				break;
3278 			}
3279 
3280 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3281 							    msdu_info, peer_id,
3282 							    NULL);
3283 
3284 			if (nbuf_clone) {
3285 				dp_tx_debug("pkt send failed");
3286 				qdf_nbuf_free(nbuf_clone);
3287 			} else {
3288 				if (peer_id != DP_INVALID_PEER)
3289 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3290 								      tx.nawds_mcast,
3291 								      1, qdf_nbuf_len(nbuf));
3292 			}
3293 		}
3294 	}
3295 
3296 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3297 }
3298 
3299 /**
3300  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
3301  * @soc: DP soc handle
3302  * @vdev_id: id of DP vdev handle
3303  * @nbuf: skb
3304  * @tx_exc_metadata: Handle that holds exception path meta data
3305  *
3306  * Entry point for Core Tx layer (DP_TX) invoked from
3307  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3308  *
3309  * Return: NULL on success,
3310  *         nbuf when it fails to send
3311  */
3312 qdf_nbuf_t
3313 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3314 		     qdf_nbuf_t nbuf,
3315 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3316 {
3317 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3318 	struct dp_tx_msdu_info_s msdu_info;
3319 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3320 						     DP_MOD_ID_TX_EXCEPTION);
3321 
3322 	if (qdf_unlikely(!vdev))
3323 		goto fail;
3324 
3325 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3326 
3327 	if (!tx_exc_metadata)
3328 		goto fail;
3329 
3330 	msdu_info.tid = tx_exc_metadata->tid;
3331 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3332 			 QDF_MAC_ADDR_REF(nbuf->data));
3333 
3334 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3335 
3336 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3337 		dp_tx_err("Invalid parameters in exception path");
3338 		goto fail;
3339 	}
3340 
3341 	/* for peer based metadata check if peer is valid */
3342 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3343 		struct dp_peer *peer = NULL;
3344 
3345 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3346 					      tx_exc_metadata->peer_id,
3347 					      DP_MOD_ID_TX_EXCEPTION);
3348 		if (qdf_unlikely(!peer)) {
3349 			DP_STATS_INC(vdev,
3350 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3351 				     1);
3352 			goto fail;
3353 		}
3354 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3355 	}
3356 	/* Basic sanity checks for unsupported packets */
3357 
3358 	/* MESH mode */
3359 	if (qdf_unlikely(vdev->mesh_vdev)) {
3360 		dp_tx_err("Mesh mode is not supported in exception path");
3361 		goto fail;
3362 	}
3363 
3364 	/*
3365 	 * Classify the frame and call corresponding
3366 	 * "prepare" function which extracts the segment (TSO)
3367 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3368 	 * into MSDU_INFO structure which is later used to fill
3369 	 * SW and HW descriptors.
3370 	 */
3371 	if (qdf_nbuf_is_tso(nbuf)) {
3372 		dp_verbose_debug("TSO frame %pK", vdev);
3373 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3374 				 qdf_nbuf_len(nbuf));
3375 
3376 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3377 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3378 					 qdf_nbuf_len(nbuf));
3379 			goto fail;
3380 		}
3381 
3382 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3383 
3384 		goto send_multiple;
3385 	}
3386 
3387 	/* SG */
3388 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3389 		struct dp_tx_seg_info_s seg_info = {0};
3390 
3391 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3392 		if (!nbuf)
3393 			goto fail;
3394 
3395 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3396 
3397 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3398 				 qdf_nbuf_len(nbuf));
3399 
3400 		goto send_multiple;
3401 	}
3402 
3403 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3404 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3405 				 qdf_nbuf_len(nbuf));
3406 
3407 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3408 					       tx_exc_metadata->ppdu_cookie);
3409 	}
3410 
3411 	/*
3412 	 * Get HW Queue to use for this frame.
3413 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3414 	 * dedicated for data and 1 for command.
3415 	 * "queue_id" maps to one hardware ring.
3416 	 *  With each ring, we also associate a unique Tx descriptor pool
3417 	 *  to minimize lock contention for these resources.
3418 	 */
3419 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3420 
3421 	/*
3422 	 * if the packet is mcast packet send through mlo_macst handler
3423 	 * for all prnt_vdevs
3424 	 */
3425 
3426 	if (soc->arch_ops.dp_tx_mlo_mcast_send) {
3427 		nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
3428 							  nbuf,
3429 							  tx_exc_metadata);
3430 		if (!nbuf)
3431 			goto fail;
3432 	}
3433 
3434 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3435 		if (qdf_unlikely(vdev->nawds_enabled)) {
3436 			/*
3437 			 * This is a multicast packet
3438 			 */
3439 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3440 					    tx_exc_metadata->peer_id);
3441 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3442 					 1, qdf_nbuf_len(nbuf));
3443 		}
3444 
3445 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3446 					      DP_INVALID_PEER, NULL);
3447 	} else {
3448 		/*
3449 		 * Check exception descriptors
3450 		 */
3451 		if (dp_tx_exception_limit_check(vdev))
3452 			goto fail;
3453 
3454 		/*  Single linear frame */
3455 		/*
3456 		 * If nbuf is a simple linear frame, use send_single function to
3457 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3458 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3459 		 */
3460 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3461 					      tx_exc_metadata->peer_id,
3462 					      tx_exc_metadata);
3463 	}
3464 
3465 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3466 	return nbuf;
3467 
3468 send_multiple:
3469 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3470 
3471 fail:
3472 	if (vdev)
3473 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3474 	dp_verbose_debug("pkt send failed");
3475 	return nbuf;
3476 }
3477 
3478 /**
3479  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3480  *      in exception path in special case to avoid regular exception path chk.
3481  * @soc: DP soc handle
3482  * @vdev_id: id of DP vdev handle
3483  * @nbuf: skb
3484  * @tx_exc_metadata: Handle that holds exception path meta data
3485  *
3486  * Entry point for Core Tx layer (DP_TX) invoked from
3487  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3488  *
3489  * Return: NULL on success,
3490  *         nbuf when it fails to send
3491  */
3492 qdf_nbuf_t
3493 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3494 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3495 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3496 {
3497 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3498 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3499 						     DP_MOD_ID_TX_EXCEPTION);
3500 
3501 	if (qdf_unlikely(!vdev))
3502 		goto fail;
3503 
3504 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3505 			== QDF_STATUS_E_FAILURE)) {
3506 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3507 		goto fail;
3508 	}
3509 
3510 	/* Unref count as it will again be taken inside dp_tx_exception */
3511 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3512 
3513 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3514 
3515 fail:
3516 	if (vdev)
3517 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3518 	dp_verbose_debug("pkt send failed");
3519 	return nbuf;
3520 }
3521 
3522 /**
3523  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3524  * @soc: DP soc handle
3525  * @vdev_id: DP vdev handle
3526  * @nbuf: skb
3527  *
3528  * Entry point for Core Tx layer (DP_TX) invoked from
3529  * hard_start_xmit in OSIF/HDD
3530  *
3531  * Return: NULL on success,
3532  *         nbuf when it fails to send
3533  */
3534 #ifdef MESH_MODE_SUPPORT
3535 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3536 			   qdf_nbuf_t nbuf)
3537 {
3538 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3539 	struct meta_hdr_s *mhdr;
3540 	qdf_nbuf_t nbuf_mesh = NULL;
3541 	qdf_nbuf_t nbuf_clone = NULL;
3542 	struct dp_vdev *vdev;
3543 	uint8_t no_enc_frame = 0;
3544 
3545 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3546 	if (!nbuf_mesh) {
3547 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3548 				"qdf_nbuf_unshare failed");
3549 		return nbuf;
3550 	}
3551 
3552 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3553 	if (!vdev) {
3554 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3555 				"vdev is NULL for vdev_id %d", vdev_id);
3556 		return nbuf;
3557 	}
3558 
3559 	nbuf = nbuf_mesh;
3560 
3561 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3562 
3563 	if ((vdev->sec_type != cdp_sec_type_none) &&
3564 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3565 		no_enc_frame = 1;
3566 
3567 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3568 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3569 
3570 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3571 		       !no_enc_frame) {
3572 		nbuf_clone = qdf_nbuf_clone(nbuf);
3573 		if (!nbuf_clone) {
3574 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3575 				"qdf_nbuf_clone failed");
3576 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3577 			return nbuf;
3578 		}
3579 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3580 	}
3581 
3582 	if (nbuf_clone) {
3583 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3584 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3585 		} else {
3586 			qdf_nbuf_free(nbuf_clone);
3587 		}
3588 	}
3589 
3590 	if (no_enc_frame)
3591 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3592 	else
3593 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3594 
3595 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3596 	if ((!nbuf) && no_enc_frame) {
3597 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3598 	}
3599 
3600 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3601 	return nbuf;
3602 }
3603 
3604 #else
3605 
3606 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3607 			   qdf_nbuf_t nbuf)
3608 {
3609 	return dp_tx_send(soc, vdev_id, nbuf);
3610 }
3611 
3612 #endif
3613 
3614 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3615 static inline
3616 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3617 {
3618 	if (nbuf) {
3619 		qdf_prefetch(&nbuf->len);
3620 		qdf_prefetch(&nbuf->data);
3621 	}
3622 }
3623 #else
3624 static inline
3625 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3626 {
3627 }
3628 #endif
3629 
3630 #ifdef DP_UMAC_HW_RESET_SUPPORT
3631 /*
3632  * dp_tx_drop() - Drop the frame on a given VAP
3633  * @soc: DP soc handle
3634  * @vdev_id: id of DP vdev handle
3635  * @nbuf: skb
3636  *
3637  * Drop all the incoming packets
3638  *
3639  * Return: nbuf
3640  *
3641  */
3642 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3643 		      qdf_nbuf_t nbuf)
3644 {
3645 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3646 	struct dp_vdev *vdev = NULL;
3647 
3648 	vdev = soc->vdev_id_map[vdev_id];
3649 	if (qdf_unlikely(!vdev))
3650 		return nbuf;
3651 
3652 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3653 	return nbuf;
3654 }
3655 
3656 /*
3657  * dp_tx_exc_drop() - Drop the frame on a given VAP
3658  * @soc: DP soc handle
3659  * @vdev_id: id of DP vdev handle
3660  * @nbuf: skb
3661  * @tx_exc_metadata: Handle that holds exception path meta data
3662  *
3663  * Drop all the incoming packets
3664  *
3665  * Return: nbuf
3666  *
3667  */
3668 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3669 			  qdf_nbuf_t nbuf,
3670 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3671 {
3672 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3673 }
3674 #endif
3675 
3676 #ifdef FEATURE_DIRECT_LINK
3677 /*
3678  * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
3679  * @nbuf: skb
3680  * @vdev: DP vdev handle
3681  *
3682  * Return: None
3683  */
3684 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3685 {
3686 	if (qdf_unlikely(vdev->to_fw))
3687 		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
3688 }
3689 #else
3690 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3691 {
3692 }
3693 #endif
3694 
3695 /*
3696  * dp_tx_send() - Transmit a frame on a given VAP
3697  * @soc: DP soc handle
3698  * @vdev_id: id of DP vdev handle
3699  * @nbuf: skb
3700  *
3701  * Entry point for Core Tx layer (DP_TX) invoked from
3702  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3703  * cases
3704  *
3705  * Return: NULL on success,
3706  *         nbuf when it fails to send
3707  */
3708 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3709 		      qdf_nbuf_t nbuf)
3710 {
3711 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3712 	uint16_t peer_id = HTT_INVALID_PEER;
3713 	/*
3714 	 * doing a memzero is causing additional function call overhead
3715 	 * so doing static stack clearing
3716 	 */
3717 	struct dp_tx_msdu_info_s msdu_info = {0};
3718 	struct dp_vdev *vdev = NULL;
3719 	qdf_nbuf_t end_nbuf = NULL;
3720 
3721 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3722 		return nbuf;
3723 
3724 	/*
3725 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3726 	 * this in per packet path.
3727 	 *
3728 	 * As in this path vdev memory is already protected with netdev
3729 	 * tx lock
3730 	 */
3731 	vdev = soc->vdev_id_map[vdev_id];
3732 	if (qdf_unlikely(!vdev))
3733 		return nbuf;
3734 
3735 	dp_vdev_tx_mark_to_fw(nbuf, vdev);
3736 
3737 	/*
3738 	 * Set Default Host TID value to invalid TID
3739 	 * (TID override disabled)
3740 	 */
3741 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3742 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3743 
3744 	if (qdf_unlikely(vdev->mesh_vdev)) {
3745 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3746 								&msdu_info);
3747 		if (!nbuf_mesh) {
3748 			dp_verbose_debug("Extracting mesh metadata failed");
3749 			return nbuf;
3750 		}
3751 		nbuf = nbuf_mesh;
3752 	}
3753 
3754 	/*
3755 	 * Get HW Queue to use for this frame.
3756 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3757 	 * dedicated for data and 1 for command.
3758 	 * "queue_id" maps to one hardware ring.
3759 	 *  With each ring, we also associate a unique Tx descriptor pool
3760 	 *  to minimize lock contention for these resources.
3761 	 */
3762 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3763 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3764 		     1);
3765 
3766 	/*
3767 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3768 	 *  Table 1 - Default DSCP-TID mapping table
3769 	 *  Table 2 - 1 DSCP-TID override table
3770 	 *
3771 	 * If we need a different DSCP-TID mapping for this vap,
3772 	 * call tid_classify to extract DSCP/ToS from frame and
3773 	 * map to a TID and store in msdu_info. This is later used
3774 	 * to fill in TCL Input descriptor (per-packet TID override).
3775 	 */
3776 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3777 
3778 	/*
3779 	 * Classify the frame and call corresponding
3780 	 * "prepare" function which extracts the segment (TSO)
3781 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3782 	 * into MSDU_INFO structure which is later used to fill
3783 	 * SW and HW descriptors.
3784 	 */
3785 	if (qdf_nbuf_is_tso(nbuf)) {
3786 		dp_verbose_debug("TSO frame %pK", vdev);
3787 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3788 				 qdf_nbuf_len(nbuf));
3789 
3790 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3791 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3792 					 qdf_nbuf_len(nbuf));
3793 			return nbuf;
3794 		}
3795 
3796 		DP_STATS_INC(vdev,  tx_i.rcvd.num, msdu_info.num_seg - 1);
3797 
3798 		goto send_multiple;
3799 	}
3800 
3801 	/* SG */
3802 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3803 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3804 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3805 				return nbuf;
3806 		} else {
3807 			struct dp_tx_seg_info_s seg_info = {0};
3808 
3809 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
3810 				goto send_single;
3811 
3812 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3813 						&msdu_info);
3814 			if (!nbuf)
3815 				return NULL;
3816 
3817 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3818 
3819 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3820 					 qdf_nbuf_len(nbuf));
3821 
3822 			goto send_multiple;
3823 		}
3824 	}
3825 
3826 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3827 		return NULL;
3828 
3829 	if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
3830 		return nbuf;
3831 
3832 	/* RAW */
3833 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3834 		struct dp_tx_seg_info_s seg_info = {0};
3835 
3836 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3837 		if (!nbuf)
3838 			return NULL;
3839 
3840 		dp_verbose_debug("Raw frame %pK", vdev);
3841 
3842 		goto send_multiple;
3843 
3844 	}
3845 
3846 	if (qdf_unlikely(vdev->nawds_enabled)) {
3847 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3848 					  qdf_nbuf_data(nbuf);
3849 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3850 			uint16_t sa_peer_id = DP_INVALID_PEER;
3851 
3852 			if (!soc->ast_offload_support) {
3853 				struct dp_ast_entry *ast_entry = NULL;
3854 
3855 				qdf_spin_lock_bh(&soc->ast_lock);
3856 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3857 					(soc,
3858 					 (uint8_t *)(eh->ether_shost),
3859 					 vdev->pdev->pdev_id);
3860 				if (ast_entry)
3861 					sa_peer_id = ast_entry->peer_id;
3862 				qdf_spin_unlock_bh(&soc->ast_lock);
3863 			}
3864 
3865 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3866 					    sa_peer_id);
3867 		}
3868 		peer_id = DP_INVALID_PEER;
3869 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3870 				 1, qdf_nbuf_len(nbuf));
3871 	}
3872 
3873 send_single:
3874 	/*  Single linear frame */
3875 	/*
3876 	 * If nbuf is a simple linear frame, use send_single function to
3877 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3878 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3879 	 */
3880 	dp_tx_prefetch_nbuf_data(nbuf);
3881 
3882 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3883 					      peer_id, end_nbuf);
3884 	return nbuf;
3885 
3886 send_multiple:
3887 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3888 
3889 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3890 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3891 
3892 	return nbuf;
3893 }
3894 
3895 /**
3896  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3897  *      case to vaoid check in perpkt path.
3898  * @soc: DP soc handle
3899  * @vdev_id: id of DP vdev handle
3900  * @nbuf: skb
3901  *
3902  * Entry point for Core Tx layer (DP_TX) invoked from
3903  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3904  * with special condition to avoid per pkt check in dp_tx_send
3905  *
3906  * Return: NULL on success,
3907  *         nbuf when it fails to send
3908  */
3909 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3910 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3911 {
3912 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3913 	struct dp_vdev *vdev = NULL;
3914 
3915 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3916 		return nbuf;
3917 
3918 	/*
3919 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3920 	 * this in per packet path.
3921 	 *
3922 	 * As in this path vdev memory is already protected with netdev
3923 	 * tx lock
3924 	 */
3925 	vdev = soc->vdev_id_map[vdev_id];
3926 	if (qdf_unlikely(!vdev))
3927 		return nbuf;
3928 
3929 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3930 			== QDF_STATUS_E_FAILURE)) {
3931 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3932 		return nbuf;
3933 	}
3934 
3935 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3936 }
3937 
3938 #ifdef UMAC_SUPPORT_PROXY_ARP
3939 /**
3940  * dp_tx_proxy_arp() - Tx proxy arp handler
3941  * @vdev: datapath vdev handle
3942  * @buf: sk buffer
3943  *
3944  * Return: status
3945  */
3946 static inline
3947 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3948 {
3949 	if (vdev->osif_proxy_arp)
3950 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3951 
3952 	/*
3953 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3954 	 * osif_proxy_arp has a valid function pointer assigned
3955 	 * to it
3956 	 */
3957 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3958 
3959 	return QDF_STATUS_NOT_INITIALIZED;
3960 }
3961 #else
3962 /**
3963  * dp_tx_proxy_arp() - Tx proxy arp handler
3964  * @vdev: datapath vdev handle
3965  * @buf: sk buffer
3966  *
3967  * This function always return 0 when UMAC_SUPPORT_PROXY_ARP
3968  * is not defined.
3969  *
3970  * Return: status
3971  */
3972 static inline
3973 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3974 {
3975 	return QDF_STATUS_SUCCESS;
3976 }
3977 #endif
3978 
3979 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
3980 	!defined(CONFIG_MLO_SINGLE_DEV)
3981 #ifdef WLAN_MCAST_MLO
3982 static bool
3983 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3984 		       struct dp_tx_desc_s *tx_desc,
3985 		       qdf_nbuf_t nbuf,
3986 		       uint8_t reinject_reason)
3987 {
3988 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3989 		if (soc->arch_ops.dp_tx_mcast_handler)
3990 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3991 
3992 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3993 		return true;
3994 	}
3995 
3996 	return false;
3997 }
3998 #else /* WLAN_MCAST_MLO */
3999 static inline bool
4000 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4001 		       struct dp_tx_desc_s *tx_desc,
4002 		       qdf_nbuf_t nbuf,
4003 		       uint8_t reinject_reason)
4004 {
4005 	return false;
4006 }
4007 #endif /* WLAN_MCAST_MLO */
4008 #else
4009 static inline bool
4010 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4011 		       struct dp_tx_desc_s *tx_desc,
4012 		       qdf_nbuf_t nbuf,
4013 		       uint8_t reinject_reason)
4014 {
4015 	return false;
4016 }
4017 #endif
4018 
4019 /**
4020  * dp_tx_reinject_handler() - Tx Reinject Handler
4021  * @soc: datapath soc handle
4022  * @vdev: datapath vdev handle
4023  * @tx_desc: software descriptor head pointer
4024  * @status : Tx completion status from HTT descriptor
4025  * @reinject_reason : reinject reason from HTT descriptor
4026  *
4027  * This function reinjects frames back to Target.
4028  * Todo - Host queue needs to be added
4029  *
4030  * Return: none
4031  */
4032 void dp_tx_reinject_handler(struct dp_soc *soc,
4033 			    struct dp_vdev *vdev,
4034 			    struct dp_tx_desc_s *tx_desc,
4035 			    uint8_t *status,
4036 			    uint8_t reinject_reason)
4037 {
4038 	struct dp_peer *peer = NULL;
4039 	uint32_t peer_id = HTT_INVALID_PEER;
4040 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4041 	qdf_nbuf_t nbuf_copy = NULL;
4042 	struct dp_tx_msdu_info_s msdu_info;
4043 #ifdef WDS_VENDOR_EXTENSION
4044 	int is_mcast = 0, is_ucast = 0;
4045 	int num_peers_3addr = 0;
4046 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
4047 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
4048 #endif
4049 	struct dp_txrx_peer *txrx_peer;
4050 
4051 	qdf_assert(vdev);
4052 
4053 	dp_tx_debug("Tx reinject path");
4054 
4055 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
4056 			qdf_nbuf_len(tx_desc->nbuf));
4057 
4058 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
4059 		return;
4060 
4061 #ifdef WDS_VENDOR_EXTENSION
4062 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
4063 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
4064 	} else {
4065 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
4066 	}
4067 	is_ucast = !is_mcast;
4068 
4069 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4070 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4071 		txrx_peer = dp_get_txrx_peer(peer);
4072 
4073 		if (!txrx_peer || txrx_peer->bss_peer)
4074 			continue;
4075 
4076 		/* Detect wds peers that use 3-addr framing for mcast.
4077 		 * if there are any, the bss_peer is used to send the
4078 		 * the mcast frame using 3-addr format. all wds enabled
4079 		 * peers that use 4-addr framing for mcast frames will
4080 		 * be duplicated and sent as 4-addr frames below.
4081 		 */
4082 		if (!txrx_peer->wds_enabled ||
4083 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
4084 			num_peers_3addr = 1;
4085 			break;
4086 		}
4087 	}
4088 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4089 #endif
4090 
4091 	if (qdf_unlikely(vdev->mesh_vdev)) {
4092 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
4093 	} else {
4094 		qdf_spin_lock_bh(&vdev->peer_list_lock);
4095 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4096 			txrx_peer = dp_get_txrx_peer(peer);
4097 			if (!txrx_peer)
4098 				continue;
4099 
4100 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
4101 #ifdef WDS_VENDOR_EXTENSION
4102 			/*
4103 			 * . if 3-addr STA, then send on BSS Peer
4104 			 * . if Peer WDS enabled and accept 4-addr mcast,
4105 			 * send mcast on that peer only
4106 			 * . if Peer WDS enabled and accept 4-addr ucast,
4107 			 * send ucast on that peer only
4108 			 */
4109 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
4110 			 (txrx_peer->wds_enabled &&
4111 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
4112 			 (is_ucast &&
4113 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
4114 #else
4115 			(txrx_peer->bss_peer &&
4116 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
4117 #endif
4118 				peer_id = DP_INVALID_PEER;
4119 
4120 				nbuf_copy = qdf_nbuf_copy(nbuf);
4121 
4122 				if (!nbuf_copy) {
4123 					dp_tx_debug("nbuf copy failed");
4124 					break;
4125 				}
4126 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
4127 				dp_tx_get_queue(vdev, nbuf,
4128 						&msdu_info.tx_queue);
4129 
4130 				nbuf_copy = dp_tx_send_msdu_single(vdev,
4131 						nbuf_copy,
4132 						&msdu_info,
4133 						peer_id,
4134 						NULL);
4135 
4136 				if (nbuf_copy) {
4137 					dp_tx_debug("pkt send failed");
4138 					qdf_nbuf_free(nbuf_copy);
4139 				}
4140 			}
4141 		}
4142 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4143 
4144 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
4145 					     QDF_DMA_TO_DEVICE, nbuf->len);
4146 		qdf_nbuf_free(nbuf);
4147 	}
4148 
4149 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4150 }
4151 
4152 /**
4153  * dp_tx_inspect_handler() - Tx Inspect Handler
4154  * @soc: datapath soc handle
4155  * @vdev: datapath vdev handle
4156  * @tx_desc: software descriptor head pointer
4157  * @status : Tx completion status from HTT descriptor
4158  *
4159  * Handles Tx frames sent back to Host for inspection
4160  * (ProxyARP)
4161  *
4162  * Return: none
4163  */
4164 void dp_tx_inspect_handler(struct dp_soc *soc,
4165 			   struct dp_vdev *vdev,
4166 			   struct dp_tx_desc_s *tx_desc,
4167 			   uint8_t *status)
4168 {
4169 
4170 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4171 			"%s Tx inspect path",
4172 			__func__);
4173 
4174 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
4175 			 qdf_nbuf_len(tx_desc->nbuf));
4176 
4177 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4178 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4179 }
4180 
4181 #ifdef MESH_MODE_SUPPORT
4182 /**
4183  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
4184  *                                         in mesh meta header
4185  * @tx_desc: software descriptor head pointer
4186  * @ts: pointer to tx completion stats
4187  * Return: none
4188  */
4189 static
4190 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4191 		struct hal_tx_completion_status *ts)
4192 {
4193 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4194 
4195 	if (!tx_desc->msdu_ext_desc) {
4196 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4197 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4198 				"netbuf %pK offset %d",
4199 				netbuf, tx_desc->pkt_offset);
4200 			return;
4201 		}
4202 	}
4203 }
4204 
4205 #else
4206 static
4207 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4208 		struct hal_tx_completion_status *ts)
4209 {
4210 }
4211 
4212 #endif
4213 
4214 #ifdef CONFIG_SAWF
4215 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4216 					 struct dp_vdev *vdev,
4217 					 struct dp_txrx_peer *txrx_peer,
4218 					 struct dp_tx_desc_s *tx_desc,
4219 					 struct hal_tx_completion_status *ts,
4220 					 uint8_t tid)
4221 {
4222 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4223 					   ts, tid);
4224 }
4225 
4226 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4227 				    uint32_t nw_delay,
4228 				    uint32_t sw_delay,
4229 				    uint32_t hw_delay)
4230 {
4231 	dp_peer_tid_delay_avg(tx_delay,
4232 			      nw_delay,
4233 			      sw_delay,
4234 			      hw_delay);
4235 }
4236 #else
4237 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4238 					 struct dp_vdev *vdev,
4239 					 struct dp_txrx_peer *txrx_peer,
4240 					 struct dp_tx_desc_s *tx_desc,
4241 					 struct hal_tx_completion_status *ts,
4242 					 uint8_t tid)
4243 {
4244 }
4245 
4246 static inline void
4247 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4248 			uint32_t nw_delay, uint32_t sw_delay,
4249 			uint32_t hw_delay)
4250 {
4251 }
4252 #endif
4253 
4254 #ifdef QCA_PEER_EXT_STATS
4255 #ifdef WLAN_CONFIG_TX_DELAY
4256 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4257 				    struct dp_tx_desc_s *tx_desc,
4258 				    struct hal_tx_completion_status *ts,
4259 				    struct dp_vdev *vdev)
4260 {
4261 	struct dp_soc *soc = vdev->pdev->soc;
4262 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4263 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4264 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4265 
4266 	if (!ts->valid)
4267 		return;
4268 
4269 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4270 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4271 
4272 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4273 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4274 
4275 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4276 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4277 							  &fwhw_transmit_delay))
4278 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4279 					     fwhw_transmit_delay);
4280 
4281 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4282 				fwhw_transmit_delay);
4283 }
4284 #else
4285 /*
4286  * dp_tx_compute_tid_delay() - Compute per TID delay
4287  * @stats: Per TID delay stats
4288  * @tx_desc: Software Tx descriptor
4289  * @ts: Tx completion status
4290  * @vdev: vdev
4291  *
4292  * Compute the software enqueue and hw enqueue delays and
4293  * update the respective histograms
4294  *
4295  * Return: void
4296  */
4297 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4298 				    struct dp_tx_desc_s *tx_desc,
4299 				    struct hal_tx_completion_status *ts,
4300 				    struct dp_vdev *vdev)
4301 {
4302 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4303 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4304 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4305 
4306 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4307 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4308 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4309 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4310 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4311 					 timestamp_hw_enqueue);
4312 
4313 	/*
4314 	 * Update the Tx software enqueue delay and HW enque-Completion delay.
4315 	 */
4316 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4317 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4318 }
4319 #endif
4320 
4321 /*
4322  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4323  * @txrx_peer: DP peer context
4324  * @tx_desc: Tx software descriptor
4325  * @tid: Transmission ID
4326  * @ring_id: Rx CPU context ID/CPU_ID
4327  *
4328  * Update the peer extended stats. These are enhanced other
4329  * delay stats per msdu level.
4330  *
4331  * Return: void
4332  */
4333 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4334 					  struct dp_tx_desc_s *tx_desc,
4335 					  struct hal_tx_completion_status *ts,
4336 					  uint8_t ring_id)
4337 {
4338 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4339 	struct dp_soc *soc = NULL;
4340 	struct dp_peer_delay_stats *delay_stats = NULL;
4341 	uint8_t tid;
4342 
4343 	soc = pdev->soc;
4344 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4345 		return;
4346 
4347 	if (!txrx_peer->delay_stats)
4348 		return;
4349 
4350 	tid = ts->tid;
4351 	delay_stats = txrx_peer->delay_stats;
4352 
4353 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4354 
4355 	/*
4356 	 * For non-TID packets use the TID 9
4357 	 */
4358 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4359 		tid = CDP_MAX_DATA_TIDS - 1;
4360 
4361 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4362 				tx_desc, ts, txrx_peer->vdev);
4363 }
4364 #else
4365 static inline
4366 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4367 				   struct dp_tx_desc_s *tx_desc,
4368 				   struct hal_tx_completion_status *ts,
4369 				   uint8_t ring_id)
4370 {
4371 }
4372 #endif
4373 
4374 #ifdef WLAN_PEER_JITTER
4375 /*
4376  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4377  * @curr_delay: Current delay
4378  * @prev_Delay: Previous delay
4379  * @avg_jitter: Average Jitter
4380  * Return: Newly Computed Average Jitter
4381  */
4382 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4383 					    uint32_t prev_delay,
4384 					    uint32_t avg_jitter)
4385 {
4386 	uint32_t curr_jitter;
4387 	int32_t jitter_diff;
4388 
4389 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4390 	if (!avg_jitter)
4391 		return curr_jitter;
4392 
4393 	jitter_diff = curr_jitter - avg_jitter;
4394 	if (jitter_diff < 0)
4395 		avg_jitter = avg_jitter -
4396 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4397 	else
4398 		avg_jitter = avg_jitter +
4399 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4400 
4401 	return avg_jitter;
4402 }
4403 
4404 /*
4405  * dp_tx_jitter_get_avg_delay() - compute the average delay
4406  * @curr_delay: Current delay
4407  * @avg_Delay: Average delay
4408  * Return: Newly Computed Average Delay
4409  */
4410 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4411 					   uint32_t avg_delay)
4412 {
4413 	int32_t delay_diff;
4414 
4415 	if (!avg_delay)
4416 		return curr_delay;
4417 
4418 	delay_diff = curr_delay - avg_delay;
4419 	if (delay_diff < 0)
4420 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4421 					DP_AVG_DELAY_WEIGHT_DENOM);
4422 	else
4423 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4424 					DP_AVG_DELAY_WEIGHT_DENOM);
4425 
4426 	return avg_delay;
4427 }
4428 
4429 #ifdef WLAN_CONFIG_TX_DELAY
4430 /*
4431  * dp_tx_compute_cur_delay() - get the current delay
4432  * @soc: soc handle
4433  * @vdev: vdev structure for data path state
4434  * @ts: Tx completion status
4435  * @curr_delay: current delay
4436  * @tx_desc: tx descriptor
4437  * Return: void
4438  */
4439 static
4440 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4441 				   struct dp_vdev *vdev,
4442 				   struct hal_tx_completion_status *ts,
4443 				   uint32_t *curr_delay,
4444 				   struct dp_tx_desc_s *tx_desc)
4445 {
4446 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4447 
4448 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4449 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4450 							      curr_delay);
4451 	return status;
4452 }
4453 #else
4454 static
4455 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4456 				   struct dp_vdev *vdev,
4457 				   struct hal_tx_completion_status *ts,
4458 				   uint32_t *curr_delay,
4459 				   struct dp_tx_desc_s *tx_desc)
4460 {
4461 	int64_t current_timestamp, timestamp_hw_enqueue;
4462 
4463 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4464 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4465 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4466 
4467 	return QDF_STATUS_SUCCESS;
4468 }
4469 #endif
4470 
4471 /* dp_tx_compute_tid_jitter() - compute per tid per ring jitter
4472  * @jiiter - per tid per ring jitter stats
4473  * @ts: Tx completion status
4474  * @vdev - vdev structure for data path state
4475  * @tx_desc - tx descriptor
4476  * Return: void
4477  */
4478 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
4479 				     struct hal_tx_completion_status *ts,
4480 				     struct dp_vdev *vdev,
4481 				     struct dp_tx_desc_s *tx_desc)
4482 {
4483 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
4484 	struct dp_soc *soc = vdev->pdev->soc;
4485 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4486 
4487 	if (ts->status !=  HAL_TX_TQM_RR_FRAME_ACKED) {
4488 		jitter->tx_drop += 1;
4489 		return;
4490 	}
4491 
4492 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
4493 					 tx_desc);
4494 
4495 	if (QDF_IS_STATUS_SUCCESS(status)) {
4496 		avg_delay = jitter->tx_avg_delay;
4497 		avg_jitter = jitter->tx_avg_jitter;
4498 		prev_delay = jitter->tx_prev_delay;
4499 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
4500 							 prev_delay,
4501 							 avg_jitter);
4502 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
4503 		jitter->tx_avg_delay = avg_delay;
4504 		jitter->tx_avg_jitter = avg_jitter;
4505 		jitter->tx_prev_delay = curr_delay;
4506 		jitter->tx_total_success += 1;
4507 	} else if (status == QDF_STATUS_E_FAILURE) {
4508 		jitter->tx_avg_err += 1;
4509 	}
4510 }
4511 
4512 /* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
4513  * @txrx_peer: DP peer context
4514  * @tx_desc: Tx software descriptor
4515  * @ts: Tx completion status
4516  * @ring_id: Rx CPU context ID/CPU_ID
4517  * Return: void
4518  */
4519 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4520 					   struct dp_tx_desc_s *tx_desc,
4521 					   struct hal_tx_completion_status *ts,
4522 					   uint8_t ring_id)
4523 {
4524 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4525 	struct dp_soc *soc = pdev->soc;
4526 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4527 	uint8_t tid;
4528 	struct cdp_peer_tid_stats *rx_tid = NULL;
4529 
4530 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
4531 		return;
4532 
4533 	tid = ts->tid;
4534 	jitter_stats = txrx_peer->jitter_stats;
4535 	qdf_assert_always(jitter_stats);
4536 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4537 	/*
4538 	 * For non-TID packets use the TID 9
4539 	 */
4540 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4541 		tid = CDP_MAX_DATA_TIDS - 1;
4542 
4543 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
4544 	dp_tx_compute_tid_jitter(rx_tid,
4545 				 ts, txrx_peer->vdev, tx_desc);
4546 }
4547 #else
4548 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4549 					   struct dp_tx_desc_s *tx_desc,
4550 					   struct hal_tx_completion_status *ts,
4551 					   uint8_t ring_id)
4552 {
4553 }
4554 #endif
4555 
4556 #ifdef HW_TX_DELAY_STATS_ENABLE
4557 /**
4558  * dp_update_tx_delay_stats() - update the delay stats
4559  * @vdev: vdev handle
4560  * @delay: delay in ms or us based on the flag delay_in_us
4561  * @tid: tid value
4562  * @mode: type of tx delay mode
4563  * @ring id: ring number
4564  * @delay_in_us: flag to indicate whether the delay is in ms or us
4565  *
4566  * Return: none
4567  */
4568 static inline
4569 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4570 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4571 {
4572 	struct cdp_tid_tx_stats *tstats =
4573 		&vdev->stats.tid_tx_stats[ring_id][tid];
4574 
4575 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4576 			      delay_in_us);
4577 }
4578 #else
4579 static inline
4580 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4581 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4582 {
4583 	struct cdp_tid_tx_stats *tstats =
4584 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4585 
4586 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4587 			      delay_in_us);
4588 }
4589 #endif
4590 
4591 /**
4592  * dp_tx_compute_delay() - Compute and fill in all timestamps
4593  *				to pass in correct fields
4594  *
4595  * @vdev: pdev handle
4596  * @tx_desc: tx descriptor
4597  * @tid: tid value
4598  * @ring_id: TCL or WBM ring number for transmit path
4599  * Return: none
4600  */
4601 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4602 			 uint8_t tid, uint8_t ring_id)
4603 {
4604 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4605 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4606 	uint32_t fwhw_transmit_delay_us;
4607 
4608 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4609 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4610 		return;
4611 
4612 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4613 		fwhw_transmit_delay_us =
4614 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4615 			qdf_ktime_to_us(tx_desc->timestamp);
4616 
4617 		/*
4618 		 * Delay between packet enqueued to HW and Tx completion in us
4619 		 */
4620 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4621 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4622 					 ring_id, true);
4623 		/*
4624 		 * For MCL, only enqueue to completion delay is required
4625 		 * so return if the vdev flag is enabled.
4626 		 */
4627 		return;
4628 	}
4629 
4630 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4631 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4632 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4633 					 timestamp_hw_enqueue);
4634 
4635 	if (!timestamp_hw_enqueue)
4636 		return;
4637 	/*
4638 	 * Delay between packet enqueued to HW and Tx completion in ms
4639 	 */
4640 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4641 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4642 				 false);
4643 
4644 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4645 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4646 	interframe_delay = (uint32_t)(timestamp_ingress -
4647 				      vdev->prev_tx_enq_tstamp);
4648 
4649 	/*
4650 	 * Delay in software enqueue
4651 	 */
4652 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4653 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4654 				 false);
4655 
4656 	/*
4657 	 * Update interframe delay stats calculated at hardstart receive point.
4658 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4659 	 * interframe delay will not be calculate correctly for 1st frame.
4660 	 * On the other side, this will help in avoiding extra per packet check
4661 	 * of !vdev->prev_tx_enq_tstamp.
4662 	 */
4663 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4664 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4665 				 false);
4666 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4667 }
4668 
4669 #ifdef DISABLE_DP_STATS
4670 static
4671 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4672 				   struct dp_txrx_peer *txrx_peer)
4673 {
4674 }
4675 #else
4676 static inline void
4677 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4678 {
4679 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4680 
4681 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4682 	if (subtype != QDF_PROTO_INVALID)
4683 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4684 					  1);
4685 }
4686 #endif
4687 
4688 #ifndef QCA_ENHANCED_STATS_SUPPORT
4689 #ifdef DP_PEER_EXTENDED_API
4690 static inline uint8_t
4691 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4692 {
4693 	return txrx_peer->mpdu_retry_threshold;
4694 }
4695 #else
4696 static inline uint8_t
4697 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4698 {
4699 	return 0;
4700 }
4701 #endif
4702 
4703 /**
4704  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4705  *
4706  * @ts: Tx compltion status
4707  * @txrx_peer: datapath txrx_peer handle
4708  *
4709  * Return: void
4710  */
4711 static inline void
4712 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4713 			     struct dp_txrx_peer *txrx_peer)
4714 {
4715 	uint8_t mcs, pkt_type, dst_mcs_idx;
4716 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4717 
4718 	mcs = ts->mcs;
4719 	pkt_type = ts->pkt_type;
4720 	/* do HW to SW pkt type conversion */
4721 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4722 		    hal_2_dp_pkt_type_map[pkt_type]);
4723 
4724 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4725 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4726 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4727 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4728 				       1);
4729 
4730 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4731 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4732 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4733 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4734 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4735 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4736 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4737 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4738 	if (ts->first_msdu) {
4739 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4740 					ts->transmit_cnt > 1);
4741 
4742 		if (!retry_threshold)
4743 			return;
4744 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4745 					qdf_do_div(ts->transmit_cnt,
4746 						   retry_threshold),
4747 					ts->transmit_cnt > retry_threshold);
4748 	}
4749 }
4750 #else
4751 static inline void
4752 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4753 			     struct dp_txrx_peer *txrx_peer)
4754 {
4755 }
4756 #endif
4757 
4758 /**
4759  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4760  *				per wbm ring
4761  *
4762  * @tx_desc: software descriptor head pointer
4763  * @ts: Tx completion status
4764  * @peer: peer handle
4765  * @ring_id: ring number
4766  *
4767  * Return: None
4768  */
4769 static inline void
4770 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4771 			struct hal_tx_completion_status *ts,
4772 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4773 {
4774 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4775 	uint8_t tid = ts->tid;
4776 	uint32_t length;
4777 	struct cdp_tid_tx_stats *tid_stats;
4778 
4779 	if (!pdev)
4780 		return;
4781 
4782 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4783 		tid = CDP_MAX_DATA_TIDS - 1;
4784 
4785 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4786 
4787 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4788 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4789 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4790 		return;
4791 	}
4792 
4793 	length = qdf_nbuf_len(tx_desc->nbuf);
4794 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4795 
4796 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4797 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4798 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4799 
4800 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4801 		tid_stats->tqm_status_cnt[ts->status]++;
4802 	}
4803 
4804 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4805 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4806 					   ts->transmit_cnt > 1);
4807 
4808 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4809 					   1, ts->transmit_cnt > 2);
4810 
4811 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4812 
4813 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4814 					   ts->msdu_part_of_amsdu);
4815 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4816 					   !ts->msdu_part_of_amsdu);
4817 
4818 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4819 							qdf_system_ticks();
4820 
4821 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4822 
4823 		return;
4824 	}
4825 
4826 	/*
4827 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4828 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
4829 	 * hw limitation there are no completions for failed cases.
4830 	 * Hence updating tx_failed from data path. Please note that
4831 	 * if tx_failed is fixed to be from ppdu, then this has to be
4832 	 * removed
4833 	 */
4834 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4835 
4836 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4837 				   ts->transmit_cnt > DP_RETRY_COUNT);
4838 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4839 
4840 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4841 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4842 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4843 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4844 					      length);
4845 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4846 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4847 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4848 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4849 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4850 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4851 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4852 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4853 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4854 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4855 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4856 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4857 					  tx.dropped.fw_rem_queue_disable, 1);
4858 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4859 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4860 					  tx.dropped.fw_rem_no_match, 1);
4861 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4862 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4863 					  tx.dropped.drop_threshold, 1);
4864 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4865 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4866 					  tx.dropped.drop_link_desc_na, 1);
4867 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4868 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4869 					  tx.dropped.invalid_drop, 1);
4870 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4871 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4872 					  tx.dropped.mcast_vdev_drop, 1);
4873 	} else {
4874 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4875 	}
4876 }
4877 
4878 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4879 /**
4880  * dp_tx_flow_pool_lock() - take flow pool lock
4881  * @soc: core txrx main context
4882  * @tx_desc: tx desc
4883  *
4884  * Return: None
4885  */
4886 static inline
4887 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4888 			  struct dp_tx_desc_s *tx_desc)
4889 {
4890 	struct dp_tx_desc_pool_s *pool;
4891 	uint8_t desc_pool_id;
4892 
4893 	desc_pool_id = tx_desc->pool_id;
4894 	pool = &soc->tx_desc[desc_pool_id];
4895 
4896 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4897 }
4898 
4899 /**
4900  * dp_tx_flow_pool_unlock() - release flow pool lock
4901  * @soc: core txrx main context
4902  * @tx_desc: tx desc
4903  *
4904  * Return: None
4905  */
4906 static inline
4907 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4908 			    struct dp_tx_desc_s *tx_desc)
4909 {
4910 	struct dp_tx_desc_pool_s *pool;
4911 	uint8_t desc_pool_id;
4912 
4913 	desc_pool_id = tx_desc->pool_id;
4914 	pool = &soc->tx_desc[desc_pool_id];
4915 
4916 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4917 }
4918 #else
4919 static inline
4920 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4921 {
4922 }
4923 
4924 static inline
4925 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4926 {
4927 }
4928 #endif
4929 
4930 /**
4931  * dp_tx_notify_completion() - Notify tx completion for this desc
4932  * @soc: core txrx main context
4933  * @vdev: datapath vdev handle
4934  * @tx_desc: tx desc
4935  * @netbuf:  buffer
4936  * @status: tx status
4937  *
4938  * Return: none
4939  */
4940 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4941 					   struct dp_vdev *vdev,
4942 					   struct dp_tx_desc_s *tx_desc,
4943 					   qdf_nbuf_t netbuf,
4944 					   uint8_t status)
4945 {
4946 	void *osif_dev;
4947 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4948 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4949 
4950 	qdf_assert(tx_desc);
4951 
4952 	if (!vdev ||
4953 	    !vdev->osif_vdev) {
4954 		return;
4955 	}
4956 
4957 	osif_dev = vdev->osif_vdev;
4958 	tx_compl_cbk = vdev->tx_comp;
4959 
4960 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4961 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4962 
4963 	if (tx_compl_cbk)
4964 		tx_compl_cbk(netbuf, osif_dev, flag);
4965 }
4966 
4967 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
4968  * @pdev: pdev handle
4969  * @tid: tid value
4970  * @txdesc_ts: timestamp from txdesc
4971  * @ppdu_id: ppdu id
4972  *
4973  * Return: none
4974  */
4975 #ifdef FEATURE_PERPKT_INFO
4976 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4977 					       struct dp_txrx_peer *txrx_peer,
4978 					       uint8_t tid,
4979 					       uint64_t txdesc_ts,
4980 					       uint32_t ppdu_id)
4981 {
4982 	uint64_t delta_ms;
4983 	struct cdp_tx_sojourn_stats *sojourn_stats;
4984 	struct dp_peer *primary_link_peer = NULL;
4985 	struct dp_soc *link_peer_soc = NULL;
4986 
4987 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4988 		return;
4989 
4990 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4991 			 tid >= CDP_DATA_TID_MAX))
4992 		return;
4993 
4994 	if (qdf_unlikely(!pdev->sojourn_buf))
4995 		return;
4996 
4997 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4998 							   txrx_peer->peer_id,
4999 							   DP_MOD_ID_TX_COMP);
5000 
5001 	if (qdf_unlikely(!primary_link_peer))
5002 		return;
5003 
5004 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
5005 		qdf_nbuf_data(pdev->sojourn_buf);
5006 
5007 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
5008 	sojourn_stats->cookie = (void *)
5009 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
5010 							  primary_link_peer);
5011 
5012 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
5013 				txdesc_ts;
5014 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
5015 			    delta_ms);
5016 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
5017 	sojourn_stats->num_msdus[tid] = 1;
5018 	sojourn_stats->avg_sojourn_msdu[tid].internal =
5019 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
5020 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
5021 			     pdev->sojourn_buf, HTT_INVALID_PEER,
5022 			     WDI_NO_VAL, pdev->pdev_id);
5023 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
5024 	sojourn_stats->num_msdus[tid] = 0;
5025 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
5026 
5027 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
5028 }
5029 #else
5030 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
5031 					       struct dp_txrx_peer *txrx_peer,
5032 					       uint8_t tid,
5033 					       uint64_t txdesc_ts,
5034 					       uint32_t ppdu_id)
5035 {
5036 }
5037 #endif
5038 
5039 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
5040 /**
5041  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
5042  * @soc: dp_soc handle
5043  * @desc: Tx Descriptor
5044  * @ts: HAL Tx completion descriptor contents
5045  *
5046  * This function is used to send tx completion to packet capture
5047  */
5048 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
5049 				       struct dp_tx_desc_s *desc,
5050 				       struct hal_tx_completion_status *ts)
5051 {
5052 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
5053 			     desc, ts->peer_id,
5054 			     WDI_NO_VAL, desc->pdev->pdev_id);
5055 }
5056 #endif
5057 
5058 /**
5059  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
5060  * @soc: DP Soc handle
5061  * @tx_desc: software Tx descriptor
5062  * @ts : Tx completion status from HAL/HTT descriptor
5063  *
5064  * Return: none
5065  */
5066 void
5067 dp_tx_comp_process_desc(struct dp_soc *soc,
5068 			struct dp_tx_desc_s *desc,
5069 			struct hal_tx_completion_status *ts,
5070 			struct dp_txrx_peer *txrx_peer)
5071 {
5072 	uint64_t time_latency = 0;
5073 	uint16_t peer_id = DP_INVALID_PEER_ID;
5074 
5075 	/*
5076 	 * m_copy/tx_capture modes are not supported for
5077 	 * scatter gather packets
5078 	 */
5079 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
5080 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
5081 				qdf_ktime_to_ms(desc->timestamp));
5082 	}
5083 
5084 	dp_send_completion_to_pkt_capture(soc, desc, ts);
5085 
5086 	if (dp_tx_pkt_tracepoints_enabled())
5087 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
5088 				    desc->msdu_ext_desc ?
5089 				    desc->msdu_ext_desc->tso_desc : NULL,
5090 				    qdf_ktime_to_ms(desc->timestamp));
5091 
5092 	if (!(desc->msdu_ext_desc)) {
5093 		dp_tx_enh_unmap(soc, desc);
5094 		if (txrx_peer)
5095 			peer_id = txrx_peer->peer_id;
5096 
5097 		if (QDF_STATUS_SUCCESS ==
5098 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
5099 			return;
5100 		}
5101 
5102 		if (QDF_STATUS_SUCCESS ==
5103 		    dp_get_completion_indication_for_stack(soc,
5104 							   desc->pdev,
5105 							   txrx_peer, ts,
5106 							   desc->nbuf,
5107 							   time_latency)) {
5108 			dp_send_completion_to_stack(soc,
5109 						    desc->pdev,
5110 						    ts->peer_id,
5111 						    ts->ppdu_id,
5112 						    desc->nbuf);
5113 			return;
5114 		}
5115 	}
5116 
5117 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
5118 	dp_tx_comp_free_buf(soc, desc, false);
5119 }
5120 
5121 #ifdef DISABLE_DP_STATS
5122 /**
5123  * dp_tx_update_connectivity_stats() - update tx connectivity stats
5124  * @soc: core txrx main context
5125  * @tx_desc: tx desc
5126  * @status: tx status
5127  *
5128  * Return: none
5129  */
5130 static inline
5131 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5132 				     struct dp_vdev *vdev,
5133 				     struct dp_tx_desc_s *tx_desc,
5134 				     uint8_t status)
5135 {
5136 }
5137 #else
5138 static inline
5139 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5140 				     struct dp_vdev *vdev,
5141 				     struct dp_tx_desc_s *tx_desc,
5142 				     uint8_t status)
5143 {
5144 	void *osif_dev;
5145 	ol_txrx_stats_rx_fp stats_cbk;
5146 	uint8_t pkt_type;
5147 
5148 	qdf_assert(tx_desc);
5149 
5150 	if (!vdev ||
5151 	    !vdev->osif_vdev ||
5152 	    !vdev->stats_cb)
5153 		return;
5154 
5155 	osif_dev = vdev->osif_vdev;
5156 	stats_cbk = vdev->stats_cb;
5157 
5158 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5159 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5160 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5161 			  &pkt_type);
5162 }
5163 #endif
5164 
5165 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
5166 /* Mask for bit29 ~ bit31 */
5167 #define DP_TX_TS_BIT29_31_MASK 0xE0000000
5168 /* Timestamp value (unit us) if bit29 is set */
5169 #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
5170 /**
5171  * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
5172  * @ack_ts: OTA ack timestamp, unit us.
5173  * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
5174  * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
5175  *
5176  * this function will restore the bit29 ~ bit31 3 bits value for
5177  * buffer_timestamp in wbm2sw ring entry, currently buffer_timestamp only
5178  * can support 0x7FFF * 1024 us (29 bits), but if the timestamp is >
5179  * 0x7FFF * 1024 us, bit29~ bit31 will be lost.
5180  *
5181  * Return: the adjusted buffer_timestamp value
5182  */
5183 static inline
5184 uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
5185 					uint32_t enqueue_ts,
5186 					uint32_t base_delta_ts)
5187 {
5188 	uint32_t ack_buffer_ts;
5189 	uint32_t ack_buffer_ts_bit29_31;
5190 	uint32_t adjusted_enqueue_ts;
5191 
5192 	/* corresponding buffer_timestamp value when receive OTA Ack */
5193 	ack_buffer_ts = ack_ts - base_delta_ts;
5194 	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
5195 
5196 	/* restore the bit29 ~ bit31 value */
5197 	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
5198 
5199 	/*
5200 	 * if actual enqueue_ts value occupied 29 bits only, this enqueue_ts
5201 	 * value + real UL delay overflow 29 bits, then 30th bit (bit-29)
5202 	 * should not be marked, otherwise extra 0x20000000 us is added to
5203 	 * enqueue_ts.
5204 	 */
5205 	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
5206 		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
5207 
5208 	return adjusted_enqueue_ts;
5209 }
5210 
5211 QDF_STATUS
5212 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5213 			  uint32_t delta_tsf,
5214 			  uint32_t *delay_us)
5215 {
5216 	uint32_t buffer_ts;
5217 	uint32_t delay;
5218 
5219 	if (!delay_us)
5220 		return QDF_STATUS_E_INVAL;
5221 
5222 	/* Tx_rate_stats_info_valid is 0 and tsf is invalid then */
5223 	if (!ts->valid)
5224 		return QDF_STATUS_E_INVAL;
5225 
5226 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5227 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5228 	 * valid up to 29 bits.
5229 	 */
5230 	buffer_ts = ts->buffer_timestamp << 10;
5231 	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
5232 						   buffer_ts, delta_tsf);
5233 
5234 	delay = ts->tsf - buffer_ts - delta_tsf;
5235 
5236 	if (qdf_unlikely(delay & 0x80000000)) {
5237 		dp_err_rl("delay = 0x%x (-ve)\n"
5238 			  "release_src = %d\n"
5239 			  "ppdu_id = 0x%x\n"
5240 			  "peer_id = 0x%x\n"
5241 			  "tid = 0x%x\n"
5242 			  "release_reason = %d\n"
5243 			  "tsf = %u (0x%x)\n"
5244 			  "buffer_timestamp = %u (0x%x)\n"
5245 			  "delta_tsf = %u (0x%x)\n",
5246 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5247 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5248 			  ts->buffer_timestamp, ts->buffer_timestamp,
5249 			  delta_tsf, delta_tsf);
5250 
5251 		delay = 0;
5252 		goto end;
5253 	}
5254 
5255 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
5256 	if (delay > 0x1000000) {
5257 		dp_info_rl("----------------------\n"
5258 			   "Tx completion status:\n"
5259 			   "----------------------\n"
5260 			   "release_src = %d\n"
5261 			   "ppdu_id = 0x%x\n"
5262 			   "release_reason = %d\n"
5263 			   "tsf = %u (0x%x)\n"
5264 			   "buffer_timestamp = %u (0x%x)\n"
5265 			   "delta_tsf = %u (0x%x)\n",
5266 			   ts->release_src, ts->ppdu_id, ts->status,
5267 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5268 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5269 		return QDF_STATUS_E_FAILURE;
5270 	}
5271 
5272 
5273 end:
5274 	*delay_us = delay;
5275 
5276 	return QDF_STATUS_SUCCESS;
5277 }
5278 
5279 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5280 		      uint32_t delta_tsf)
5281 {
5282 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5283 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5284 						     DP_MOD_ID_CDP);
5285 
5286 	if (!vdev) {
5287 		dp_err_rl("vdev %d does not exist", vdev_id);
5288 		return;
5289 	}
5290 
5291 	vdev->delta_tsf = delta_tsf;
5292 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5293 
5294 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5295 }
5296 #endif
5297 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5298 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5299 				      uint8_t vdev_id, bool enable)
5300 {
5301 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5302 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5303 						     DP_MOD_ID_CDP);
5304 
5305 	if (!vdev) {
5306 		dp_err_rl("vdev %d does not exist", vdev_id);
5307 		return QDF_STATUS_E_FAILURE;
5308 	}
5309 
5310 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5311 
5312 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5313 
5314 	return QDF_STATUS_SUCCESS;
5315 }
5316 
5317 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5318 			       uint32_t *val)
5319 {
5320 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5321 	struct dp_vdev *vdev;
5322 	uint32_t delay_accum;
5323 	uint32_t pkts_accum;
5324 
5325 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5326 	if (!vdev) {
5327 		dp_err_rl("vdev %d does not exist", vdev_id);
5328 		return QDF_STATUS_E_FAILURE;
5329 	}
5330 
5331 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5332 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5333 		return QDF_STATUS_E_FAILURE;
5334 	}
5335 
5336 	/* Average uplink delay based on current accumulated values */
5337 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5338 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5339 
5340 	*val = delay_accum / pkts_accum;
5341 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5342 		 delay_accum, pkts_accum);
5343 
5344 	/* Reset accumulated values to 0 */
5345 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5346 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5347 
5348 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5349 
5350 	return QDF_STATUS_SUCCESS;
5351 }
5352 
5353 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5354 				      struct hal_tx_completion_status *ts)
5355 {
5356 	uint32_t ul_delay;
5357 
5358 	if (qdf_unlikely(!vdev)) {
5359 		dp_info_rl("vdev is null or delete in progress");
5360 		return;
5361 	}
5362 
5363 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5364 		return;
5365 
5366 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5367 							  vdev->delta_tsf,
5368 							  &ul_delay)))
5369 		return;
5370 
5371 	ul_delay /= 1000; /* in unit of ms */
5372 
5373 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5374 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5375 }
5376 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5377 static inline
5378 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5379 			       struct hal_tx_completion_status *ts)
5380 {
5381 }
5382 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5383 
5384 /**
5385  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
5386  * @soc: DP soc handle
5387  * @tx_desc: software descriptor head pointer
5388  * @ts: Tx completion status
5389  * @txrx_peer: txrx peer handle
5390  * @ring_id: ring number
5391  *
5392  * Return: none
5393  */
5394 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
5395 				  struct dp_tx_desc_s *tx_desc,
5396 				  struct hal_tx_completion_status *ts,
5397 				  struct dp_txrx_peer *txrx_peer,
5398 				  uint8_t ring_id)
5399 {
5400 	uint32_t length;
5401 	qdf_ether_header_t *eh;
5402 	struct dp_vdev *vdev = NULL;
5403 	qdf_nbuf_t nbuf = tx_desc->nbuf;
5404 	enum qdf_dp_tx_rx_status dp_status;
5405 
5406 	if (!nbuf) {
5407 		dp_info_rl("invalid tx descriptor. nbuf NULL");
5408 		goto out;
5409 	}
5410 
5411 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
5412 	length = dp_tx_get_pkt_len(tx_desc);
5413 
5414 	dp_status = dp_tx_hw_to_qdf(ts->status);
5415 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
5416 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
5417 				 QDF_TRACE_DEFAULT_PDEV_ID,
5418 				 qdf_nbuf_data_addr(nbuf),
5419 				 sizeof(qdf_nbuf_data(nbuf)),
5420 				 tx_desc->id, ts->status, dp_status));
5421 
5422 	dp_tx_comp_debug("-------------------- \n"
5423 			 "Tx Completion Stats: \n"
5424 			 "-------------------- \n"
5425 			 "ack_frame_rssi = %d \n"
5426 			 "first_msdu = %d \n"
5427 			 "last_msdu = %d \n"
5428 			 "msdu_part_of_amsdu = %d \n"
5429 			 "rate_stats valid = %d \n"
5430 			 "bw = %d \n"
5431 			 "pkt_type = %d \n"
5432 			 "stbc = %d \n"
5433 			 "ldpc = %d \n"
5434 			 "sgi = %d \n"
5435 			 "mcs = %d \n"
5436 			 "ofdma = %d \n"
5437 			 "tones_in_ru = %d \n"
5438 			 "tsf = %d \n"
5439 			 "ppdu_id = %d \n"
5440 			 "transmit_cnt = %d \n"
5441 			 "tid = %d \n"
5442 			 "peer_id = %d\n"
5443 			 "tx_status = %d\n",
5444 			 ts->ack_frame_rssi, ts->first_msdu,
5445 			 ts->last_msdu, ts->msdu_part_of_amsdu,
5446 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
5447 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
5448 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
5449 			 ts->transmit_cnt, ts->tid, ts->peer_id,
5450 			 ts->status);
5451 
5452 	/* Update SoC level stats */
5453 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
5454 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
5455 
5456 	if (!txrx_peer) {
5457 		dp_info_rl("peer is null or deletion in progress");
5458 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
5459 		goto out;
5460 	}
5461 	vdev = txrx_peer->vdev;
5462 
5463 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
5464 	dp_tx_update_uplink_delay(soc, vdev, ts);
5465 
5466 	/* check tx complete notification */
5467 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
5468 		dp_tx_notify_completion(soc, vdev, tx_desc,
5469 					nbuf, ts->status);
5470 
5471 	/* Update per-packet stats for mesh mode */
5472 	if (qdf_unlikely(vdev->mesh_vdev) &&
5473 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
5474 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
5475 
5476 	/* Update peer level stats */
5477 	if (qdf_unlikely(txrx_peer->bss_peer &&
5478 			 vdev->opmode == wlan_op_mode_ap)) {
5479 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
5480 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5481 						      length);
5482 
5483 			if (txrx_peer->vdev->tx_encap_type ==
5484 				htt_cmn_pkt_type_ethernet &&
5485 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5486 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5487 							      tx.bcast, 1,
5488 							      length);
5489 			}
5490 		}
5491 	} else {
5492 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
5493 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5494 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5495 						      1, length);
5496 			if (qdf_unlikely(txrx_peer->in_twt)) {
5497 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5498 							      tx.tx_success_twt,
5499 							      1, length);
5500 			}
5501 		}
5502 	}
5503 
5504 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
5505 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5506 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
5507 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5508 				     ts, ts->tid);
5509 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5510 
5511 #ifdef QCA_SUPPORT_RDK_STATS
5512 	if (soc->peerstats_enabled)
5513 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5514 					    qdf_ktime_to_ms(tx_desc->timestamp),
5515 					    ts->ppdu_id);
5516 #endif
5517 
5518 out:
5519 	return;
5520 }
5521 
5522 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5523 	defined(QCA_ENHANCED_STATS_SUPPORT)
5524 /*
5525  * dp_tx_update_peer_basic_stats(): Update peer basic stats
5526  * @txrx_peer: Datapath txrx_peer handle
5527  * @length: Length of the packet
5528  * @tx_status: Tx status from TQM/FW
5529  * @update: enhanced flag value present in dp_pdev
5530  *
5531  * Return: none
5532  */
5533 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5534 				   uint32_t length, uint8_t tx_status,
5535 				   bool update)
5536 {
5537 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5538 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5539 
5540 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5541 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5542 	}
5543 }
5544 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5545 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5546 				   uint32_t length, uint8_t tx_status,
5547 				   bool update)
5548 {
5549 	if (!txrx_peer->hw_txrx_stats_en) {
5550 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5551 
5552 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5553 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5554 	}
5555 }
5556 
5557 #else
5558 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5559 				   uint32_t length, uint8_t tx_status,
5560 				   bool update)
5561 {
5562 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5563 
5564 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5565 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5566 }
5567 #endif
5568 
5569 /*
5570  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
5571  * @nbuf: skb buffer
5572  *
5573  * Return: none
5574  */
5575 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5576 static inline
5577 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5578 {
5579 	qdf_nbuf_t nbuf = NULL;
5580 
5581 	if (next)
5582 		nbuf = next->nbuf;
5583 	if (nbuf)
5584 		qdf_prefetch(nbuf);
5585 }
5586 #else
5587 static inline
5588 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5589 {
5590 }
5591 #endif
5592 
5593 /**
5594  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5595  * @soc: core txrx main context
5596  * @desc: software descriptor
5597  *
5598  * Return: true when packet is reinjected
5599  */
5600 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5601 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
5602 static inline bool
5603 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5604 {
5605 	struct dp_vdev *vdev = NULL;
5606 
5607 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5608 		if (!soc->arch_ops.dp_tx_mcast_handler ||
5609 		    !soc->arch_ops.dp_tx_is_mcast_primary)
5610 			return false;
5611 
5612 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5613 					     DP_MOD_ID_REINJECT);
5614 
5615 		if (qdf_unlikely(!vdev)) {
5616 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
5617 					   desc->id);
5618 			return false;
5619 		}
5620 
5621 		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
5622 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5623 			return false;
5624 		}
5625 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5626 				 qdf_nbuf_len(desc->nbuf));
5627 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5628 		dp_tx_desc_release(desc, desc->pool_id);
5629 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5630 		return true;
5631 	}
5632 
5633 	return false;
5634 }
5635 #else
5636 static inline bool
5637 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5638 {
5639 	return false;
5640 }
5641 #endif
5642 
5643 #ifdef QCA_DP_TX_NBUF_LIST_FREE
5644 static inline void
5645 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5646 {
5647 	qdf_nbuf_queue_head_init(nbuf_queue_head);
5648 }
5649 
5650 static inline void
5651 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5652 			  struct dp_tx_desc_s *desc)
5653 {
5654 	qdf_nbuf_t nbuf = NULL;
5655 
5656 	nbuf = desc->nbuf;
5657 	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
5658 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
5659 	else
5660 		qdf_nbuf_free(nbuf);
5661 }
5662 
5663 static inline void
5664 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5665 {
5666 	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
5667 }
5668 #else
5669 static inline void
5670 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
5671 {
5672 }
5673 
5674 static inline void
5675 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
5676 			  struct dp_tx_desc_s *desc)
5677 {
5678 	qdf_nbuf_free(desc->nbuf);
5679 }
5680 
5681 static inline void
5682 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
5683 {
5684 }
5685 #endif
5686 
5687 /**
5688  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
5689  * @soc: core txrx main context
5690  * @comp_head: software descriptor head pointer
5691  * @ring_id: ring number
5692  *
5693  * This function will process batch of descriptors reaped by dp_tx_comp_handler
5694  * and release the software descriptors after processing is complete
5695  *
5696  * Return: none
5697  */
5698 void
5699 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5700 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5701 {
5702 	struct dp_tx_desc_s *desc;
5703 	struct dp_tx_desc_s *next;
5704 	struct hal_tx_completion_status ts;
5705 	struct dp_txrx_peer *txrx_peer = NULL;
5706 	uint16_t peer_id = DP_INVALID_PEER;
5707 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5708 	qdf_nbuf_queue_head_t h;
5709 
5710 	desc = comp_head;
5711 
5712 	dp_tx_nbuf_queue_head_init(&h);
5713 
5714 	while (desc) {
5715 		next = desc->next;
5716 		dp_tx_prefetch_next_nbuf_data(next);
5717 
5718 		if (peer_id != desc->peer_id) {
5719 			if (txrx_peer)
5720 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5721 							  DP_MOD_ID_TX_COMP);
5722 			peer_id = desc->peer_id;
5723 			txrx_peer =
5724 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5725 							   &txrx_ref_handle,
5726 							   DP_MOD_ID_TX_COMP);
5727 		}
5728 
5729 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5730 			desc = next;
5731 			continue;
5732 		}
5733 
5734 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
5735 			if (qdf_likely(txrx_peer))
5736 				dp_tx_update_peer_basic_stats(txrx_peer,
5737 							      desc->length,
5738 							      desc->tx_status,
5739 							      false);
5740 			dp_tx_nbuf_dev_queue_free(&h, desc);
5741 			dp_ppeds_tx_desc_free(soc, desc);
5742 			desc = next;
5743 			continue;
5744 		}
5745 
5746 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5747 			struct dp_pdev *pdev = desc->pdev;
5748 
5749 			if (qdf_likely(txrx_peer))
5750 				dp_tx_update_peer_basic_stats(txrx_peer,
5751 							      desc->length,
5752 							      desc->tx_status,
5753 							      false);
5754 			qdf_assert(pdev);
5755 			dp_tx_outstanding_dec(pdev);
5756 
5757 			/*
5758 			 * Calling a QDF WRAPPER here is creating significant
5759 			 * performance impact so avoided the wrapper call here
5760 			 */
5761 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5762 					       desc->id, DP_TX_COMP_UNMAP);
5763 			dp_tx_nbuf_unmap(soc, desc);
5764 			dp_tx_nbuf_dev_queue_free(&h, desc);
5765 			dp_tx_desc_free(soc, desc, desc->pool_id);
5766 			desc = next;
5767 			continue;
5768 		}
5769 
5770 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5771 
5772 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5773 					     ring_id);
5774 
5775 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5776 
5777 		dp_tx_desc_release(desc, desc->pool_id);
5778 		desc = next;
5779 	}
5780 	dp_tx_nbuf_dev_kfree_list(&h);
5781 	if (txrx_peer)
5782 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5783 }
5784 
5785 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5786 static inline
5787 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5788 				   int max_reap_limit)
5789 {
5790 	bool limit_hit = false;
5791 
5792 	limit_hit =
5793 		(num_reaped >= max_reap_limit) ? true : false;
5794 
5795 	if (limit_hit)
5796 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5797 
5798 	return limit_hit;
5799 }
5800 
5801 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5802 {
5803 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5804 }
5805 
5806 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5807 {
5808 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5809 
5810 	return cfg->tx_comp_loop_pkt_limit;
5811 }
5812 #else
5813 static inline
5814 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5815 				   int max_reap_limit)
5816 {
5817 	return false;
5818 }
5819 
5820 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5821 {
5822 	return false;
5823 }
5824 
5825 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5826 {
5827 	return 0;
5828 }
5829 #endif
5830 
5831 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5832 static inline int
5833 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5834 				  int *max_reap_limit)
5835 {
5836 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5837 							       max_reap_limit);
5838 }
5839 #else
5840 static inline int
5841 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5842 				  int *max_reap_limit)
5843 {
5844 	return 0;
5845 }
5846 #endif
5847 
5848 #ifdef DP_TX_TRACKING
5849 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5850 {
5851 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5852 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5853 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5854 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5855 	}
5856 }
5857 #endif
5858 
5859 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5860 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5861 			    uint32_t quota)
5862 {
5863 	void *tx_comp_hal_desc;
5864 	void *last_prefetched_hw_desc = NULL;
5865 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5866 	hal_soc_handle_t hal_soc;
5867 	uint8_t buffer_src;
5868 	struct dp_tx_desc_s *tx_desc = NULL;
5869 	struct dp_tx_desc_s *head_desc = NULL;
5870 	struct dp_tx_desc_s *tail_desc = NULL;
5871 	uint32_t num_processed = 0;
5872 	uint32_t count;
5873 	uint32_t num_avail_for_reap = 0;
5874 	bool force_break = false;
5875 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5876 	int max_reap_limit, ring_near_full;
5877 	uint32_t num_entries;
5878 
5879 	DP_HIST_INIT();
5880 
5881 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
5882 
5883 more_data:
5884 
5885 	hal_soc = soc->hal_soc;
5886 	/* Re-initialize local variables to be re-used */
5887 	head_desc = NULL;
5888 	tail_desc = NULL;
5889 	count = 0;
5890 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5891 
5892 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5893 							   &max_reap_limit);
5894 
5895 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5896 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5897 		return 0;
5898 	}
5899 
5900 	if (!num_avail_for_reap)
5901 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
5902 							    hal_ring_hdl, 0);
5903 
5904 	if (num_avail_for_reap >= quota)
5905 		num_avail_for_reap = quota;
5906 
5907 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5908 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5909 							    hal_ring_hdl,
5910 							    num_avail_for_reap);
5911 
5912 	/* Find head descriptor from completion ring */
5913 	while (qdf_likely(num_avail_for_reap--)) {
5914 
5915 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5916 		if (qdf_unlikely(!tx_comp_hal_desc))
5917 			break;
5918 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5919 							   tx_comp_hal_desc);
5920 
5921 		/* If this buffer was not released by TQM or FW, then it is not
5922 		 * Tx completion indication, assert */
5923 		if (qdf_unlikely(buffer_src !=
5924 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5925 				 (qdf_unlikely(buffer_src !=
5926 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5927 			uint8_t wbm_internal_error;
5928 
5929 			dp_err_rl(
5930 				"Tx comp release_src != TQM | FW but from %d",
5931 				buffer_src);
5932 			hal_dump_comp_desc(tx_comp_hal_desc);
5933 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5934 
5935 			/* When WBM sees NULL buffer_addr_info in any of
5936 			 * ingress rings it sends an error indication,
5937 			 * with wbm_internal_error=1, to a specific ring.
5938 			 * The WBM2SW ring used to indicate these errors is
5939 			 * fixed in HW, and that ring is being used as Tx
5940 			 * completion ring. These errors are not related to
5941 			 * Tx completions, and should just be ignored
5942 			 */
5943 			wbm_internal_error = hal_get_wbm_internal_error(
5944 							hal_soc,
5945 							tx_comp_hal_desc);
5946 
5947 			if (wbm_internal_error) {
5948 				dp_err_rl("Tx comp wbm_internal_error!!");
5949 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5950 
5951 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5952 								buffer_src)
5953 					dp_handle_wbm_internal_error(
5954 						soc,
5955 						tx_comp_hal_desc,
5956 						hal_tx_comp_get_buffer_type(
5957 							tx_comp_hal_desc));
5958 
5959 			} else {
5960 				dp_err_rl("Tx comp wbm_internal_error false");
5961 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5962 			}
5963 			continue;
5964 		}
5965 
5966 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5967 							       tx_comp_hal_desc,
5968 							       &tx_desc);
5969 		if (qdf_unlikely(!tx_desc)) {
5970 			dp_err("unable to retrieve tx_desc!");
5971 			hal_dump_comp_desc(tx_comp_hal_desc);
5972 			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
5973 			QDF_BUG(0);
5974 			continue;
5975 		}
5976 		tx_desc->buffer_src = buffer_src;
5977 
5978 		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
5979 			goto add_to_pool2;
5980 
5981 		/*
5982 		 * If the release source is FW, process the HTT status
5983 		 */
5984 		if (qdf_unlikely(buffer_src ==
5985 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5986 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5987 
5988 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5989 					htt_tx_status);
5990 			/* Collect hw completion contents */
5991 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5992 					      &tx_desc->comp, 1);
5993 			soc->arch_ops.dp_tx_process_htt_completion(
5994 							soc,
5995 							tx_desc,
5996 							htt_tx_status,
5997 							ring_id);
5998 		} else {
5999 			tx_desc->tx_status =
6000 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
6001 			tx_desc->buffer_src = buffer_src;
6002 			/*
6003 			 * If the fast completion mode is enabled extended
6004 			 * metadata from descriptor is not copied
6005 			 */
6006 			if (qdf_likely(tx_desc->flags &
6007 						DP_TX_DESC_FLAG_SIMPLE))
6008 				goto add_to_pool;
6009 
6010 			/*
6011 			 * If the descriptor is already freed in vdev_detach,
6012 			 * continue to next descriptor
6013 			 */
6014 			if (qdf_unlikely
6015 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
6016 				 !tx_desc->flags)) {
6017 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
6018 						   tx_desc->id);
6019 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
6020 				dp_tx_desc_check_corruption(tx_desc);
6021 				continue;
6022 			}
6023 
6024 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
6025 				dp_tx_comp_info_rl("pdev in down state %d",
6026 						   tx_desc->id);
6027 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
6028 				dp_tx_comp_free_buf(soc, tx_desc, false);
6029 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
6030 				goto next_desc;
6031 			}
6032 
6033 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
6034 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
6035 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
6036 						 tx_desc->flags, tx_desc->id);
6037 				qdf_assert_always(0);
6038 			}
6039 
6040 			/* Collect hw completion contents */
6041 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6042 					      &tx_desc->comp, 1);
6043 add_to_pool:
6044 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
6045 
6046 add_to_pool2:
6047 			/* First ring descriptor on the cycle */
6048 			if (!head_desc) {
6049 				head_desc = tx_desc;
6050 				tail_desc = tx_desc;
6051 			}
6052 
6053 			tail_desc->next = tx_desc;
6054 			tx_desc->next = NULL;
6055 			tail_desc = tx_desc;
6056 		}
6057 next_desc:
6058 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
6059 
6060 		/*
6061 		 * Processed packet count is more than given quota
6062 		 * stop to processing
6063 		 */
6064 
6065 		count++;
6066 
6067 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
6068 					       num_avail_for_reap,
6069 					       hal_ring_hdl,
6070 					       &last_prefetched_hw_desc,
6071 					       &last_prefetched_sw_desc);
6072 
6073 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
6074 			break;
6075 	}
6076 
6077 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
6078 
6079 	/* Process the reaped descriptors */
6080 	if (head_desc)
6081 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
6082 
6083 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
6084 
6085 	/*
6086 	 * If we are processing in near-full condition, there are 3 scenario
6087 	 * 1) Ring entries has reached critical state
6088 	 * 2) Ring entries are still near high threshold
6089 	 * 3) Ring entries are below the safe level
6090 	 *
6091 	 * One more loop will move the state to normal processing and yield
6092 	 */
6093 	if (ring_near_full)
6094 		goto more_data;
6095 
6096 	if (dp_tx_comp_enable_eol_data_check(soc)) {
6097 
6098 		if (num_processed >= quota)
6099 			force_break = true;
6100 
6101 		if (!force_break &&
6102 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
6103 						  hal_ring_hdl)) {
6104 			DP_STATS_INC(soc, tx.hp_oos2, 1);
6105 			if (!hif_exec_should_yield(soc->hif_handle,
6106 						   int_ctx->dp_intr_id))
6107 				goto more_data;
6108 
6109 			num_avail_for_reap =
6110 				hal_srng_dst_num_valid_locked(soc->hal_soc,
6111 							      hal_ring_hdl,
6112 							      true);
6113 			if (qdf_unlikely(num_entries &&
6114 					 (num_avail_for_reap >=
6115 					  num_entries >> 1))) {
6116 				DP_STATS_INC(soc, tx.near_full, 1);
6117 				goto more_data;
6118 			}
6119 		}
6120 	}
6121 	DP_TX_HIST_STATS_PER_PDEV();
6122 
6123 	return num_processed;
6124 }
6125 
6126 #ifdef FEATURE_WLAN_TDLS
6127 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6128 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
6129 {
6130 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6131 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6132 						     DP_MOD_ID_TDLS);
6133 
6134 	if (!vdev) {
6135 		dp_err("vdev handle for id %d is NULL", vdev_id);
6136 		return NULL;
6137 	}
6138 
6139 	if (tx_spec & OL_TX_SPEC_NO_FREE)
6140 		vdev->is_tdls_frame = true;
6141 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
6142 
6143 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
6144 }
6145 #endif
6146 
6147 /**
6148  * dp_tx_vdev_attach() - attach vdev to dp tx
6149  * @vdev: virtual device instance
6150  *
6151  * Return: QDF_STATUS_SUCCESS: success
6152  *         QDF_STATUS_E_RESOURCES: Error return
6153  */
6154 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
6155 {
6156 	int pdev_id;
6157 	/*
6158 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
6159 	 */
6160 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
6161 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
6162 
6163 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
6164 				       vdev->vdev_id);
6165 
6166 	pdev_id =
6167 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
6168 						       vdev->pdev->pdev_id);
6169 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
6170 
6171 	/*
6172 	 * Set HTT Extension Valid bit to 0 by default
6173 	 */
6174 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
6175 
6176 	dp_tx_vdev_update_search_flags(vdev);
6177 
6178 	return QDF_STATUS_SUCCESS;
6179 }
6180 
6181 #ifndef FEATURE_WDS
6182 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
6183 {
6184 	return false;
6185 }
6186 #endif
6187 
6188 /**
6189  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
6190  * @vdev: virtual device instance
6191  *
6192  * Return: void
6193  *
6194  */
6195 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
6196 {
6197 	struct dp_soc *soc = vdev->pdev->soc;
6198 
6199 	/*
6200 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
6201 	 * for TDLS link
6202 	 *
6203 	 * Enable AddrY (SA based search) only for non-WDS STA and
6204 	 * ProxySTA VAP (in HKv1) modes.
6205 	 *
6206 	 * In all other VAP modes, only DA based search should be
6207 	 * enabled
6208 	 */
6209 	if (vdev->opmode == wlan_op_mode_sta &&
6210 	    vdev->tdls_link_connected)
6211 		vdev->hal_desc_addr_search_flags =
6212 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
6213 	else if ((vdev->opmode == wlan_op_mode_sta) &&
6214 		 !dp_tx_da_search_override(vdev))
6215 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
6216 	else
6217 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
6218 
6219 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
6220 		vdev->search_type = soc->sta_mode_search_policy;
6221 	else
6222 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
6223 }
6224 
6225 static inline bool
6226 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
6227 			  struct dp_vdev *vdev,
6228 			  struct dp_tx_desc_s *tx_desc)
6229 {
6230 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
6231 		return false;
6232 
6233 	/*
6234 	 * if vdev is given, then only check whether desc
6235 	 * vdev match. if vdev is NULL, then check whether
6236 	 * desc pdev match.
6237 	 */
6238 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
6239 		(tx_desc->pdev == pdev);
6240 }
6241 
6242 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6243 /**
6244  * dp_tx_desc_flush() - release resources associated
6245  *                      to TX Desc
6246  *
6247  * @dp_pdev: Handle to DP pdev structure
6248  * @vdev: virtual device instance
6249  * NULL: no specific Vdev is required and check all allcated TX desc
6250  * on this pdev.
6251  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
6252  *
6253  * @force_free:
6254  * true: flush the TX desc.
6255  * false: only reset the Vdev in each allocated TX desc
6256  * that associated to current Vdev.
6257  *
6258  * This function will go through the TX desc pool to flush
6259  * the outstanding TX data or reset Vdev to NULL in associated TX
6260  * Desc.
6261  */
6262 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6263 		      bool force_free)
6264 {
6265 	uint8_t i;
6266 	uint32_t j;
6267 	uint32_t num_desc, page_id, offset;
6268 	uint16_t num_desc_per_page;
6269 	struct dp_soc *soc = pdev->soc;
6270 	struct dp_tx_desc_s *tx_desc = NULL;
6271 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6272 
6273 	if (!vdev && !force_free) {
6274 		dp_err("Reset TX desc vdev, Vdev param is required!");
6275 		return;
6276 	}
6277 
6278 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
6279 		tx_desc_pool = &soc->tx_desc[i];
6280 		if (!(tx_desc_pool->pool_size) ||
6281 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
6282 		    !(tx_desc_pool->desc_pages.cacheable_pages))
6283 			continue;
6284 
6285 		/*
6286 		 * Add flow pool lock protection in case pool is freed
6287 		 * due to all tx_desc is recycled when handle TX completion.
6288 		 * this is not necessary when do force flush as:
6289 		 * a. double lock will happen if dp_tx_desc_release is
6290 		 *    also trying to acquire it.
6291 		 * b. dp interrupt has been disabled before do force TX desc
6292 		 *    flush in dp_pdev_deinit().
6293 		 */
6294 		if (!force_free)
6295 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
6296 		num_desc = tx_desc_pool->pool_size;
6297 		num_desc_per_page =
6298 			tx_desc_pool->desc_pages.num_element_per_page;
6299 		for (j = 0; j < num_desc; j++) {
6300 			page_id = j / num_desc_per_page;
6301 			offset = j % num_desc_per_page;
6302 
6303 			if (qdf_unlikely(!(tx_desc_pool->
6304 					 desc_pages.cacheable_pages)))
6305 				break;
6306 
6307 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6308 
6309 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6310 				/*
6311 				 * Free TX desc if force free is
6312 				 * required, otherwise only reset vdev
6313 				 * in this TX desc.
6314 				 */
6315 				if (force_free) {
6316 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6317 					dp_tx_comp_free_buf(soc, tx_desc,
6318 							    false);
6319 					dp_tx_desc_release(tx_desc, i);
6320 				} else {
6321 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6322 				}
6323 			}
6324 		}
6325 		if (!force_free)
6326 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
6327 	}
6328 }
6329 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6330 /**
6331  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
6332  *
6333  * @soc: Handle to DP soc structure
6334  * @tx_desc: pointer of one TX desc
6335  * @desc_pool_id: TX Desc pool id
6336  */
6337 static inline void
6338 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
6339 		      uint8_t desc_pool_id)
6340 {
6341 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
6342 
6343 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6344 
6345 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
6346 }
6347 
6348 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6349 		      bool force_free)
6350 {
6351 	uint8_t i, num_pool;
6352 	uint32_t j;
6353 	uint32_t num_desc, page_id, offset;
6354 	uint16_t num_desc_per_page;
6355 	struct dp_soc *soc = pdev->soc;
6356 	struct dp_tx_desc_s *tx_desc = NULL;
6357 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6358 
6359 	if (!vdev && !force_free) {
6360 		dp_err("Reset TX desc vdev, Vdev param is required!");
6361 		return;
6362 	}
6363 
6364 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6365 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6366 
6367 	for (i = 0; i < num_pool; i++) {
6368 		tx_desc_pool = &soc->tx_desc[i];
6369 		if (!tx_desc_pool->desc_pages.cacheable_pages)
6370 			continue;
6371 
6372 		num_desc_per_page =
6373 			tx_desc_pool->desc_pages.num_element_per_page;
6374 		for (j = 0; j < num_desc; j++) {
6375 			page_id = j / num_desc_per_page;
6376 			offset = j % num_desc_per_page;
6377 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6378 
6379 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6380 				if (force_free) {
6381 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6382 					dp_tx_comp_free_buf(soc, tx_desc,
6383 							    false);
6384 					dp_tx_desc_release(tx_desc, i);
6385 				} else {
6386 					dp_tx_desc_reset_vdev(soc, tx_desc,
6387 							      i);
6388 				}
6389 			}
6390 		}
6391 	}
6392 }
6393 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6394 
6395 /**
6396  * dp_tx_vdev_detach() - detach vdev from dp tx
6397  * @vdev: virtual device instance
6398  *
6399  * Return: QDF_STATUS_SUCCESS: success
6400  *         QDF_STATUS_E_RESOURCES: Error return
6401  */
6402 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
6403 {
6404 	struct dp_pdev *pdev = vdev->pdev;
6405 
6406 	/* Reset TX desc associated to this Vdev as NULL */
6407 	dp_tx_desc_flush(pdev, vdev, false);
6408 
6409 	return QDF_STATUS_SUCCESS;
6410 }
6411 
6412 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6413 /* Pools will be allocated dynamically */
6414 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6415 					   int num_desc)
6416 {
6417 	uint8_t i;
6418 
6419 	for (i = 0; i < num_pool; i++) {
6420 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
6421 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
6422 	}
6423 
6424 	return QDF_STATUS_SUCCESS;
6425 }
6426 
6427 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6428 					  uint32_t num_desc)
6429 {
6430 	return QDF_STATUS_SUCCESS;
6431 }
6432 
6433 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6434 {
6435 }
6436 
6437 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6438 {
6439 	uint8_t i;
6440 
6441 	for (i = 0; i < num_pool; i++)
6442 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
6443 }
6444 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6445 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6446 					   uint32_t num_desc)
6447 {
6448 	uint8_t i, count;
6449 
6450 	/* Allocate software Tx descriptor pools */
6451 	for (i = 0; i < num_pool; i++) {
6452 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
6453 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6454 				  FL("Tx Desc Pool alloc %d failed %pK"),
6455 				  i, soc);
6456 			goto fail;
6457 		}
6458 	}
6459 	return QDF_STATUS_SUCCESS;
6460 
6461 fail:
6462 	for (count = 0; count < i; count++)
6463 		dp_tx_desc_pool_free(soc, count);
6464 
6465 	return QDF_STATUS_E_NOMEM;
6466 }
6467 
6468 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6469 					  uint32_t num_desc)
6470 {
6471 	uint8_t i;
6472 	for (i = 0; i < num_pool; i++) {
6473 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
6474 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6475 				  FL("Tx Desc Pool init %d failed %pK"),
6476 				  i, soc);
6477 			return QDF_STATUS_E_NOMEM;
6478 		}
6479 	}
6480 	return QDF_STATUS_SUCCESS;
6481 }
6482 
6483 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6484 {
6485 	uint8_t i;
6486 
6487 	for (i = 0; i < num_pool; i++)
6488 		dp_tx_desc_pool_deinit(soc, i);
6489 }
6490 
6491 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6492 {
6493 	uint8_t i;
6494 
6495 	for (i = 0; i < num_pool; i++)
6496 		dp_tx_desc_pool_free(soc, i);
6497 }
6498 
6499 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6500 
6501 /**
6502  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
6503  * @soc: core txrx main context
6504  * @num_pool: number of pools
6505  *
6506  */
6507 static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
6508 {
6509 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
6510 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
6511 }
6512 
6513 /**
6514  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
6515  * @soc: core txrx main context
6516  * @num_pool: number of pools
6517  *
6518  */
6519 static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
6520 {
6521 	dp_tx_tso_desc_pool_free(soc, num_pool);
6522 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
6523 }
6524 
6525 /**
6526  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
6527  * @soc: core txrx main context
6528  *
6529  * This function frees all tx related descriptors as below
6530  * 1. Regular TX descriptors (static pools)
6531  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6532  * 3. TSO descriptors
6533  *
6534  */
6535 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6536 {
6537 	uint8_t num_pool;
6538 
6539 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6540 
6541 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6542 	dp_tx_ext_desc_pool_free(soc, num_pool);
6543 	dp_tx_delete_static_pools(soc, num_pool);
6544 }
6545 
6546 /**
6547  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
6548  * @soc: core txrx main context
6549  *
6550  * This function de-initializes all tx related descriptors as below
6551  * 1. Regular TX descriptors (static pools)
6552  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6553  * 3. TSO descriptors
6554  *
6555  */
6556 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6557 {
6558 	uint8_t num_pool;
6559 
6560 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6561 
6562 	dp_tx_flow_control_deinit(soc);
6563 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6564 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6565 	dp_tx_deinit_static_pools(soc, num_pool);
6566 }
6567 
6568 /**
6569  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
6570  * @soc: DP soc handle
6571  * @num_pool: Number of pools
6572  * @num_desc: Number of descriptors
6573  *
6574  * Reserve TSO descriptor buffers
6575  *
6576  * Return: QDF_STATUS_E_FAILURE on failure or
6577  *         QDF_STATUS_SUCCESS on success
6578  */
6579 static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6580 						uint8_t num_pool,
6581 						uint32_t num_desc)
6582 {
6583 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6584 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6585 		return QDF_STATUS_E_FAILURE;
6586 	}
6587 
6588 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6589 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6590 		       num_pool, soc);
6591 		return QDF_STATUS_E_FAILURE;
6592 	}
6593 	return QDF_STATUS_SUCCESS;
6594 }
6595 
6596 /**
6597  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6598  * @soc: DP soc handle
6599  * @num_pool: Number of pools
6600  * @num_desc: Number of descriptors
6601  *
6602  * Initialize TSO descriptor pools
6603  *
6604  * Return: QDF_STATUS_E_FAILURE on failure or
6605  *         QDF_STATUS_SUCCESS on success
6606  */
6607 
6608 static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6609 					       uint8_t num_pool,
6610 					       uint32_t num_desc)
6611 {
6612 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6613 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6614 		return QDF_STATUS_E_FAILURE;
6615 	}
6616 
6617 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6618 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6619 		       num_pool, soc);
6620 		return QDF_STATUS_E_FAILURE;
6621 	}
6622 	return QDF_STATUS_SUCCESS;
6623 }
6624 
6625 /**
6626  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
6627  * @soc: core txrx main context
6628  *
6629  * This function allocates memory for following descriptor pools
6630  * 1. regular sw tx descriptor pools (static pools)
6631  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6632  * 3. TSO descriptor pools
6633  *
6634  * Return: QDF_STATUS_SUCCESS: success
6635  *         QDF_STATUS_E_RESOURCES: Error return
6636  */
6637 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6638 {
6639 	uint8_t num_pool;
6640 	uint32_t num_desc;
6641 	uint32_t num_ext_desc;
6642 
6643 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6644 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6645 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6646 
6647 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6648 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6649 		  __func__, num_pool, num_desc);
6650 
6651 	if ((num_pool > MAX_TXDESC_POOLS) ||
6652 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6653 		goto fail1;
6654 
6655 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6656 		goto fail1;
6657 
6658 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
6659 		goto fail2;
6660 
6661 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6662 		return QDF_STATUS_SUCCESS;
6663 
6664 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6665 		goto fail3;
6666 
6667 	return QDF_STATUS_SUCCESS;
6668 
6669 fail3:
6670 	dp_tx_ext_desc_pool_free(soc, num_pool);
6671 fail2:
6672 	dp_tx_delete_static_pools(soc, num_pool);
6673 fail1:
6674 	return QDF_STATUS_E_RESOURCES;
6675 }
6676 
6677 /**
6678  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
6679  * @soc: core txrx main context
6680  *
6681  * This function initializes the following TX descriptor pools
6682  * 1. regular sw tx descriptor pools (static pools)
6683  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6684  * 3. TSO descriptor pools
6685  *
6686  * Return: QDF_STATUS_SUCCESS: success
6687  *	   QDF_STATUS_E_RESOURCES: Error return
6688  */
6689 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6690 {
6691 	uint8_t num_pool;
6692 	uint32_t num_desc;
6693 	uint32_t num_ext_desc;
6694 
6695 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6696 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6697 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6698 
6699 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6700 		goto fail1;
6701 
6702 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6703 		goto fail2;
6704 
6705 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6706 		return QDF_STATUS_SUCCESS;
6707 
6708 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6709 		goto fail3;
6710 
6711 	dp_tx_flow_control_init(soc);
6712 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6713 	return QDF_STATUS_SUCCESS;
6714 
6715 fail3:
6716 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6717 fail2:
6718 	dp_tx_deinit_static_pools(soc, num_pool);
6719 fail1:
6720 	return QDF_STATUS_E_RESOURCES;
6721 }
6722 
6723 /**
6724  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
6725  * @txrx_soc: dp soc handle
6726  *
6727  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6728  *			QDF_STATUS_E_FAILURE
6729  */
6730 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6731 {
6732 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6733 	uint8_t num_pool;
6734 	uint32_t num_ext_desc;
6735 
6736 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6737 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6738 
6739 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6740 		return QDF_STATUS_E_FAILURE;
6741 
6742 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6743 		return QDF_STATUS_E_FAILURE;
6744 
6745 	return QDF_STATUS_SUCCESS;
6746 }
6747 
6748 /**
6749  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
6750  * @txrx_soc: dp soc handle
6751  *
6752  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6753  */
6754 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6755 {
6756 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6757 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6758 
6759 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6760 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6761 
6762 	return QDF_STATUS_SUCCESS;
6763 }
6764 
6765 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6766 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6767 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6768 			  qdf_nbuf_t nbuf)
6769 {
6770 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6771 		uint64_t tsf_time;
6772 
6773 		if (vdev->get_tsf_time) {
6774 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6775 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6776 		}
6777 	}
6778 }
6779 
6780 void dp_pkt_get_timestamp(uint64_t *time)
6781 {
6782 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6783 		*time = qdf_get_log_timestamp();
6784 }
6785 #endif
6786 
6787