xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision cfe8cda78633be00818878028ff51fc658a66c94)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject*/
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 #ifdef WLAN_PEER_JITTER
72 #define DP_AVG_JITTER_WEIGHT_DENOM 4
73 #define DP_AVG_DELAY_WEIGHT_DENOM 3
74 #endif
75 
76 #ifdef QCA_DP_TX_FW_METADATA_V2
77 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
78 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
82 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
85 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
86 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
87 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
88 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
89 #define DP_TCL_METADATA_TYPE_PEER_BASED \
90 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
91 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
92 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
93 #else
94 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
95 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
102 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
103 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
104 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
105 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
106 #define DP_TCL_METADATA_TYPE_PEER_BASED \
107 	HTT_TCL_METADATA_TYPE_PEER_BASED
108 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
109 	HTT_TCL_METADATA_TYPE_VDEV_BASED
110 #endif
111 
112 /*mapping between hal encrypt type and cdp_sec_type*/
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
258 
259 /**
260  * dp_is_tput_high() - Check if throughput is high
261  *
262  * @soc - core txrx main context
263  *
264  * The current function is based of the RTPM tput policy variable where RTPM is
265  * avoided based on throughput.
266  */
267 static inline int dp_is_tput_high(struct dp_soc *soc)
268 {
269 	return dp_get_rtpm_tput_policy_requirement(soc);
270 }
271 
272 #if defined(FEATURE_TSO)
273 /**
274  * dp_tx_tso_unmap_segment() - Unmap TSO segment
275  *
276  * @soc - core txrx main context
277  * @seg_desc - tso segment descriptor
278  * @num_seg_desc - tso number segment descriptor
279  */
280 static void dp_tx_tso_unmap_segment(
281 		struct dp_soc *soc,
282 		struct qdf_tso_seg_elem_t *seg_desc,
283 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
284 {
285 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
286 	if (qdf_unlikely(!seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else if (qdf_unlikely(!num_seg_desc)) {
291 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
292 			 __func__, __LINE__);
293 		qdf_assert(0);
294 	} else {
295 		bool is_last_seg;
296 		/* no tso segment left to do dma unmap */
297 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
298 			return;
299 
300 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
301 					true : false;
302 		qdf_nbuf_unmap_tso_segment(soc->osdev,
303 					   seg_desc, is_last_seg);
304 		num_seg_desc->num_seg.tso_cmn_num_seg--;
305 	}
306 }
307 
308 /**
309  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
310  *                            back to the freelist
311  *
312  * @soc - soc device handle
313  * @tx_desc - Tx software descriptor
314  */
315 static void dp_tx_tso_desc_release(struct dp_soc *soc,
316 				   struct dp_tx_desc_s *tx_desc)
317 {
318 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
319 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
320 		dp_tx_err("SO desc is NULL!");
321 		qdf_assert(0);
322 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
323 		dp_tx_err("TSO num desc is NULL!");
324 		qdf_assert(0);
325 	} else {
326 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
327 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
328 				msdu_ext_desc->tso_num_desc;
329 
330 		/* Add the tso num segment into the free list */
331 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
332 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
333 					    tx_desc->msdu_ext_desc->
334 					    tso_num_desc);
335 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
336 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
337 		}
338 
339 		/* Add the tso segment into the free list*/
340 		dp_tx_tso_desc_free(soc,
341 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
342 				    tso_desc);
343 		tx_desc->msdu_ext_desc->tso_desc = NULL;
344 	}
345 }
346 #else
347 static void dp_tx_tso_unmap_segment(
348 		struct dp_soc *soc,
349 		struct qdf_tso_seg_elem_t *seg_desc,
350 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
351 
352 {
353 }
354 
355 static void dp_tx_tso_desc_release(struct dp_soc *soc,
356 				   struct dp_tx_desc_s *tx_desc)
357 {
358 }
359 #endif
360 
361 /**
362  * dp_tx_desc_release() - Release Tx Descriptor
363  * @tx_desc : Tx Descriptor
364  * @desc_pool_id: Descriptor Pool ID
365  *
366  * Deallocate all resources attached to Tx descriptor and free the Tx
367  * descriptor.
368  *
369  * Return:
370  */
371 void
372 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
373 {
374 	struct dp_pdev *pdev = tx_desc->pdev;
375 	struct dp_soc *soc;
376 	uint8_t comp_status = 0;
377 
378 	qdf_assert(pdev);
379 
380 	soc = pdev->soc;
381 
382 	dp_tx_outstanding_dec(pdev);
383 
384 	if (tx_desc->msdu_ext_desc) {
385 		if (tx_desc->frm_type == dp_tx_frm_tso)
386 			dp_tx_tso_desc_release(soc, tx_desc);
387 
388 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
389 			dp_tx_me_free_buf(tx_desc->pdev,
390 					  tx_desc->msdu_ext_desc->me_buffer);
391 
392 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
393 	}
394 
395 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
396 		qdf_atomic_dec(&soc->num_tx_exception);
397 
398 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
399 				tx_desc->buffer_src)
400 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
401 							     soc->hal_soc);
402 	else
403 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
404 
405 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
406 		    tx_desc->id, comp_status,
407 		    qdf_atomic_read(&pdev->num_tx_outstanding));
408 
409 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
410 	return;
411 }
412 
413 /**
414  * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
415  * @vdev: DP vdev Handle
416  * @nbuf: skb
417  * @msdu_info: msdu_info required to create HTT metadata
418  *
419  * Prepares and fills HTT metadata in the frame pre-header for special frames
420  * that should be transmitted using varying transmit parameters.
421  * There are 2 VDEV modes that currently needs this special metadata -
422  *  1) Mesh Mode
423  *  2) DSRC Mode
424  *
425  * Return: HTT metadata size
426  *
427  */
428 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
429 					  struct dp_tx_msdu_info_s *msdu_info)
430 {
431 	uint32_t *meta_data = msdu_info->meta_data;
432 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
433 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
434 
435 	uint8_t htt_desc_size;
436 
437 	/* Size rounded of multiple of 8 bytes */
438 	uint8_t htt_desc_size_aligned;
439 
440 	uint8_t *hdr = NULL;
441 
442 	/*
443 	 * Metadata - HTT MSDU Extension header
444 	 */
445 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
446 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
447 
448 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
449 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
450 							   meta_data[0]) ||
451 	    msdu_info->exception_fw) {
452 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
453 				 htt_desc_size_aligned)) {
454 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
455 							 htt_desc_size_aligned);
456 			if (!nbuf) {
457 				/*
458 				 * qdf_nbuf_realloc_headroom won't do skb_clone
459 				 * as skb_realloc_headroom does. so, no free is
460 				 * needed here.
461 				 */
462 				DP_STATS_INC(vdev,
463 					     tx_i.dropped.headroom_insufficient,
464 					     1);
465 				qdf_print(" %s[%d] skb_realloc_headroom failed",
466 					  __func__, __LINE__);
467 				return 0;
468 			}
469 		}
470 		/* Fill and add HTT metaheader */
471 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
472 		if (!hdr) {
473 			dp_tx_err("Error in filling HTT metadata");
474 
475 			return 0;
476 		}
477 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
478 
479 	} else if (vdev->opmode == wlan_op_mode_ocb) {
480 		/* Todo - Add support for DSRC */
481 	}
482 
483 	return htt_desc_size_aligned;
484 }
485 
486 /**
487  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
488  * @tso_seg: TSO segment to process
489  * @ext_desc: Pointer to MSDU extension descriptor
490  *
491  * Return: void
492  */
493 #if defined(FEATURE_TSO)
494 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
495 		void *ext_desc)
496 {
497 	uint8_t num_frag;
498 	uint32_t tso_flags;
499 
500 	/*
501 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
502 	 * tcp_flag_mask
503 	 *
504 	 * Checksum enable flags are set in TCL descriptor and not in Extension
505 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
506 	 */
507 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
508 
509 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
510 
511 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
512 		tso_seg->tso_flags.ip_len);
513 
514 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
515 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
516 
517 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
518 		uint32_t lo = 0;
519 		uint32_t hi = 0;
520 
521 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
522 				  (tso_seg->tso_frags[num_frag].length));
523 
524 		qdf_dmaaddr_to_32s(
525 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
526 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
527 			tso_seg->tso_frags[num_frag].length);
528 	}
529 
530 	return;
531 }
532 #else
533 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
534 		void *ext_desc)
535 {
536 	return;
537 }
538 #endif
539 
540 #if defined(FEATURE_TSO)
541 /**
542  * dp_tx_free_tso_seg_list() - Loop through the tso segments
543  *                             allocated and free them
544  *
545  * @soc: soc handle
546  * @free_seg: list of tso segments
547  * @msdu_info: msdu descriptor
548  *
549  * Return - void
550  */
551 static void dp_tx_free_tso_seg_list(
552 		struct dp_soc *soc,
553 		struct qdf_tso_seg_elem_t *free_seg,
554 		struct dp_tx_msdu_info_s *msdu_info)
555 {
556 	struct qdf_tso_seg_elem_t *next_seg;
557 
558 	while (free_seg) {
559 		next_seg = free_seg->next;
560 		dp_tx_tso_desc_free(soc,
561 				    msdu_info->tx_queue.desc_pool_id,
562 				    free_seg);
563 		free_seg = next_seg;
564 	}
565 }
566 
567 /**
568  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
569  *                                 allocated and free them
570  *
571  * @soc:  soc handle
572  * @free_num_seg: list of tso number segments
573  * @msdu_info: msdu descriptor
574  * Return - void
575  */
576 static void dp_tx_free_tso_num_seg_list(
577 		struct dp_soc *soc,
578 		struct qdf_tso_num_seg_elem_t *free_num_seg,
579 		struct dp_tx_msdu_info_s *msdu_info)
580 {
581 	struct qdf_tso_num_seg_elem_t *next_num_seg;
582 
583 	while (free_num_seg) {
584 		next_num_seg = free_num_seg->next;
585 		dp_tso_num_seg_free(soc,
586 				    msdu_info->tx_queue.desc_pool_id,
587 				    free_num_seg);
588 		free_num_seg = next_num_seg;
589 	}
590 }
591 
592 /**
593  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
594  *                              do dma unmap for each segment
595  *
596  * @soc: soc handle
597  * @free_seg: list of tso segments
598  * @num_seg_desc: tso number segment descriptor
599  *
600  * Return - void
601  */
602 static void dp_tx_unmap_tso_seg_list(
603 		struct dp_soc *soc,
604 		struct qdf_tso_seg_elem_t *free_seg,
605 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
606 {
607 	struct qdf_tso_seg_elem_t *next_seg;
608 
609 	if (qdf_unlikely(!num_seg_desc)) {
610 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
611 		return;
612 	}
613 
614 	while (free_seg) {
615 		next_seg = free_seg->next;
616 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
617 		free_seg = next_seg;
618 	}
619 }
620 
621 #ifdef FEATURE_TSO_STATS
622 /**
623  * dp_tso_get_stats_idx: Retrieve the tso packet id
624  * @pdev - pdev handle
625  *
626  * Return: id
627  */
628 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
629 {
630 	uint32_t stats_idx;
631 
632 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
633 						% CDP_MAX_TSO_PACKETS);
634 	return stats_idx;
635 }
636 #else
637 static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
638 {
639 	return 0;
640 }
641 #endif /* FEATURE_TSO_STATS */
642 
643 /**
644  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
645  *				     free the tso segments descriptor and
646  *				     tso num segments descriptor
647  *
648  * @soc:  soc handle
649  * @msdu_info: msdu descriptor
650  * @tso_seg_unmap: flag to show if dma unmap is necessary
651  *
652  * Return - void
653  */
654 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
655 					  struct dp_tx_msdu_info_s *msdu_info,
656 					  bool tso_seg_unmap)
657 {
658 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
659 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
660 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
661 					tso_info->tso_num_seg_list;
662 
663 	/* do dma unmap for each segment */
664 	if (tso_seg_unmap)
665 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
666 
667 	/* free all tso number segment descriptor though looks only have 1 */
668 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
669 
670 	/* free all tso segment descriptor */
671 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
672 }
673 
674 /**
675  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
676  * @vdev: virtual device handle
677  * @msdu: network buffer
678  * @msdu_info: meta data associated with the msdu
679  *
680  * Return: QDF_STATUS_SUCCESS success
681  */
682 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
683 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
684 {
685 	struct qdf_tso_seg_elem_t *tso_seg;
686 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
687 	struct dp_soc *soc = vdev->pdev->soc;
688 	struct dp_pdev *pdev = vdev->pdev;
689 	struct qdf_tso_info_t *tso_info;
690 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
691 	tso_info = &msdu_info->u.tso_info;
692 	tso_info->curr_seg = NULL;
693 	tso_info->tso_seg_list = NULL;
694 	tso_info->num_segs = num_seg;
695 	msdu_info->frm_type = dp_tx_frm_tso;
696 	tso_info->tso_num_seg_list = NULL;
697 
698 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
699 
700 	while (num_seg) {
701 		tso_seg = dp_tx_tso_desc_alloc(
702 				soc, msdu_info->tx_queue.desc_pool_id);
703 		if (tso_seg) {
704 			tso_seg->next = tso_info->tso_seg_list;
705 			tso_info->tso_seg_list = tso_seg;
706 			num_seg--;
707 		} else {
708 			dp_err_rl("Failed to alloc tso seg desc");
709 			DP_STATS_INC_PKT(vdev->pdev,
710 					 tso_stats.tso_no_mem_dropped, 1,
711 					 qdf_nbuf_len(msdu));
712 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
713 
714 			return QDF_STATUS_E_NOMEM;
715 		}
716 	}
717 
718 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
719 
720 	tso_num_seg = dp_tso_num_seg_alloc(soc,
721 			msdu_info->tx_queue.desc_pool_id);
722 
723 	if (tso_num_seg) {
724 		tso_num_seg->next = tso_info->tso_num_seg_list;
725 		tso_info->tso_num_seg_list = tso_num_seg;
726 	} else {
727 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
728 			 __func__);
729 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
730 
731 		return QDF_STATUS_E_NOMEM;
732 	}
733 
734 	msdu_info->num_seg =
735 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
736 
737 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
738 			msdu_info->num_seg);
739 
740 	if (!(msdu_info->num_seg)) {
741 		/*
742 		 * Free allocated TSO seg desc and number seg desc,
743 		 * do unmap for segments if dma map has done.
744 		 */
745 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
746 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
747 
748 		return QDF_STATUS_E_INVAL;
749 	}
750 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
751 			      msdu, 0, DP_TX_DESC_MAP);
752 
753 	tso_info->curr_seg = tso_info->tso_seg_list;
754 
755 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
756 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
757 			     msdu, msdu_info->num_seg);
758 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
759 				    tso_info->msdu_stats_idx);
760 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
761 	return QDF_STATUS_SUCCESS;
762 }
763 #else
764 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
765 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
766 {
767 	return QDF_STATUS_E_NOMEM;
768 }
769 #endif
770 
771 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
772 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
773 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
774 
775 /**
776  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
777  * @vdev: DP Vdev handle
778  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
779  * @desc_pool_id: Descriptor Pool ID
780  *
781  * Return:
782  */
783 static
784 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
785 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
786 {
787 	uint8_t i;
788 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
789 	struct dp_tx_seg_info_s *seg_info;
790 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
791 	struct dp_soc *soc = vdev->pdev->soc;
792 
793 	/* Allocate an extension descriptor */
794 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
795 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
796 
797 	if (!msdu_ext_desc) {
798 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
799 		return NULL;
800 	}
801 
802 	if (msdu_info->exception_fw &&
803 			qdf_unlikely(vdev->mesh_vdev)) {
804 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
805 				&msdu_info->meta_data[0],
806 				sizeof(struct htt_tx_msdu_desc_ext2_t));
807 		qdf_atomic_inc(&soc->num_tx_exception);
808 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
809 	}
810 
811 	switch (msdu_info->frm_type) {
812 	case dp_tx_frm_sg:
813 	case dp_tx_frm_me:
814 	case dp_tx_frm_raw:
815 		seg_info = msdu_info->u.sg_info.curr_seg;
816 		/* Update the buffer pointers in MSDU Extension Descriptor */
817 		for (i = 0; i < seg_info->frag_cnt; i++) {
818 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
819 				seg_info->frags[i].paddr_lo,
820 				seg_info->frags[i].paddr_hi,
821 				seg_info->frags[i].len);
822 		}
823 
824 		break;
825 
826 	case dp_tx_frm_tso:
827 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
828 				&cached_ext_desc[0]);
829 		break;
830 
831 
832 	default:
833 		break;
834 	}
835 
836 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
837 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
838 
839 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
840 			msdu_ext_desc->vaddr);
841 
842 	return msdu_ext_desc;
843 }
844 
845 /**
846  * dp_tx_trace_pkt() - Trace TX packet at DP layer
847  *
848  * @skb: skb to be traced
849  * @msdu_id: msdu_id of the packet
850  * @vdev_id: vdev_id of the packet
851  *
852  * Return: None
853  */
854 #ifdef DP_DISABLE_TX_PKT_TRACE
855 static void dp_tx_trace_pkt(struct dp_soc *soc,
856 			    qdf_nbuf_t skb, uint16_t msdu_id,
857 			    uint8_t vdev_id)
858 {
859 }
860 #else
861 static void dp_tx_trace_pkt(struct dp_soc *soc,
862 			    qdf_nbuf_t skb, uint16_t msdu_id,
863 			    uint8_t vdev_id)
864 {
865 	if (dp_is_tput_high(soc))
866 		return;
867 
868 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
869 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
870 	DPTRACE(qdf_dp_trace_ptr(skb,
871 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
872 				 QDF_TRACE_DEFAULT_PDEV_ID,
873 				 qdf_nbuf_data_addr(skb),
874 				 sizeof(qdf_nbuf_data(skb)),
875 				 msdu_id, vdev_id, 0));
876 
877 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
878 
879 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
880 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
881 				      msdu_id, QDF_TX));
882 }
883 #endif
884 
885 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
886 /**
887  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
888  *				      exception by the upper layer (OS_IF)
889  * @soc: DP soc handle
890  * @nbuf: packet to be transmitted
891  *
892  * Returns: 1 if the packet is marked as exception,
893  *	    0, if the packet is not marked as exception.
894  */
895 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
896 						 qdf_nbuf_t nbuf)
897 {
898 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
899 }
900 #else
901 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
902 						 qdf_nbuf_t nbuf)
903 {
904 	return 0;
905 }
906 #endif
907 
908 #ifdef DP_TRAFFIC_END_INDICATION
909 /**
910  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
911  *                                          as indication to fw to inform that
912  *                                          data stream has ended
913  * @vdev: DP vdev handle
914  * @nbuf: original buffer from network stack
915  *
916  * Return: NULL on failure,
917  *         nbuf on success
918  */
919 static inline qdf_nbuf_t
920 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
921 				     qdf_nbuf_t nbuf)
922 {
923 	/* Packet length should be enough to copy upto L3 header */
924 	uint8_t end_nbuf_len = 64;
925 	uint8_t htt_desc_size_aligned;
926 	uint8_t htt_desc_size;
927 	qdf_nbuf_t end_nbuf;
928 
929 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
930 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
931 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
932 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
933 
934 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
935 		if (!end_nbuf) {
936 			end_nbuf = qdf_nbuf_alloc(NULL,
937 						  (htt_desc_size_aligned +
938 						  end_nbuf_len),
939 						  htt_desc_size_aligned,
940 						  8, false);
941 			if (!end_nbuf) {
942 				dp_err("Packet allocation failed");
943 				goto out;
944 			}
945 		} else {
946 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
947 		}
948 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
949 			     end_nbuf_len);
950 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
951 
952 		return end_nbuf;
953 	}
954 out:
955 	return NULL;
956 }
957 
958 /**
959  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
960  *                                           via exception path.
961  * @vdev: DP vdev handle
962  * @end_nbuf: skb to send as indication
963  * @msdu_info: msdu_info of original nbuf
964  * @peer_id: peer id
965  *
966  * Return: None
967  */
968 static inline void
969 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
970 				      qdf_nbuf_t end_nbuf,
971 				      struct dp_tx_msdu_info_s *msdu_info,
972 				      uint16_t peer_id)
973 {
974 	struct dp_tx_msdu_info_s e_msdu_info = {0};
975 	qdf_nbuf_t nbuf;
976 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
977 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
978 	e_msdu_info.tx_queue = msdu_info->tx_queue;
979 	e_msdu_info.tid = msdu_info->tid;
980 	e_msdu_info.exception_fw = 1;
981 	desc_ext->host_tx_desc_pool = 1;
982 	desc_ext->traffic_end_indication = 1;
983 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
984 				      peer_id, NULL);
985 	if (nbuf) {
986 		dp_err("Traffic end indication packet tx failed");
987 		qdf_nbuf_free(nbuf);
988 	}
989 }
990 
991 /**
992  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
993  *                                                mark it traffic end indication
994  *                                                packet.
995  * @tx_desc: Tx descriptor pointer
996  * @msdu_info: msdu_info structure pointer
997  *
998  * Return: None
999  */
1000 static inline void
1001 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1002 					   struct dp_tx_msdu_info_s *msdu_info)
1003 {
1004 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1005 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1006 
1007 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1008 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1009 }
1010 
1011 /**
1012  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
1013  *                                              freeing which are associated
1014  *                                              with traffic end indication
1015  *                                              flagged descriptor.
1016  * @soc: dp soc handle
1017  * @desc: Tx descriptor pointer
1018  * @nbuf: buffer pointer
1019  *
1020  * Return: True if packet gets enqueued else false
1021  */
1022 static bool
1023 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1024 					 struct dp_tx_desc_s *desc,
1025 					 qdf_nbuf_t nbuf)
1026 {
1027 	struct dp_vdev *vdev = NULL;
1028 
1029 	if (qdf_unlikely((desc->flags &
1030 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1031 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1032 					     DP_MOD_ID_TX_COMP);
1033 		if (vdev) {
1034 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1035 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1036 			return true;
1037 		}
1038 	}
1039 	return false;
1040 }
1041 
1042 /**
1043  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1044  *                                             enable/disable status
1045  * @vdev: dp vdev handle
1046  *
1047  * Return: True if feature is enable else false
1048  */
1049 static inline bool
1050 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1051 {
1052 	return qdf_unlikely(vdev->traffic_end_ind_en);
1053 }
1054 
1055 static inline qdf_nbuf_t
1056 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1057 			       struct dp_tx_msdu_info_s *msdu_info,
1058 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1059 {
1060 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1061 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1062 
1063 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1064 
1065 	if (qdf_unlikely(end_nbuf))
1066 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1067 						      msdu_info, peer_id);
1068 	return nbuf;
1069 }
1070 #else
1071 static inline qdf_nbuf_t
1072 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1073 				     qdf_nbuf_t nbuf)
1074 {
1075 	return NULL;
1076 }
1077 
1078 static inline void
1079 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1080 				      qdf_nbuf_t end_nbuf,
1081 				      struct dp_tx_msdu_info_s *msdu_info,
1082 				      uint16_t peer_id)
1083 {}
1084 
1085 static inline void
1086 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1087 					   struct dp_tx_msdu_info_s *msdu_info)
1088 {}
1089 
1090 static inline bool
1091 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1092 					 struct dp_tx_desc_s *desc,
1093 					 qdf_nbuf_t nbuf)
1094 {
1095 	return false;
1096 }
1097 
1098 static inline bool
1099 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1100 {
1101 	return false;
1102 }
1103 
1104 static inline qdf_nbuf_t
1105 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1106 			       struct dp_tx_msdu_info_s *msdu_info,
1107 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1108 {
1109 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1110 }
1111 #endif
1112 
1113 /**
1114  * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
1115  * @vdev: DP vdev handle
1116  * @nbuf: skb
1117  * @desc_pool_id: Descriptor pool ID
1118  * @meta_data: Metadata to the fw
1119  * @tx_exc_metadata: Handle that holds exception path metadata
1120  * Allocate and prepare Tx descriptor with msdu information.
1121  *
1122  * Return: Pointer to Tx Descriptor on success,
1123  *         NULL on failure
1124  */
1125 static
1126 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1127 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1128 		struct dp_tx_msdu_info_s *msdu_info,
1129 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1130 {
1131 	uint8_t align_pad;
1132 	uint8_t is_exception = 0;
1133 	uint8_t htt_hdr_size;
1134 	struct dp_tx_desc_s *tx_desc;
1135 	struct dp_pdev *pdev = vdev->pdev;
1136 	struct dp_soc *soc = pdev->soc;
1137 
1138 	if (dp_tx_limit_check(vdev))
1139 		return NULL;
1140 
1141 	/* Allocate software Tx descriptor */
1142 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1143 
1144 	if (qdf_unlikely(!tx_desc)) {
1145 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1146 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1147 		return NULL;
1148 	}
1149 
1150 	dp_tx_outstanding_inc(pdev);
1151 
1152 	/* Initialize the SW tx descriptor */
1153 	tx_desc->nbuf = nbuf;
1154 	tx_desc->frm_type = dp_tx_frm_std;
1155 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1156 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1157 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1158 	tx_desc->vdev_id = vdev->vdev_id;
1159 	tx_desc->pdev = pdev;
1160 	tx_desc->msdu_ext_desc = NULL;
1161 	tx_desc->pkt_offset = 0;
1162 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1163 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1164 
1165 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1166 
1167 	if (qdf_unlikely(vdev->multipass_en)) {
1168 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1169 			goto failure;
1170 	}
1171 
1172 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1173 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1174 		is_exception = 1;
1175 	/*
1176 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1177 	 * transmitted using varying transmit parameters (tx spec) which include
1178 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
1179 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1180 	 * These frames are sent as exception packets to firmware.
1181 	 *
1182 	 * HW requirement is that metadata should always point to a
1183 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1184 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1185 	 *  to get 8-byte aligned start address along with align_pad added
1186 	 *
1187 	 *  |-----------------------------|
1188 	 *  |                             |
1189 	 *  |-----------------------------| <-----Buffer Pointer Address given
1190 	 *  |                             |  ^    in HW descriptor (aligned)
1191 	 *  |       HTT Metadata          |  |
1192 	 *  |                             |  |
1193 	 *  |                             |  | Packet Offset given in descriptor
1194 	 *  |                             |  |
1195 	 *  |-----------------------------|  |
1196 	 *  |       Alignment Pad         |  v
1197 	 *  |-----------------------------| <----- Actual buffer start address
1198 	 *  |        SKB Data             |           (Unaligned)
1199 	 *  |                             |
1200 	 *  |                             |
1201 	 *  |                             |
1202 	 *  |                             |
1203 	 *  |                             |
1204 	 *  |-----------------------------|
1205 	 */
1206 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1207 				(vdev->opmode == wlan_op_mode_ocb) ||
1208 				(tx_exc_metadata &&
1209 				tx_exc_metadata->is_tx_sniffer)) {
1210 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1211 
1212 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1213 			DP_STATS_INC(vdev,
1214 				     tx_i.dropped.headroom_insufficient, 1);
1215 			goto failure;
1216 		}
1217 
1218 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1219 			dp_tx_err("qdf_nbuf_push_head failed");
1220 			goto failure;
1221 		}
1222 
1223 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1224 				msdu_info);
1225 		if (htt_hdr_size == 0)
1226 			goto failure;
1227 
1228 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1229 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1230 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1231 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1232 							   msdu_info);
1233 		is_exception = 1;
1234 		tx_desc->length -= tx_desc->pkt_offset;
1235 	}
1236 
1237 #if !TQM_BYPASS_WAR
1238 	if (is_exception || tx_exc_metadata)
1239 #endif
1240 	{
1241 		/* Temporary WAR due to TQM VP issues */
1242 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1243 		qdf_atomic_inc(&soc->num_tx_exception);
1244 	}
1245 
1246 	return tx_desc;
1247 
1248 failure:
1249 	dp_tx_desc_release(tx_desc, desc_pool_id);
1250 	return NULL;
1251 }
1252 
1253 /**
1254  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1255  * @vdev: DP vdev handle
1256  * @nbuf: skb
1257  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1258  * @desc_pool_id : Descriptor Pool ID
1259  *
1260  * Allocate and prepare Tx descriptor with msdu and fragment descritor
1261  * information. For frames with fragments, allocate and prepare
1262  * an MSDU extension descriptor
1263  *
1264  * Return: Pointer to Tx Descriptor on success,
1265  *         NULL on failure
1266  */
1267 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1268 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1269 		uint8_t desc_pool_id)
1270 {
1271 	struct dp_tx_desc_s *tx_desc;
1272 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1273 	struct dp_pdev *pdev = vdev->pdev;
1274 	struct dp_soc *soc = pdev->soc;
1275 
1276 	if (dp_tx_limit_check(vdev))
1277 		return NULL;
1278 
1279 	/* Allocate software Tx descriptor */
1280 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1281 	if (!tx_desc) {
1282 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1283 		return NULL;
1284 	}
1285 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1286 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1287 
1288 	dp_tx_outstanding_inc(pdev);
1289 
1290 	/* Initialize the SW tx descriptor */
1291 	tx_desc->nbuf = nbuf;
1292 	tx_desc->frm_type = msdu_info->frm_type;
1293 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1294 	tx_desc->vdev_id = vdev->vdev_id;
1295 	tx_desc->pdev = pdev;
1296 	tx_desc->pkt_offset = 0;
1297 
1298 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1299 
1300 	/* Handle scattered frames - TSO/SG/ME */
1301 	/* Allocate and prepare an extension descriptor for scattered frames */
1302 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1303 	if (!msdu_ext_desc) {
1304 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1305 		goto failure;
1306 	}
1307 
1308 #if TQM_BYPASS_WAR
1309 	/* Temporary WAR due to TQM VP issues */
1310 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1311 	qdf_atomic_inc(&soc->num_tx_exception);
1312 #endif
1313 	if (qdf_unlikely(msdu_info->exception_fw))
1314 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1315 
1316 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1317 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1318 
1319 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1320 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1321 
1322 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1323 
1324 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1325 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1326 	else
1327 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1328 
1329 	return tx_desc;
1330 failure:
1331 	dp_tx_desc_release(tx_desc, desc_pool_id);
1332 	return NULL;
1333 }
1334 
1335 /**
1336  * dp_tx_prepare_raw() - Prepare RAW packet TX
1337  * @vdev: DP vdev handle
1338  * @nbuf: buffer pointer
1339  * @seg_info: Pointer to Segment info Descriptor to be prepared
1340  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1341  *     descriptor
1342  *
1343  * Return:
1344  */
1345 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1346 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1347 {
1348 	qdf_nbuf_t curr_nbuf = NULL;
1349 	uint16_t total_len = 0;
1350 	qdf_dma_addr_t paddr;
1351 	int32_t i;
1352 	int32_t mapped_buf_num = 0;
1353 
1354 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1355 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1356 
1357 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1358 
1359 	/* Continue only if frames are of DATA type */
1360 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1361 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1362 		dp_tx_debug("Pkt. recd is of not data type");
1363 		goto error;
1364 	}
1365 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1366 	if (vdev->raw_mode_war &&
1367 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1368 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1369 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1370 
1371 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1372 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1373 		/*
1374 		 * Number of nbuf's must not exceed the size of the frags
1375 		 * array in seg_info.
1376 		 */
1377 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1378 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1379 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1380 			goto error;
1381 		}
1382 		if (QDF_STATUS_SUCCESS !=
1383 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1384 						   curr_nbuf,
1385 						   QDF_DMA_TO_DEVICE,
1386 						   curr_nbuf->len)) {
1387 			dp_tx_err("%s dma map error ", __func__);
1388 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1389 			goto error;
1390 		}
1391 		/* Update the count of mapped nbuf's */
1392 		mapped_buf_num++;
1393 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1394 		seg_info->frags[i].paddr_lo = paddr;
1395 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1396 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1397 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1398 		total_len += qdf_nbuf_len(curr_nbuf);
1399 	}
1400 
1401 	seg_info->frag_cnt = i;
1402 	seg_info->total_len = total_len;
1403 	seg_info->next = NULL;
1404 
1405 	sg_info->curr_seg = seg_info;
1406 
1407 	msdu_info->frm_type = dp_tx_frm_raw;
1408 	msdu_info->num_seg = 1;
1409 
1410 	return nbuf;
1411 
1412 error:
1413 	i = 0;
1414 	while (nbuf) {
1415 		curr_nbuf = nbuf;
1416 		if (i < mapped_buf_num) {
1417 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1418 						     QDF_DMA_TO_DEVICE,
1419 						     curr_nbuf->len);
1420 			i++;
1421 		}
1422 		nbuf = qdf_nbuf_next(nbuf);
1423 		qdf_nbuf_free(curr_nbuf);
1424 	}
1425 	return NULL;
1426 
1427 }
1428 
1429 /**
1430  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1431  * @soc: DP soc handle
1432  * @nbuf: Buffer pointer
1433  *
1434  * unmap the chain of nbufs that belong to this RAW frame.
1435  *
1436  * Return: None
1437  */
1438 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1439 				    qdf_nbuf_t nbuf)
1440 {
1441 	qdf_nbuf_t cur_nbuf = nbuf;
1442 
1443 	do {
1444 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1445 					     QDF_DMA_TO_DEVICE,
1446 					     cur_nbuf->len);
1447 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1448 	} while (cur_nbuf);
1449 }
1450 
1451 #ifdef VDEV_PEER_PROTOCOL_COUNT
1452 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1453 					       qdf_nbuf_t nbuf)
1454 {
1455 	qdf_nbuf_t nbuf_local;
1456 	struct dp_vdev *vdev_local = vdev_hdl;
1457 
1458 	do {
1459 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1460 			break;
1461 		nbuf_local = nbuf;
1462 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1463 			 htt_cmn_pkt_type_raw))
1464 			break;
1465 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1466 			break;
1467 		else if (qdf_nbuf_is_tso((nbuf_local)))
1468 			break;
1469 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1470 						       (nbuf_local),
1471 						       NULL, 1, 0);
1472 	} while (0);
1473 }
1474 #endif
1475 
1476 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1477 /**
1478  * dp_tx_update_stats() - Update soc level tx stats
1479  * @soc: DP soc handle
1480  * @tx_desc: TX descriptor reference
1481  * @ring_id: TCL ring id
1482  *
1483  * Returns: none
1484  */
1485 void dp_tx_update_stats(struct dp_soc *soc,
1486 			struct dp_tx_desc_s *tx_desc,
1487 			uint8_t ring_id)
1488 {
1489 	uint32_t stats_len = 0;
1490 
1491 	if (tx_desc->frm_type == dp_tx_frm_tso)
1492 		stats_len  = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1493 	else
1494 		stats_len = qdf_nbuf_len(tx_desc->nbuf);
1495 
1496 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1497 }
1498 
1499 int
1500 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1501 			 struct dp_tx_desc_s *tx_desc,
1502 			 uint8_t tid,
1503 			 struct dp_tx_msdu_info_s *msdu_info,
1504 			 uint8_t ring_id)
1505 {
1506 	struct dp_swlm *swlm = &soc->swlm;
1507 	union swlm_data swlm_query_data;
1508 	struct dp_swlm_tcl_data tcl_data;
1509 	QDF_STATUS status;
1510 	int ret;
1511 
1512 	if (!swlm->is_enabled)
1513 		return msdu_info->skip_hp_update;
1514 
1515 	tcl_data.nbuf = tx_desc->nbuf;
1516 	tcl_data.tid = tid;
1517 	tcl_data.ring_id = ring_id;
1518 	if (tx_desc->frm_type == dp_tx_frm_tso) {
1519 		tcl_data.pkt_len  =
1520 			tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1521 	} else {
1522 		tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
1523 	}
1524 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1525 	swlm_query_data.tcl_data = &tcl_data;
1526 
1527 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1528 	if (QDF_IS_STATUS_ERROR(status)) {
1529 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1530 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1531 		return 0;
1532 	}
1533 
1534 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1535 	if (ret) {
1536 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1537 	} else {
1538 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1539 	}
1540 
1541 	return ret;
1542 }
1543 
1544 void
1545 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1546 		      int coalesce)
1547 {
1548 	if (coalesce)
1549 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1550 	else
1551 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1552 }
1553 
1554 static inline void
1555 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1556 {
1557 	if (((i + 1) < msdu_info->num_seg))
1558 		msdu_info->skip_hp_update = 1;
1559 	else
1560 		msdu_info->skip_hp_update = 0;
1561 }
1562 
1563 static inline void
1564 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1565 {
1566 	hal_ring_handle_t hal_ring_hdl =
1567 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1568 
1569 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1570 		dp_err("Fillmore: SRNG access start failed");
1571 		return;
1572 	}
1573 
1574 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1575 }
1576 
1577 static inline void
1578 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1579 			 QDF_STATUS status,
1580 			 struct dp_tx_msdu_info_s *msdu_info)
1581 {
1582 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1583 		dp_flush_tcp_hp(soc,
1584 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1585 	}
1586 }
1587 #else
1588 static inline void
1589 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1590 {
1591 }
1592 
1593 static inline void
1594 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1595 			 QDF_STATUS status,
1596 			 struct dp_tx_msdu_info_s *msdu_info)
1597 {
1598 }
1599 #endif
1600 
1601 #ifdef FEATURE_RUNTIME_PM
1602 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1603 {
1604 	int ret;
1605 
1606 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1607 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1608 	return ret;
1609 }
1610 /**
1611  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1612  * @soc: Datapath soc handle
1613  * @hal_ring_hdl: HAL ring handle
1614  * @coalesce: Coalesce the current write or not
1615  *
1616  * Wrapper for HAL ring access end for data transmission for
1617  * FEATURE_RUNTIME_PM
1618  *
1619  * Returns: none
1620  */
1621 void
1622 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1623 			      hal_ring_handle_t hal_ring_hdl,
1624 			      int coalesce)
1625 {
1626 	int ret;
1627 
1628 	/*
1629 	 * Avoid runtime get and put APIs under high throughput scenarios.
1630 	 */
1631 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1632 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1633 		return;
1634 	}
1635 
1636 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1637 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1638 		if (hif_system_pm_state_check(soc->hif_handle) ||
1639 					qdf_unlikely(soc->is_tx_pause)) {
1640 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1641 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1642 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1643 		} else {
1644 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1645 		}
1646 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1647 	} else {
1648 		dp_runtime_get(soc);
1649 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1650 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1651 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1652 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1653 		dp_runtime_put(soc);
1654 	}
1655 }
1656 #else
1657 
1658 #ifdef DP_POWER_SAVE
1659 void
1660 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1661 			      hal_ring_handle_t hal_ring_hdl,
1662 			      int coalesce)
1663 {
1664 	if (hif_system_pm_state_check(soc->hif_handle) ||
1665 					qdf_unlikely(soc->is_tx_pause)) {
1666 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1667 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1668 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1669 	} else {
1670 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1671 	}
1672 }
1673 #endif
1674 
1675 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1676 {
1677 	return 0;
1678 }
1679 #endif
1680 
1681 /**
1682  * dp_tx_get_tid() - Obtain TID to be used for this frame
1683  * @vdev: DP vdev handle
1684  * @nbuf: skb
1685  *
1686  * Extract the DSCP or PCP information from frame and map into TID value.
1687  *
1688  * Return: void
1689  */
1690 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1691 			  struct dp_tx_msdu_info_s *msdu_info)
1692 {
1693 	uint8_t tos = 0, dscp_tid_override = 0;
1694 	uint8_t *hdr_ptr, *L3datap;
1695 	uint8_t is_mcast = 0;
1696 	qdf_ether_header_t *eh = NULL;
1697 	qdf_ethervlan_header_t *evh = NULL;
1698 	uint16_t   ether_type;
1699 	qdf_llc_t *llcHdr;
1700 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1701 
1702 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1703 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1704 		eh = (qdf_ether_header_t *)nbuf->data;
1705 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1706 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1707 	} else {
1708 		qdf_dot3_qosframe_t *qos_wh =
1709 			(qdf_dot3_qosframe_t *) nbuf->data;
1710 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1711 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1712 		return;
1713 	}
1714 
1715 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1716 	ether_type = eh->ether_type;
1717 
1718 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1719 	/*
1720 	 * Check if packet is dot3 or eth2 type.
1721 	 */
1722 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1723 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1724 				sizeof(*llcHdr));
1725 
1726 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1727 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1728 				sizeof(*llcHdr);
1729 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1730 					+ sizeof(*llcHdr) +
1731 					sizeof(qdf_net_vlanhdr_t));
1732 		} else {
1733 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1734 				sizeof(*llcHdr);
1735 		}
1736 	} else {
1737 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1738 			evh = (qdf_ethervlan_header_t *) eh;
1739 			ether_type = evh->ether_type;
1740 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1741 		}
1742 	}
1743 
1744 	/*
1745 	 * Find priority from IP TOS DSCP field
1746 	 */
1747 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1748 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1749 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1750 			/* Only for unicast frames */
1751 			if (!is_mcast) {
1752 				/* send it on VO queue */
1753 				msdu_info->tid = DP_VO_TID;
1754 			}
1755 		} else {
1756 			/*
1757 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1758 			 * from TOS byte.
1759 			 */
1760 			tos = ip->ip_tos;
1761 			dscp_tid_override = 1;
1762 
1763 		}
1764 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1765 		/* TODO
1766 		 * use flowlabel
1767 		 *igmpmld cases to be handled in phase 2
1768 		 */
1769 		unsigned long ver_pri_flowlabel;
1770 		unsigned long pri;
1771 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1772 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1773 			DP_IPV6_PRIORITY_SHIFT;
1774 		tos = pri;
1775 		dscp_tid_override = 1;
1776 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1777 		msdu_info->tid = DP_VO_TID;
1778 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1779 		/* Only for unicast frames */
1780 		if (!is_mcast) {
1781 			/* send ucast arp on VO queue */
1782 			msdu_info->tid = DP_VO_TID;
1783 		}
1784 	}
1785 
1786 	/*
1787 	 * Assign all MCAST packets to BE
1788 	 */
1789 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1790 		if (is_mcast) {
1791 			tos = 0;
1792 			dscp_tid_override = 1;
1793 		}
1794 	}
1795 
1796 	if (dscp_tid_override == 1) {
1797 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1798 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1799 	}
1800 
1801 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1802 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1803 
1804 	return;
1805 }
1806 
1807 /**
1808  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1809  * @vdev: DP vdev handle
1810  * @nbuf: skb
1811  *
1812  * Software based TID classification is required when more than 2 DSCP-TID
1813  * mapping tables are needed.
1814  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1815  *
1816  * Return: void
1817  */
1818 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1819 				      struct dp_tx_msdu_info_s *msdu_info)
1820 {
1821 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1822 
1823 	/*
1824 	 * skip_sw_tid_classification flag will set in below cases-
1825 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1826 	 * 2. hlos_tid_override enabled for vdev
1827 	 * 3. mesh mode enabled for vdev
1828 	 */
1829 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1830 		/* Update tid in msdu_info from skb priority */
1831 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1832 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1833 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1834 
1835 			if (tid == DP_TX_INVALID_QOS_TAG)
1836 				return;
1837 
1838 			msdu_info->tid = tid;
1839 			return;
1840 		}
1841 		return;
1842 	}
1843 
1844 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1845 }
1846 
1847 #ifdef FEATURE_WLAN_TDLS
1848 /**
1849  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1850  * @soc: datapath SOC
1851  * @vdev: datapath vdev
1852  * @tx_desc: TX descriptor
1853  *
1854  * Return: None
1855  */
1856 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1857 				    struct dp_vdev *vdev,
1858 				    struct dp_tx_desc_s *tx_desc)
1859 {
1860 	if (vdev) {
1861 		if (vdev->is_tdls_frame) {
1862 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1863 			vdev->is_tdls_frame = false;
1864 		}
1865 	}
1866 }
1867 
1868 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1869 {
1870 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1871 
1872 	switch (soc->arch_id) {
1873 	case CDP_ARCH_TYPE_LI:
1874 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1875 		break;
1876 
1877 	case CDP_ARCH_TYPE_BE:
1878 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1879 		break;
1880 
1881 	default:
1882 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1883 		QDF_BUG(0);
1884 	}
1885 
1886 	return tx_status;
1887 }
1888 
1889 /**
1890  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1891  * @soc: dp_soc handle
1892  * @tx_desc: TX descriptor
1893  * @vdev: datapath vdev handle
1894  *
1895  * Return: None
1896  */
1897 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1898 					 struct dp_tx_desc_s *tx_desc)
1899 {
1900 	uint8_t tx_status = 0;
1901 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1902 
1903 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1904 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1905 						     DP_MOD_ID_TDLS);
1906 
1907 	if (qdf_unlikely(!vdev)) {
1908 		dp_err_rl("vdev is null!");
1909 		goto error;
1910 	}
1911 
1912 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1913 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1914 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1915 
1916 	if (vdev->tx_non_std_data_callback.func) {
1917 		qdf_nbuf_set_next(nbuf, NULL);
1918 		vdev->tx_non_std_data_callback.func(
1919 				vdev->tx_non_std_data_callback.ctxt,
1920 				nbuf, tx_status);
1921 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1922 		return;
1923 	} else {
1924 		dp_err_rl("callback func is null");
1925 	}
1926 
1927 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1928 error:
1929 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1930 	qdf_nbuf_free(nbuf);
1931 }
1932 
1933 /**
1934  * dp_tx_msdu_single_map() - do nbuf map
1935  * @vdev: DP vdev handle
1936  * @tx_desc: DP TX descriptor pointer
1937  * @nbuf: skb pointer
1938  *
1939  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1940  * operation done in other component.
1941  *
1942  * Return: QDF_STATUS
1943  */
1944 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1945 					       struct dp_tx_desc_s *tx_desc,
1946 					       qdf_nbuf_t nbuf)
1947 {
1948 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1949 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1950 						  nbuf,
1951 						  QDF_DMA_TO_DEVICE,
1952 						  nbuf->len);
1953 	else
1954 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1955 					   QDF_DMA_TO_DEVICE);
1956 }
1957 #else
1958 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1959 					   struct dp_vdev *vdev,
1960 					   struct dp_tx_desc_s *tx_desc)
1961 {
1962 }
1963 
1964 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1965 						struct dp_tx_desc_s *tx_desc)
1966 {
1967 }
1968 
1969 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1970 					       struct dp_tx_desc_s *tx_desc,
1971 					       qdf_nbuf_t nbuf)
1972 {
1973 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1974 					  nbuf,
1975 					  QDF_DMA_TO_DEVICE,
1976 					  nbuf->len);
1977 }
1978 #endif
1979 
1980 static inline
1981 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1982 				      struct dp_tx_desc_s *tx_desc,
1983 				      qdf_nbuf_t nbuf)
1984 {
1985 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1986 
1987 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1988 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1989 		return 0;
1990 
1991 	return qdf_nbuf_mapped_paddr_get(nbuf);
1992 }
1993 
1994 static inline
1995 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1996 {
1997 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1998 					   desc->nbuf,
1999 					   desc->dma_addr,
2000 					   QDF_DMA_TO_DEVICE,
2001 					   desc->length);
2002 }
2003 
2004 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2005 static inline
2006 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2007 			      struct dp_tx_desc_s *tx_desc,
2008 			      qdf_nbuf_t nbuf)
2009 {
2010 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2011 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2012 					 (void *)(nbuf->data + nbuf->len));
2013 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2014 	} else {
2015 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2016 	}
2017 }
2018 
2019 static inline
2020 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2021 		      struct dp_tx_desc_s *desc)
2022 {
2023 	if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)))
2024 		return dp_tx_nbuf_unmap_regular(soc, desc);
2025 }
2026 #else
2027 static inline
2028 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2029 			      struct dp_tx_desc_s *tx_desc,
2030 			      qdf_nbuf_t nbuf)
2031 {
2032 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2033 }
2034 
2035 static inline
2036 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2037 		      struct dp_tx_desc_s *desc)
2038 {
2039 	return dp_tx_nbuf_unmap_regular(soc, desc);
2040 }
2041 #endif
2042 
2043 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2044 static inline
2045 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2046 {
2047 	dp_tx_nbuf_unmap(soc, desc);
2048 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2049 }
2050 
2051 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2052 {
2053 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2054 		dp_tx_nbuf_unmap(soc, desc);
2055 }
2056 #else
2057 static inline
2058 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2059 {
2060 }
2061 
2062 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2063 {
2064 	dp_tx_nbuf_unmap(soc, desc);
2065 }
2066 #endif
2067 
2068 #ifdef MESH_MODE_SUPPORT
2069 /**
2070  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2071  * @soc: datapath SOC
2072  * @vdev: datapath vdev
2073  * @tx_desc: TX descriptor
2074  *
2075  * Return: None
2076  */
2077 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2078 					   struct dp_vdev *vdev,
2079 					   struct dp_tx_desc_s *tx_desc)
2080 {
2081 	if (qdf_unlikely(vdev->mesh_vdev))
2082 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2083 }
2084 
2085 /**
2086  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2087  * @soc: dp_soc handle
2088  * @tx_desc: TX descriptor
2089  * @delayed_free: delay the nbuf free
2090  *
2091  * Return: nbuf to be freed late
2092  */
2093 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2094 						   struct dp_tx_desc_s *tx_desc,
2095 						   bool delayed_free)
2096 {
2097 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2098 	struct dp_vdev *vdev = NULL;
2099 
2100 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2101 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2102 		if (vdev)
2103 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2104 
2105 		if (delayed_free)
2106 			return nbuf;
2107 
2108 		qdf_nbuf_free(nbuf);
2109 	} else {
2110 		if (vdev && vdev->osif_tx_free_ext) {
2111 			vdev->osif_tx_free_ext((nbuf));
2112 		} else {
2113 			if (delayed_free)
2114 				return nbuf;
2115 
2116 			qdf_nbuf_free(nbuf);
2117 		}
2118 	}
2119 
2120 	if (vdev)
2121 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2122 
2123 	return NULL;
2124 }
2125 #else
2126 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2127 					   struct dp_vdev *vdev,
2128 					   struct dp_tx_desc_s *tx_desc)
2129 {
2130 }
2131 
2132 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2133 						   struct dp_tx_desc_s *tx_desc,
2134 						   bool delayed_free)
2135 {
2136 	return NULL;
2137 }
2138 #endif
2139 
2140 /**
2141  * dp_tx_frame_is_drop() - checks if the packet is loopback
2142  * @vdev: DP vdev handle
2143  * @nbuf: skb
2144  *
2145  * Return: 1 if frame needs to be dropped else 0
2146  */
2147 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2148 {
2149 	struct dp_pdev *pdev = NULL;
2150 	struct dp_ast_entry *src_ast_entry = NULL;
2151 	struct dp_ast_entry *dst_ast_entry = NULL;
2152 	struct dp_soc *soc = NULL;
2153 
2154 	qdf_assert(vdev);
2155 	pdev = vdev->pdev;
2156 	qdf_assert(pdev);
2157 	soc = pdev->soc;
2158 
2159 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2160 				(soc, dstmac, vdev->pdev->pdev_id);
2161 
2162 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2163 				(soc, srcmac, vdev->pdev->pdev_id);
2164 	if (dst_ast_entry && src_ast_entry) {
2165 		if (dst_ast_entry->peer_id ==
2166 				src_ast_entry->peer_id)
2167 			return 1;
2168 	}
2169 
2170 	return 0;
2171 }
2172 
2173 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2174 	defined(WLAN_MCAST_MLO)
2175 /* MLO peer id for reinject*/
2176 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2177 /* MLO vdev id inc offset */
2178 #define DP_MLO_VDEV_ID_OFFSET 0x80
2179 
2180 static inline void
2181 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2182 {
2183 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2184 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2185 		qdf_atomic_inc(&soc->num_tx_exception);
2186 	}
2187 }
2188 
2189 static inline void
2190 dp_tx_update_mcast_param(uint16_t peer_id,
2191 			 uint16_t *htt_tcl_metadata,
2192 			 struct dp_vdev *vdev,
2193 			 struct dp_tx_msdu_info_s *msdu_info)
2194 {
2195 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2196 		*htt_tcl_metadata = 0;
2197 		DP_TX_TCL_METADATA_TYPE_SET(
2198 				*htt_tcl_metadata,
2199 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2200 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2201 						    msdu_info->gsn);
2202 
2203 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2204 		if (qdf_unlikely(vdev->nawds_enabled))
2205 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2206 							*htt_tcl_metadata, 1);
2207 	} else {
2208 		msdu_info->vdev_id = vdev->vdev_id;
2209 	}
2210 }
2211 #else
2212 static inline void
2213 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2214 {
2215 }
2216 
2217 static inline void
2218 dp_tx_update_mcast_param(uint16_t peer_id,
2219 			 uint16_t *htt_tcl_metadata,
2220 			 struct dp_vdev *vdev,
2221 			 struct dp_tx_msdu_info_s *msdu_info)
2222 {
2223 }
2224 #endif
2225 /**
2226  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2227  * @vdev: DP vdev handle
2228  * @nbuf: skb
2229  * @tid: TID from HLOS for overriding default DSCP-TID mapping
2230  * @meta_data: Metadata to the fw
2231  * @tx_q: Tx queue to be used for this Tx frame
2232  * @peer_id: peer_id of the peer in case of NAWDS frames
2233  * @tx_exc_metadata: Handle that holds exception path metadata
2234  *
2235  * Return: NULL on success,
2236  *         nbuf when it fails to send
2237  */
2238 qdf_nbuf_t
2239 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2240 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2241 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2242 {
2243 	struct dp_pdev *pdev = vdev->pdev;
2244 	struct dp_soc *soc = pdev->soc;
2245 	struct dp_tx_desc_s *tx_desc;
2246 	QDF_STATUS status;
2247 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2248 	uint16_t htt_tcl_metadata = 0;
2249 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2250 	uint8_t tid = msdu_info->tid;
2251 	struct cdp_tid_tx_stats *tid_stats = NULL;
2252 	qdf_dma_addr_t paddr;
2253 
2254 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2255 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2256 			msdu_info, tx_exc_metadata);
2257 	if (!tx_desc) {
2258 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2259 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2260 		drop_code = TX_DESC_ERR;
2261 		goto fail_return;
2262 	}
2263 
2264 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2265 
2266 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2267 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2268 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2269 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2270 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2271 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2272 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2273 					       peer_id);
2274 		dp_tx_bypass_reinjection(soc, tx_desc);
2275 	} else
2276 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2277 
2278 	if (msdu_info->exception_fw)
2279 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2280 
2281 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2282 					 !pdev->enhanced_stats_en);
2283 
2284 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2285 
2286 	paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2287 	if (!paddr) {
2288 		/* Handle failure */
2289 		dp_err("qdf_nbuf_map failed");
2290 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2291 		drop_code = TX_DMA_MAP_ERR;
2292 		goto release_desc;
2293 	}
2294 
2295 	tx_desc->dma_addr = paddr;
2296 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2297 			       tx_desc->id, DP_TX_DESC_MAP);
2298 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2299 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2300 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2301 					     htt_tcl_metadata,
2302 					     tx_exc_metadata, msdu_info);
2303 
2304 	if (status != QDF_STATUS_SUCCESS) {
2305 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2306 			     tx_desc, tx_q->ring_id);
2307 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2308 				       tx_desc->id, DP_TX_DESC_UNMAP);
2309 		dp_tx_nbuf_unmap(soc, tx_desc);
2310 		drop_code = TX_HW_ENQUEUE;
2311 		goto release_desc;
2312 	}
2313 
2314 	return NULL;
2315 
2316 release_desc:
2317 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2318 
2319 fail_return:
2320 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2321 	tid_stats = &pdev->stats.tid_stats.
2322 		    tid_tx_stats[tx_q->ring_id][tid];
2323 	tid_stats->swdrop_cnt[drop_code]++;
2324 	return nbuf;
2325 }
2326 
2327 /**
2328  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2329  * @soc: Soc handle
2330  * @desc: software Tx descriptor to be processed
2331  * @delayed_free: defer freeing of nbuf
2332  *
2333  * Return: nbuf to be freed later
2334  */
2335 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2336 			       bool delayed_free)
2337 {
2338 	qdf_nbuf_t nbuf = desc->nbuf;
2339 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2340 
2341 	/* nbuf already freed in vdev detach path */
2342 	if (!nbuf)
2343 		return NULL;
2344 
2345 	/* If it is TDLS mgmt, don't unmap or free the frame */
2346 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2347 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2348 		return NULL;
2349 	}
2350 
2351 	/* 0 : MSDU buffer, 1 : MLE */
2352 	if (desc->msdu_ext_desc) {
2353 		/* TSO free */
2354 		if (hal_tx_ext_desc_get_tso_enable(
2355 					desc->msdu_ext_desc->vaddr)) {
2356 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2357 					       desc->id, DP_TX_COMP_MSDU_EXT);
2358 			dp_tx_tso_seg_history_add(soc,
2359 						  desc->msdu_ext_desc->tso_desc,
2360 						  desc->nbuf, desc->id, type);
2361 			/* unmap eash TSO seg before free the nbuf */
2362 			dp_tx_tso_unmap_segment(soc,
2363 						desc->msdu_ext_desc->tso_desc,
2364 						desc->msdu_ext_desc->
2365 						tso_num_desc);
2366 			goto nbuf_free;
2367 		}
2368 
2369 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2370 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2371 			qdf_dma_addr_t iova;
2372 			uint32_t frag_len;
2373 			uint32_t i;
2374 
2375 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2376 						     QDF_DMA_TO_DEVICE,
2377 						     qdf_nbuf_headlen(nbuf));
2378 
2379 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2380 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2381 							      &iova,
2382 							      &frag_len);
2383 				if (!iova || !frag_len)
2384 					break;
2385 
2386 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2387 						   QDF_DMA_TO_DEVICE);
2388 			}
2389 
2390 			goto nbuf_free;
2391 		}
2392 	}
2393 	/* If it's ME frame, dont unmap the cloned nbuf's */
2394 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2395 		goto nbuf_free;
2396 
2397 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2398 	dp_tx_unmap(soc, desc);
2399 
2400 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2401 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2402 
2403 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2404 		return NULL;
2405 
2406 nbuf_free:
2407 	if (delayed_free)
2408 		return nbuf;
2409 
2410 	qdf_nbuf_free(nbuf);
2411 
2412 	return NULL;
2413 }
2414 
2415 /**
2416  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2417  * @soc: DP soc handle
2418  * @nbuf: skb
2419  * @msdu_info: MSDU info
2420  *
2421  * Return: None
2422  */
2423 static inline void
2424 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2425 		   struct dp_tx_msdu_info_s *msdu_info)
2426 {
2427 	uint32_t cur_idx;
2428 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2429 
2430 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2431 				     qdf_nbuf_headlen(nbuf));
2432 
2433 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2434 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2435 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2436 				    seg->frags[cur_idx].paddr_hi) << 32),
2437 				   seg->frags[cur_idx].len,
2438 				   QDF_DMA_TO_DEVICE);
2439 }
2440 
2441 /**
2442  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2443  * @vdev: DP vdev handle
2444  * @nbuf: skb
2445  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2446  *
2447  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2448  *
2449  * Return: NULL on success,
2450  *         nbuf when it fails to send
2451  */
2452 #if QDF_LOCK_STATS
2453 noinline
2454 #else
2455 #endif
2456 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2457 				    struct dp_tx_msdu_info_s *msdu_info)
2458 {
2459 	uint32_t i;
2460 	struct dp_pdev *pdev = vdev->pdev;
2461 	struct dp_soc *soc = pdev->soc;
2462 	struct dp_tx_desc_s *tx_desc;
2463 	bool is_cce_classified = false;
2464 	QDF_STATUS status;
2465 	uint16_t htt_tcl_metadata = 0;
2466 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2467 	struct cdp_tid_tx_stats *tid_stats = NULL;
2468 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2469 
2470 	if (msdu_info->frm_type == dp_tx_frm_me)
2471 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2472 
2473 	i = 0;
2474 	/* Print statement to track i and num_seg */
2475 	/*
2476 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2477 	 * descriptors using information in msdu_info
2478 	 */
2479 	while (i < msdu_info->num_seg) {
2480 		/*
2481 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2482 		 * descriptor
2483 		 */
2484 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2485 				tx_q->desc_pool_id);
2486 
2487 		if (!tx_desc) {
2488 			if (msdu_info->frm_type == dp_tx_frm_me) {
2489 				prep_desc_fail++;
2490 				dp_tx_me_free_buf(pdev,
2491 					(void *)(msdu_info->u.sg_info
2492 						.curr_seg->frags[0].vaddr));
2493 				if (prep_desc_fail == msdu_info->num_seg) {
2494 					/*
2495 					 * Unmap is needed only if descriptor
2496 					 * preparation failed for all segments.
2497 					 */
2498 					qdf_nbuf_unmap(soc->osdev,
2499 						       msdu_info->u.sg_info.
2500 						       curr_seg->nbuf,
2501 						       QDF_DMA_TO_DEVICE);
2502 				}
2503 				/*
2504 				 * Free the nbuf for the current segment
2505 				 * and make it point to the next in the list.
2506 				 * For me, there are as many segments as there
2507 				 * are no of clients.
2508 				 */
2509 				qdf_nbuf_free(msdu_info->u.sg_info
2510 					      .curr_seg->nbuf);
2511 				if (msdu_info->u.sg_info.curr_seg->next) {
2512 					msdu_info->u.sg_info.curr_seg =
2513 						msdu_info->u.sg_info
2514 						.curr_seg->next;
2515 					nbuf = msdu_info->u.sg_info
2516 					       .curr_seg->nbuf;
2517 				}
2518 				i++;
2519 				continue;
2520 			}
2521 
2522 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2523 				dp_tx_tso_seg_history_add(
2524 						soc,
2525 						msdu_info->u.tso_info.curr_seg,
2526 						nbuf, 0, DP_TX_DESC_UNMAP);
2527 				dp_tx_tso_unmap_segment(soc,
2528 							msdu_info->u.tso_info.
2529 							curr_seg,
2530 							msdu_info->u.tso_info.
2531 							tso_num_seg_list);
2532 
2533 				if (msdu_info->u.tso_info.curr_seg->next) {
2534 					msdu_info->u.tso_info.curr_seg =
2535 					msdu_info->u.tso_info.curr_seg->next;
2536 					i++;
2537 					continue;
2538 				}
2539 			}
2540 
2541 			if (msdu_info->frm_type == dp_tx_frm_sg)
2542 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2543 
2544 			goto done;
2545 		}
2546 
2547 		if (msdu_info->frm_type == dp_tx_frm_me) {
2548 			tx_desc->msdu_ext_desc->me_buffer =
2549 				(struct dp_tx_me_buf_t *)msdu_info->
2550 				u.sg_info.curr_seg->frags[0].vaddr;
2551 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2552 		}
2553 
2554 		if (is_cce_classified)
2555 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2556 
2557 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2558 		if (msdu_info->exception_fw) {
2559 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2560 		}
2561 
2562 		dp_tx_is_hp_update_required(i, msdu_info);
2563 
2564 		/*
2565 		 * For frames with multiple segments (TSO, ME), jump to next
2566 		 * segment.
2567 		 */
2568 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2569 			if (msdu_info->u.tso_info.curr_seg->next) {
2570 				msdu_info->u.tso_info.curr_seg =
2571 					msdu_info->u.tso_info.curr_seg->next;
2572 
2573 				/*
2574 				 * If this is a jumbo nbuf, then increment the
2575 				 * number of nbuf users for each additional
2576 				 * segment of the msdu. This will ensure that
2577 				 * the skb is freed only after receiving tx
2578 				 * completion for all segments of an nbuf
2579 				 */
2580 				qdf_nbuf_inc_users(nbuf);
2581 
2582 				/* Check with MCL if this is needed */
2583 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2584 				 */
2585 			}
2586 		}
2587 
2588 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2589 					 &htt_tcl_metadata,
2590 					 vdev,
2591 					 msdu_info);
2592 		/*
2593 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2594 		 */
2595 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2596 						     htt_tcl_metadata,
2597 						     NULL, msdu_info);
2598 
2599 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2600 
2601 		if (status != QDF_STATUS_SUCCESS) {
2602 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2603 				   tx_desc, tx_q->ring_id);
2604 
2605 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2606 			tid_stats = &pdev->stats.tid_stats.
2607 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2608 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2609 
2610 			if (msdu_info->frm_type == dp_tx_frm_me) {
2611 				hw_enq_fail++;
2612 				if (hw_enq_fail == msdu_info->num_seg) {
2613 					/*
2614 					 * Unmap is needed only if enqueue
2615 					 * failed for all segments.
2616 					 */
2617 					qdf_nbuf_unmap(soc->osdev,
2618 						       msdu_info->u.sg_info.
2619 						       curr_seg->nbuf,
2620 						       QDF_DMA_TO_DEVICE);
2621 				}
2622 				/*
2623 				 * Free the nbuf for the current segment
2624 				 * and make it point to the next in the list.
2625 				 * For me, there are as many segments as there
2626 				 * are no of clients.
2627 				 */
2628 				qdf_nbuf_free(msdu_info->u.sg_info
2629 					      .curr_seg->nbuf);
2630 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2631 				if (msdu_info->u.sg_info.curr_seg->next) {
2632 					msdu_info->u.sg_info.curr_seg =
2633 						msdu_info->u.sg_info
2634 						.curr_seg->next;
2635 					nbuf = msdu_info->u.sg_info
2636 					       .curr_seg->nbuf;
2637 				} else
2638 					break;
2639 				i++;
2640 				continue;
2641 			}
2642 
2643 			/*
2644 			 * For TSO frames, the nbuf users increment done for
2645 			 * the current segment has to be reverted, since the
2646 			 * hw enqueue for this segment failed
2647 			 */
2648 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2649 			    msdu_info->u.tso_info.curr_seg) {
2650 				/*
2651 				 * unmap and free current,
2652 				 * retransmit remaining segments
2653 				 */
2654 				dp_tx_comp_free_buf(soc, tx_desc, false);
2655 				i++;
2656 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2657 				continue;
2658 			}
2659 
2660 			if (msdu_info->frm_type == dp_tx_frm_sg)
2661 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2662 
2663 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2664 			goto done;
2665 		}
2666 
2667 		/*
2668 		 * TODO
2669 		 * if tso_info structure can be modified to have curr_seg
2670 		 * as first element, following 2 blocks of code (for TSO and SG)
2671 		 * can be combined into 1
2672 		 */
2673 
2674 		/*
2675 		 * For Multicast-Unicast converted packets,
2676 		 * each converted frame (for a client) is represented as
2677 		 * 1 segment
2678 		 */
2679 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2680 				(msdu_info->frm_type == dp_tx_frm_me)) {
2681 			if (msdu_info->u.sg_info.curr_seg->next) {
2682 				msdu_info->u.sg_info.curr_seg =
2683 					msdu_info->u.sg_info.curr_seg->next;
2684 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2685 			} else
2686 				break;
2687 		}
2688 		i++;
2689 	}
2690 
2691 	nbuf = NULL;
2692 
2693 done:
2694 	return nbuf;
2695 }
2696 
2697 /**
2698  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2699  *                     for SG frames
2700  * @vdev: DP vdev handle
2701  * @nbuf: skb
2702  * @seg_info: Pointer to Segment info Descriptor to be prepared
2703  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2704  *
2705  * Return: NULL on success,
2706  *         nbuf when it fails to send
2707  */
2708 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2709 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2710 {
2711 	uint32_t cur_frag, nr_frags, i;
2712 	qdf_dma_addr_t paddr;
2713 	struct dp_tx_sg_info_s *sg_info;
2714 
2715 	sg_info = &msdu_info->u.sg_info;
2716 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2717 
2718 	if (QDF_STATUS_SUCCESS !=
2719 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2720 					   QDF_DMA_TO_DEVICE,
2721 					   qdf_nbuf_headlen(nbuf))) {
2722 		dp_tx_err("dma map error");
2723 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2724 		qdf_nbuf_free(nbuf);
2725 		return NULL;
2726 	}
2727 
2728 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2729 	seg_info->frags[0].paddr_lo = paddr;
2730 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2731 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2732 	seg_info->frags[0].vaddr = (void *) nbuf;
2733 
2734 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2735 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2736 							    nbuf, 0,
2737 							    QDF_DMA_TO_DEVICE,
2738 							    cur_frag)) {
2739 			dp_tx_err("frag dma map error");
2740 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2741 			goto map_err;
2742 		}
2743 
2744 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2745 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2746 		seg_info->frags[cur_frag + 1].paddr_hi =
2747 			((uint64_t) paddr) >> 32;
2748 		seg_info->frags[cur_frag + 1].len =
2749 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2750 	}
2751 
2752 	seg_info->frag_cnt = (cur_frag + 1);
2753 	seg_info->total_len = qdf_nbuf_len(nbuf);
2754 	seg_info->next = NULL;
2755 
2756 	sg_info->curr_seg = seg_info;
2757 
2758 	msdu_info->frm_type = dp_tx_frm_sg;
2759 	msdu_info->num_seg = 1;
2760 
2761 	return nbuf;
2762 map_err:
2763 	/* restore paddr into nbuf before calling unmap */
2764 	qdf_nbuf_mapped_paddr_set(nbuf,
2765 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2766 				  ((uint64_t)
2767 				  seg_info->frags[0].paddr_hi) << 32));
2768 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2769 				     QDF_DMA_TO_DEVICE,
2770 				     seg_info->frags[0].len);
2771 	for (i = 1; i <= cur_frag; i++) {
2772 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2773 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2774 				   seg_info->frags[i].paddr_hi) << 32),
2775 				   seg_info->frags[i].len,
2776 				   QDF_DMA_TO_DEVICE);
2777 	}
2778 	qdf_nbuf_free(nbuf);
2779 	return NULL;
2780 }
2781 
2782 /**
2783  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2784  * @vdev: DP vdev handle
2785  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2786  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2787  *
2788  * Return: NULL on failure,
2789  *         nbuf when extracted successfully
2790  */
2791 static
2792 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2793 				    struct dp_tx_msdu_info_s *msdu_info,
2794 				    uint16_t ppdu_cookie)
2795 {
2796 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2797 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2798 
2799 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2800 
2801 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2802 				(msdu_info->meta_data[5], 1);
2803 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2804 				(msdu_info->meta_data[5], 1);
2805 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2806 				(msdu_info->meta_data[6], ppdu_cookie);
2807 
2808 	msdu_info->exception_fw = 1;
2809 	msdu_info->is_tx_sniffer = 1;
2810 }
2811 
2812 #ifdef MESH_MODE_SUPPORT
2813 
2814 /**
2815  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2816 				and prepare msdu_info for mesh frames.
2817  * @vdev: DP vdev handle
2818  * @nbuf: skb
2819  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2820  *
2821  * Return: NULL on failure,
2822  *         nbuf when extracted successfully
2823  */
2824 static
2825 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2826 				struct dp_tx_msdu_info_s *msdu_info)
2827 {
2828 	struct meta_hdr_s *mhdr;
2829 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2830 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2831 
2832 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2833 
2834 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2835 		msdu_info->exception_fw = 0;
2836 		goto remove_meta_hdr;
2837 	}
2838 
2839 	msdu_info->exception_fw = 1;
2840 
2841 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2842 
2843 	meta_data->host_tx_desc_pool = 1;
2844 	meta_data->update_peer_cache = 1;
2845 	meta_data->learning_frame = 1;
2846 
2847 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2848 		meta_data->power = mhdr->power;
2849 
2850 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2851 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2852 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2853 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2854 
2855 		meta_data->dyn_bw = 1;
2856 
2857 		meta_data->valid_pwr = 1;
2858 		meta_data->valid_mcs_mask = 1;
2859 		meta_data->valid_nss_mask = 1;
2860 		meta_data->valid_preamble_type  = 1;
2861 		meta_data->valid_retries = 1;
2862 		meta_data->valid_bw_info = 1;
2863 	}
2864 
2865 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2866 		meta_data->encrypt_type = 0;
2867 		meta_data->valid_encrypt_type = 1;
2868 		meta_data->learning_frame = 0;
2869 	}
2870 
2871 	meta_data->valid_key_flags = 1;
2872 	meta_data->key_flags = (mhdr->keyix & 0x3);
2873 
2874 remove_meta_hdr:
2875 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2876 		dp_tx_err("qdf_nbuf_pull_head failed");
2877 		qdf_nbuf_free(nbuf);
2878 		return NULL;
2879 	}
2880 
2881 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2882 
2883 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2884 		   " tid %d to_fw %d",
2885 		   msdu_info->meta_data[0],
2886 		   msdu_info->meta_data[1],
2887 		   msdu_info->meta_data[2],
2888 		   msdu_info->meta_data[3],
2889 		   msdu_info->meta_data[4],
2890 		   msdu_info->meta_data[5],
2891 		   msdu_info->tid, msdu_info->exception_fw);
2892 
2893 	return nbuf;
2894 }
2895 #else
2896 static
2897 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2898 				struct dp_tx_msdu_info_s *msdu_info)
2899 {
2900 	return nbuf;
2901 }
2902 
2903 #endif
2904 
2905 /**
2906  * dp_check_exc_metadata() - Checks if parameters are valid
2907  * @tx_exc - holds all exception path parameters
2908  *
2909  * Returns true when all the parameters are valid else false
2910  *
2911  */
2912 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2913 {
2914 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2915 			    HTT_INVALID_TID);
2916 	bool invalid_encap_type =
2917 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2918 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2919 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2920 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2921 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2922 			       tx_exc->ppdu_cookie == 0);
2923 
2924 	if (tx_exc->is_intrabss_fwd)
2925 		return true;
2926 
2927 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2928 	    invalid_cookie) {
2929 		return false;
2930 	}
2931 
2932 	return true;
2933 }
2934 
2935 #ifdef ATH_SUPPORT_IQUE
2936 /**
2937  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2938  * @vdev: vdev handle
2939  * @nbuf: skb
2940  *
2941  * Return: true on success,
2942  *         false on failure
2943  */
2944 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2945 {
2946 	qdf_ether_header_t *eh;
2947 
2948 	/* Mcast to Ucast Conversion*/
2949 	if (qdf_likely(!vdev->mcast_enhancement_en))
2950 		return true;
2951 
2952 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2953 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2954 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2955 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2956 		qdf_nbuf_set_next(nbuf, NULL);
2957 
2958 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2959 				 qdf_nbuf_len(nbuf));
2960 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2961 				QDF_STATUS_SUCCESS) {
2962 			return false;
2963 		}
2964 
2965 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2966 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2967 					QDF_STATUS_SUCCESS) {
2968 				return false;
2969 			}
2970 		}
2971 	}
2972 
2973 	return true;
2974 }
2975 #else
2976 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2977 {
2978 	return true;
2979 }
2980 #endif
2981 
2982 /**
2983  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2984  * @nbuf: qdf_nbuf_t
2985  * @vdev: struct dp_vdev *
2986  *
2987  * Allow packet for processing only if it is for peer client which is
2988  * connected with same vap. Drop packet if client is connected to
2989  * different vap.
2990  *
2991  * Return: QDF_STATUS
2992  */
2993 static inline QDF_STATUS
2994 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2995 {
2996 	struct dp_ast_entry *dst_ast_entry = NULL;
2997 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2998 
2999 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3000 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3001 		return QDF_STATUS_SUCCESS;
3002 
3003 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3004 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3005 							eh->ether_dhost,
3006 							vdev->vdev_id);
3007 
3008 	/* If there is no ast entry, return failure */
3009 	if (qdf_unlikely(!dst_ast_entry)) {
3010 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3011 		return QDF_STATUS_E_FAILURE;
3012 	}
3013 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3014 
3015 	return QDF_STATUS_SUCCESS;
3016 }
3017 
3018 /**
3019  * dp_tx_nawds_handler() - NAWDS handler
3020  *
3021  * @soc: DP soc handle
3022  * @vdev_id: id of DP vdev handle
3023  * @msdu_info: msdu_info required to create HTT metadata
3024  * @nbuf: skb
3025  *
3026  * This API transfers the multicast frames with the peer id
3027  * on NAWDS enabled peer.
3028 
3029  * Return: none
3030  */
3031 
3032 static inline
3033 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3034 			 struct dp_tx_msdu_info_s *msdu_info,
3035 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3036 {
3037 	struct dp_peer *peer = NULL;
3038 	qdf_nbuf_t nbuf_clone = NULL;
3039 	uint16_t peer_id = DP_INVALID_PEER;
3040 	struct dp_txrx_peer *txrx_peer;
3041 
3042 	/* This check avoids pkt forwarding which is entered
3043 	 * in the ast table but still doesn't have valid peerid.
3044 	 */
3045 	if (sa_peer_id == HTT_INVALID_PEER)
3046 		return;
3047 
3048 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3049 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3050 		txrx_peer = dp_get_txrx_peer(peer);
3051 		if (!txrx_peer)
3052 			continue;
3053 
3054 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3055 			peer_id = peer->peer_id;
3056 
3057 			if (!dp_peer_is_primary_link_peer(peer))
3058 				continue;
3059 
3060 			/* Multicast packets needs to be
3061 			 * dropped in case of intra bss forwarding
3062 			 */
3063 			if (sa_peer_id == txrx_peer->peer_id) {
3064 				dp_tx_debug("multicast packet");
3065 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3066 							  tx.nawds_mcast_drop,
3067 							  1);
3068 				continue;
3069 			}
3070 
3071 			nbuf_clone = qdf_nbuf_clone(nbuf);
3072 
3073 			if (!nbuf_clone) {
3074 				QDF_TRACE(QDF_MODULE_ID_DP,
3075 					  QDF_TRACE_LEVEL_ERROR,
3076 					  FL("nbuf clone failed"));
3077 				break;
3078 			}
3079 
3080 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3081 							    msdu_info, peer_id,
3082 							    NULL);
3083 
3084 			if (nbuf_clone) {
3085 				dp_tx_debug("pkt send failed");
3086 				qdf_nbuf_free(nbuf_clone);
3087 			} else {
3088 				if (peer_id != DP_INVALID_PEER)
3089 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3090 								      tx.nawds_mcast,
3091 								      1, qdf_nbuf_len(nbuf));
3092 			}
3093 		}
3094 	}
3095 
3096 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3097 }
3098 
3099 /**
3100  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
3101  * @soc: DP soc handle
3102  * @vdev_id: id of DP vdev handle
3103  * @nbuf: skb
3104  * @tx_exc_metadata: Handle that holds exception path meta data
3105  *
3106  * Entry point for Core Tx layer (DP_TX) invoked from
3107  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3108  *
3109  * Return: NULL on success,
3110  *         nbuf when it fails to send
3111  */
3112 qdf_nbuf_t
3113 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3114 		     qdf_nbuf_t nbuf,
3115 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3116 {
3117 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3118 	qdf_ether_header_t *eh = NULL;
3119 	struct dp_tx_msdu_info_s msdu_info;
3120 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3121 						     DP_MOD_ID_TX_EXCEPTION);
3122 
3123 	if (qdf_unlikely(!vdev))
3124 		goto fail;
3125 
3126 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3127 
3128 	if (!tx_exc_metadata)
3129 		goto fail;
3130 
3131 	msdu_info.tid = tx_exc_metadata->tid;
3132 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3133 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3134 			 QDF_MAC_ADDR_REF(nbuf->data));
3135 
3136 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3137 
3138 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3139 		dp_tx_err("Invalid parameters in exception path");
3140 		goto fail;
3141 	}
3142 
3143 	/* for peer based metadata check if peer is valid */
3144 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3145 		struct dp_peer *peer = NULL;
3146 
3147 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3148 					      tx_exc_metadata->peer_id,
3149 					      DP_MOD_ID_TX_EXCEPTION);
3150 		if (qdf_unlikely(!peer)) {
3151 			DP_STATS_INC(vdev,
3152 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3153 				     1);
3154 			goto fail;
3155 		}
3156 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3157 	}
3158 	/* Basic sanity checks for unsupported packets */
3159 
3160 	/* MESH mode */
3161 	if (qdf_unlikely(vdev->mesh_vdev)) {
3162 		dp_tx_err("Mesh mode is not supported in exception path");
3163 		goto fail;
3164 	}
3165 
3166 	/*
3167 	 * Classify the frame and call corresponding
3168 	 * "prepare" function which extracts the segment (TSO)
3169 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3170 	 * into MSDU_INFO structure which is later used to fill
3171 	 * SW and HW descriptors.
3172 	 */
3173 	if (qdf_nbuf_is_tso(nbuf)) {
3174 		dp_verbose_debug("TSO frame %pK", vdev);
3175 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3176 				 qdf_nbuf_len(nbuf));
3177 
3178 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3179 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3180 					 qdf_nbuf_len(nbuf));
3181 			goto fail;
3182 		}
3183 
3184 		goto send_multiple;
3185 	}
3186 
3187 	/* SG */
3188 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3189 		struct dp_tx_seg_info_s seg_info = {0};
3190 
3191 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3192 		if (!nbuf)
3193 			goto fail;
3194 
3195 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3196 
3197 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3198 				 qdf_nbuf_len(nbuf));
3199 
3200 		goto send_multiple;
3201 	}
3202 
3203 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3204 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3205 				 qdf_nbuf_len(nbuf));
3206 
3207 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3208 					       tx_exc_metadata->ppdu_cookie);
3209 	}
3210 
3211 	/*
3212 	 * Get HW Queue to use for this frame.
3213 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3214 	 * dedicated for data and 1 for command.
3215 	 * "queue_id" maps to one hardware ring.
3216 	 *  With each ring, we also associate a unique Tx descriptor pool
3217 	 *  to minimize lock contention for these resources.
3218 	 */
3219 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3220 
3221 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3222 		if (qdf_unlikely(vdev->nawds_enabled)) {
3223 			/*
3224 			 * This is a multicast packet
3225 			 */
3226 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3227 					    tx_exc_metadata->peer_id);
3228 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3229 					 1, qdf_nbuf_len(nbuf));
3230 		}
3231 
3232 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3233 					      DP_INVALID_PEER, NULL);
3234 	} else {
3235 		/*
3236 		 * Check exception descriptors
3237 		 */
3238 		if (dp_tx_exception_limit_check(vdev))
3239 			goto fail;
3240 
3241 		/*  Single linear frame */
3242 		/*
3243 		 * If nbuf is a simple linear frame, use send_single function to
3244 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3245 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3246 		 */
3247 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3248 					      tx_exc_metadata->peer_id,
3249 					      tx_exc_metadata);
3250 	}
3251 
3252 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3253 	return nbuf;
3254 
3255 send_multiple:
3256 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3257 
3258 fail:
3259 	if (vdev)
3260 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3261 	dp_verbose_debug("pkt send failed");
3262 	return nbuf;
3263 }
3264 
3265 /**
3266  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3267  *      in exception path in special case to avoid regular exception path chk.
3268  * @soc: DP soc handle
3269  * @vdev_id: id of DP vdev handle
3270  * @nbuf: skb
3271  * @tx_exc_metadata: Handle that holds exception path meta data
3272  *
3273  * Entry point for Core Tx layer (DP_TX) invoked from
3274  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3275  *
3276  * Return: NULL on success,
3277  *         nbuf when it fails to send
3278  */
3279 qdf_nbuf_t
3280 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3281 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3282 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3283 {
3284 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3285 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3286 						     DP_MOD_ID_TX_EXCEPTION);
3287 
3288 	if (qdf_unlikely(!vdev))
3289 		goto fail;
3290 
3291 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3292 			== QDF_STATUS_E_FAILURE)) {
3293 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3294 		goto fail;
3295 	}
3296 
3297 	/* Unref count as it will again be taken inside dp_tx_exception */
3298 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3299 
3300 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3301 
3302 fail:
3303 	if (vdev)
3304 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3305 	dp_verbose_debug("pkt send failed");
3306 	return nbuf;
3307 }
3308 
3309 /**
3310  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3311  * @soc: DP soc handle
3312  * @vdev_id: DP vdev handle
3313  * @nbuf: skb
3314  *
3315  * Entry point for Core Tx layer (DP_TX) invoked from
3316  * hard_start_xmit in OSIF/HDD
3317  *
3318  * Return: NULL on success,
3319  *         nbuf when it fails to send
3320  */
3321 #ifdef MESH_MODE_SUPPORT
3322 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3323 			   qdf_nbuf_t nbuf)
3324 {
3325 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3326 	struct meta_hdr_s *mhdr;
3327 	qdf_nbuf_t nbuf_mesh = NULL;
3328 	qdf_nbuf_t nbuf_clone = NULL;
3329 	struct dp_vdev *vdev;
3330 	uint8_t no_enc_frame = 0;
3331 
3332 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3333 	if (!nbuf_mesh) {
3334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3335 				"qdf_nbuf_unshare failed");
3336 		return nbuf;
3337 	}
3338 
3339 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3340 	if (!vdev) {
3341 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3342 				"vdev is NULL for vdev_id %d", vdev_id);
3343 		return nbuf;
3344 	}
3345 
3346 	nbuf = nbuf_mesh;
3347 
3348 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3349 
3350 	if ((vdev->sec_type != cdp_sec_type_none) &&
3351 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3352 		no_enc_frame = 1;
3353 
3354 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3355 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3356 
3357 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3358 		       !no_enc_frame) {
3359 		nbuf_clone = qdf_nbuf_clone(nbuf);
3360 		if (!nbuf_clone) {
3361 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3362 				"qdf_nbuf_clone failed");
3363 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3364 			return nbuf;
3365 		}
3366 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3367 	}
3368 
3369 	if (nbuf_clone) {
3370 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3371 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3372 		} else {
3373 			qdf_nbuf_free(nbuf_clone);
3374 		}
3375 	}
3376 
3377 	if (no_enc_frame)
3378 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3379 	else
3380 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3381 
3382 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3383 	if ((!nbuf) && no_enc_frame) {
3384 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3385 	}
3386 
3387 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3388 	return nbuf;
3389 }
3390 
3391 #else
3392 
3393 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3394 			   qdf_nbuf_t nbuf)
3395 {
3396 	return dp_tx_send(soc, vdev_id, nbuf);
3397 }
3398 
3399 #endif
3400 
3401 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3402 static inline
3403 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3404 {
3405 	if (nbuf) {
3406 		qdf_prefetch(&nbuf->len);
3407 		qdf_prefetch(&nbuf->data);
3408 	}
3409 }
3410 #else
3411 static inline
3412 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3413 {
3414 }
3415 #endif
3416 
3417 #ifdef DP_UMAC_HW_RESET_SUPPORT
3418 /*
3419  * dp_tx_drop() - Drop the frame on a given VAP
3420  * @soc: DP soc handle
3421  * @vdev_id: id of DP vdev handle
3422  * @nbuf: skb
3423  *
3424  * Drop all the incoming packets
3425  *
3426  * Return: nbuf
3427  *
3428  */
3429 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3430 		      qdf_nbuf_t nbuf)
3431 {
3432 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3433 	struct dp_vdev *vdev = NULL;
3434 
3435 	vdev = soc->vdev_id_map[vdev_id];
3436 	if (qdf_unlikely(!vdev))
3437 		return nbuf;
3438 
3439 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3440 	return nbuf;
3441 }
3442 
3443 /*
3444  * dp_tx_exc_drop() - Drop the frame on a given VAP
3445  * @soc: DP soc handle
3446  * @vdev_id: id of DP vdev handle
3447  * @nbuf: skb
3448  * @tx_exc_metadata: Handle that holds exception path meta data
3449  *
3450  * Drop all the incoming packets
3451  *
3452  * Return: nbuf
3453  *
3454  */
3455 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3456 			  qdf_nbuf_t nbuf,
3457 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3458 {
3459 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3460 }
3461 #endif
3462 
3463 /*
3464  * dp_tx_send() - Transmit a frame on a given VAP
3465  * @soc: DP soc handle
3466  * @vdev_id: id of DP vdev handle
3467  * @nbuf: skb
3468  *
3469  * Entry point for Core Tx layer (DP_TX) invoked from
3470  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3471  * cases
3472  *
3473  * Return: NULL on success,
3474  *         nbuf when it fails to send
3475  */
3476 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3477 		      qdf_nbuf_t nbuf)
3478 {
3479 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3480 	uint16_t peer_id = HTT_INVALID_PEER;
3481 	/*
3482 	 * doing a memzero is causing additional function call overhead
3483 	 * so doing static stack clearing
3484 	 */
3485 	struct dp_tx_msdu_info_s msdu_info = {0};
3486 	struct dp_vdev *vdev = NULL;
3487 	qdf_nbuf_t end_nbuf = NULL;
3488 
3489 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3490 		return nbuf;
3491 
3492 	/*
3493 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3494 	 * this in per packet path.
3495 	 *
3496 	 * As in this path vdev memory is already protected with netdev
3497 	 * tx lock
3498 	 */
3499 	vdev = soc->vdev_id_map[vdev_id];
3500 	if (qdf_unlikely(!vdev))
3501 		return nbuf;
3502 
3503 	/*
3504 	 * Set Default Host TID value to invalid TID
3505 	 * (TID override disabled)
3506 	 */
3507 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3508 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
3509 
3510 	if (qdf_unlikely(vdev->mesh_vdev)) {
3511 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3512 								&msdu_info);
3513 		if (!nbuf_mesh) {
3514 			dp_verbose_debug("Extracting mesh metadata failed");
3515 			return nbuf;
3516 		}
3517 		nbuf = nbuf_mesh;
3518 	}
3519 
3520 	/*
3521 	 * Get HW Queue to use for this frame.
3522 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
3523 	 * dedicated for data and 1 for command.
3524 	 * "queue_id" maps to one hardware ring.
3525 	 *  With each ring, we also associate a unique Tx descriptor pool
3526 	 *  to minimize lock contention for these resources.
3527 	 */
3528 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3529 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3530 		     1);
3531 
3532 	/*
3533 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3534 	 *  Table 1 - Default DSCP-TID mapping table
3535 	 *  Table 2 - 1 DSCP-TID override table
3536 	 *
3537 	 * If we need a different DSCP-TID mapping for this vap,
3538 	 * call tid_classify to extract DSCP/ToS from frame and
3539 	 * map to a TID and store in msdu_info. This is later used
3540 	 * to fill in TCL Input descriptor (per-packet TID override).
3541 	 */
3542 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3543 
3544 	/*
3545 	 * Classify the frame and call corresponding
3546 	 * "prepare" function which extracts the segment (TSO)
3547 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3548 	 * into MSDU_INFO structure which is later used to fill
3549 	 * SW and HW descriptors.
3550 	 */
3551 	if (qdf_nbuf_is_tso(nbuf)) {
3552 		dp_verbose_debug("TSO frame %pK", vdev);
3553 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3554 				 qdf_nbuf_len(nbuf));
3555 
3556 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3557 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3558 					 qdf_nbuf_len(nbuf));
3559 			return nbuf;
3560 		}
3561 
3562 		goto send_multiple;
3563 	}
3564 
3565 	/* SG */
3566 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3567 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3568 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3569 				return nbuf;
3570 		} else {
3571 			struct dp_tx_seg_info_s seg_info = {0};
3572 
3573 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3574 						&msdu_info);
3575 			if (!nbuf)
3576 				return NULL;
3577 
3578 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3579 
3580 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3581 					 qdf_nbuf_len(nbuf));
3582 
3583 			goto send_multiple;
3584 		}
3585 	}
3586 
3587 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3588 		return NULL;
3589 
3590 	/* RAW */
3591 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3592 		struct dp_tx_seg_info_s seg_info = {0};
3593 
3594 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3595 		if (!nbuf)
3596 			return NULL;
3597 
3598 		dp_verbose_debug("Raw frame %pK", vdev);
3599 
3600 		goto send_multiple;
3601 
3602 	}
3603 
3604 	if (qdf_unlikely(vdev->nawds_enabled)) {
3605 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3606 					  qdf_nbuf_data(nbuf);
3607 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3608 			uint16_t sa_peer_id = DP_INVALID_PEER;
3609 
3610 			if (!soc->ast_offload_support) {
3611 				struct dp_ast_entry *ast_entry = NULL;
3612 
3613 				qdf_spin_lock_bh(&soc->ast_lock);
3614 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3615 					(soc,
3616 					 (uint8_t *)(eh->ether_shost),
3617 					 vdev->pdev->pdev_id);
3618 				if (ast_entry)
3619 					sa_peer_id = ast_entry->peer_id;
3620 				qdf_spin_unlock_bh(&soc->ast_lock);
3621 			}
3622 
3623 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3624 					    sa_peer_id);
3625 		}
3626 		peer_id = DP_INVALID_PEER;
3627 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3628 				 1, qdf_nbuf_len(nbuf));
3629 	}
3630 
3631 	/*  Single linear frame */
3632 	/*
3633 	 * If nbuf is a simple linear frame, use send_single function to
3634 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3635 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3636 	 */
3637 	dp_tx_prefetch_nbuf_data(nbuf);
3638 
3639 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3640 					      peer_id, end_nbuf);
3641 	return nbuf;
3642 
3643 send_multiple:
3644 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3645 
3646 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3647 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3648 
3649 	return nbuf;
3650 }
3651 
3652 /**
3653  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3654  *      case to vaoid check in perpkt path.
3655  * @soc: DP soc handle
3656  * @vdev_id: id of DP vdev handle
3657  * @nbuf: skb
3658  *
3659  * Entry point for Core Tx layer (DP_TX) invoked from
3660  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3661  * with special condition to avoid per pkt check in dp_tx_send
3662  *
3663  * Return: NULL on success,
3664  *         nbuf when it fails to send
3665  */
3666 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3667 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3668 {
3669 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3670 	struct dp_vdev *vdev = NULL;
3671 
3672 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3673 		return nbuf;
3674 
3675 	/*
3676 	 * dp_vdev_get_ref_by_id does does a atomic operation avoid using
3677 	 * this in per packet path.
3678 	 *
3679 	 * As in this path vdev memory is already protected with netdev
3680 	 * tx lock
3681 	 */
3682 	vdev = soc->vdev_id_map[vdev_id];
3683 	if (qdf_unlikely(!vdev))
3684 		return nbuf;
3685 
3686 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3687 			== QDF_STATUS_E_FAILURE)) {
3688 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3689 		return nbuf;
3690 	}
3691 
3692 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3693 }
3694 
3695 #ifdef UMAC_SUPPORT_PROXY_ARP
3696 /**
3697  * dp_tx_proxy_arp() - Tx proxy arp handler
3698  * @vdev: datapath vdev handle
3699  * @buf: sk buffer
3700  *
3701  * Return: status
3702  */
3703 static inline
3704 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3705 {
3706 	if (vdev->osif_proxy_arp)
3707 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3708 
3709 	/*
3710 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3711 	 * osif_proxy_arp has a valid function pointer assigned
3712 	 * to it
3713 	 */
3714 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3715 
3716 	return QDF_STATUS_NOT_INITIALIZED;
3717 }
3718 #else
3719 /**
3720  * dp_tx_proxy_arp() - Tx proxy arp handler
3721  * @vdev: datapath vdev handle
3722  * @buf: sk buffer
3723  *
3724  * This function always return 0 when UMAC_SUPPORT_PROXY_ARP
3725  * is not defined.
3726  *
3727  * Return: status
3728  */
3729 static inline
3730 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3731 {
3732 	return QDF_STATUS_SUCCESS;
3733 }
3734 #endif
3735 
3736 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3737 #ifdef WLAN_MCAST_MLO
3738 static bool
3739 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3740 		       struct dp_tx_desc_s *tx_desc,
3741 		       qdf_nbuf_t nbuf,
3742 		       uint8_t reinject_reason)
3743 {
3744 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3745 		if (soc->arch_ops.dp_tx_mcast_handler)
3746 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3747 
3748 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3749 		return true;
3750 	}
3751 
3752 	return false;
3753 }
3754 #else /* WLAN_MCAST_MLO */
3755 static inline bool
3756 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3757 		       struct dp_tx_desc_s *tx_desc,
3758 		       qdf_nbuf_t nbuf,
3759 		       uint8_t reinject_reason)
3760 {
3761 	return false;
3762 }
3763 #endif /* WLAN_MCAST_MLO */
3764 #else
3765 static inline bool
3766 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3767 		       struct dp_tx_desc_s *tx_desc,
3768 		       qdf_nbuf_t nbuf,
3769 		       uint8_t reinject_reason)
3770 {
3771 	return false;
3772 }
3773 #endif
3774 
3775 /**
3776  * dp_tx_reinject_handler() - Tx Reinject Handler
3777  * @soc: datapath soc handle
3778  * @vdev: datapath vdev handle
3779  * @tx_desc: software descriptor head pointer
3780  * @status : Tx completion status from HTT descriptor
3781  * @reinject_reason : reinject reason from HTT descriptor
3782  *
3783  * This function reinjects frames back to Target.
3784  * Todo - Host queue needs to be added
3785  *
3786  * Return: none
3787  */
3788 void dp_tx_reinject_handler(struct dp_soc *soc,
3789 			    struct dp_vdev *vdev,
3790 			    struct dp_tx_desc_s *tx_desc,
3791 			    uint8_t *status,
3792 			    uint8_t reinject_reason)
3793 {
3794 	struct dp_peer *peer = NULL;
3795 	uint32_t peer_id = HTT_INVALID_PEER;
3796 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3797 	qdf_nbuf_t nbuf_copy = NULL;
3798 	struct dp_tx_msdu_info_s msdu_info;
3799 #ifdef WDS_VENDOR_EXTENSION
3800 	int is_mcast = 0, is_ucast = 0;
3801 	int num_peers_3addr = 0;
3802 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3803 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3804 #endif
3805 	struct dp_txrx_peer *txrx_peer;
3806 
3807 	qdf_assert(vdev);
3808 
3809 	dp_tx_debug("Tx reinject path");
3810 
3811 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3812 			qdf_nbuf_len(tx_desc->nbuf));
3813 
3814 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3815 		return;
3816 
3817 #ifdef WDS_VENDOR_EXTENSION
3818 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3819 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3820 	} else {
3821 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3822 	}
3823 	is_ucast = !is_mcast;
3824 
3825 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3826 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3827 		txrx_peer = dp_get_txrx_peer(peer);
3828 
3829 		if (!txrx_peer || txrx_peer->bss_peer)
3830 			continue;
3831 
3832 		/* Detect wds peers that use 3-addr framing for mcast.
3833 		 * if there are any, the bss_peer is used to send the
3834 		 * the mcast frame using 3-addr format. all wds enabled
3835 		 * peers that use 4-addr framing for mcast frames will
3836 		 * be duplicated and sent as 4-addr frames below.
3837 		 */
3838 		if (!txrx_peer->wds_enabled ||
3839 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3840 			num_peers_3addr = 1;
3841 			break;
3842 		}
3843 	}
3844 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3845 #endif
3846 
3847 	if (qdf_unlikely(vdev->mesh_vdev)) {
3848 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3849 	} else {
3850 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3851 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3852 			txrx_peer = dp_get_txrx_peer(peer);
3853 			if (!txrx_peer)
3854 				continue;
3855 
3856 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3857 #ifdef WDS_VENDOR_EXTENSION
3858 			/*
3859 			 * . if 3-addr STA, then send on BSS Peer
3860 			 * . if Peer WDS enabled and accept 4-addr mcast,
3861 			 * send mcast on that peer only
3862 			 * . if Peer WDS enabled and accept 4-addr ucast,
3863 			 * send ucast on that peer only
3864 			 */
3865 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3866 			 (txrx_peer->wds_enabled &&
3867 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3868 			 (is_ucast &&
3869 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3870 #else
3871 			(txrx_peer->bss_peer &&
3872 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3873 #endif
3874 				peer_id = DP_INVALID_PEER;
3875 
3876 				nbuf_copy = qdf_nbuf_copy(nbuf);
3877 
3878 				if (!nbuf_copy) {
3879 					dp_tx_debug("nbuf copy failed");
3880 					break;
3881 				}
3882 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3883 				dp_tx_get_queue(vdev, nbuf,
3884 						&msdu_info.tx_queue);
3885 
3886 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3887 						nbuf_copy,
3888 						&msdu_info,
3889 						peer_id,
3890 						NULL);
3891 
3892 				if (nbuf_copy) {
3893 					dp_tx_debug("pkt send failed");
3894 					qdf_nbuf_free(nbuf_copy);
3895 				}
3896 			}
3897 		}
3898 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3899 
3900 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
3901 					     QDF_DMA_TO_DEVICE, nbuf->len);
3902 		qdf_nbuf_free(nbuf);
3903 	}
3904 
3905 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3906 }
3907 
3908 /**
3909  * dp_tx_inspect_handler() - Tx Inspect Handler
3910  * @soc: datapath soc handle
3911  * @vdev: datapath vdev handle
3912  * @tx_desc: software descriptor head pointer
3913  * @status : Tx completion status from HTT descriptor
3914  *
3915  * Handles Tx frames sent back to Host for inspection
3916  * (ProxyARP)
3917  *
3918  * Return: none
3919  */
3920 void dp_tx_inspect_handler(struct dp_soc *soc,
3921 			   struct dp_vdev *vdev,
3922 			   struct dp_tx_desc_s *tx_desc,
3923 			   uint8_t *status)
3924 {
3925 
3926 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3927 			"%s Tx inspect path",
3928 			__func__);
3929 
3930 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3931 			 qdf_nbuf_len(tx_desc->nbuf));
3932 
3933 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3934 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3935 }
3936 
3937 #ifdef MESH_MODE_SUPPORT
3938 /**
3939  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3940  *                                         in mesh meta header
3941  * @tx_desc: software descriptor head pointer
3942  * @ts: pointer to tx completion stats
3943  * Return: none
3944  */
3945 static
3946 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3947 		struct hal_tx_completion_status *ts)
3948 {
3949 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3950 
3951 	if (!tx_desc->msdu_ext_desc) {
3952 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3953 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3954 				"netbuf %pK offset %d",
3955 				netbuf, tx_desc->pkt_offset);
3956 			return;
3957 		}
3958 	}
3959 }
3960 
3961 #else
3962 static
3963 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3964 		struct hal_tx_completion_status *ts)
3965 {
3966 }
3967 
3968 #endif
3969 
3970 #ifdef CONFIG_SAWF
3971 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3972 					 struct dp_vdev *vdev,
3973 					 struct dp_txrx_peer *txrx_peer,
3974 					 struct dp_tx_desc_s *tx_desc,
3975 					 struct hal_tx_completion_status *ts,
3976 					 uint8_t tid)
3977 {
3978 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
3979 					   ts, tid);
3980 }
3981 
3982 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
3983 				    uint32_t nw_delay,
3984 				    uint32_t sw_delay,
3985 				    uint32_t hw_delay)
3986 {
3987 	dp_peer_tid_delay_avg(tx_delay,
3988 			      nw_delay,
3989 			      sw_delay,
3990 			      hw_delay);
3991 }
3992 #else
3993 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3994 					 struct dp_vdev *vdev,
3995 					 struct dp_txrx_peer *txrx_peer,
3996 					 struct dp_tx_desc_s *tx_desc,
3997 					 struct hal_tx_completion_status *ts,
3998 					 uint8_t tid)
3999 {
4000 }
4001 
4002 static inline void
4003 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4004 			uint32_t nw_delay, uint32_t sw_delay,
4005 			uint32_t hw_delay)
4006 {
4007 }
4008 #endif
4009 
4010 #ifdef QCA_PEER_EXT_STATS
4011 #ifdef WLAN_CONFIG_TX_DELAY
4012 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4013 				    struct dp_tx_desc_s *tx_desc,
4014 				    struct hal_tx_completion_status *ts,
4015 				    struct dp_vdev *vdev)
4016 {
4017 	struct dp_soc *soc = vdev->pdev->soc;
4018 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4019 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4020 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4021 
4022 	if (!ts->valid)
4023 		return;
4024 
4025 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4026 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4027 
4028 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4029 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4030 
4031 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4032 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4033 							  &fwhw_transmit_delay))
4034 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4035 					     fwhw_transmit_delay);
4036 
4037 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4038 				fwhw_transmit_delay);
4039 }
4040 #else
4041 /*
4042  * dp_tx_compute_tid_delay() - Compute per TID delay
4043  * @stats: Per TID delay stats
4044  * @tx_desc: Software Tx descriptor
4045  * @ts: Tx completion status
4046  * @vdev: vdev
4047  *
4048  * Compute the software enqueue and hw enqueue delays and
4049  * update the respective histograms
4050  *
4051  * Return: void
4052  */
4053 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4054 				    struct dp_tx_desc_s *tx_desc,
4055 				    struct hal_tx_completion_status *ts,
4056 				    struct dp_vdev *vdev)
4057 {
4058 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4059 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4060 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4061 
4062 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4063 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4064 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4065 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4066 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4067 					 timestamp_hw_enqueue);
4068 
4069 	/*
4070 	 * Update the Tx software enqueue delay and HW enque-Completion delay.
4071 	 */
4072 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4073 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4074 }
4075 #endif
4076 
4077 /*
4078  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4079  * @txrx_peer: DP peer context
4080  * @tx_desc: Tx software descriptor
4081  * @tid: Transmission ID
4082  * @ring_id: Rx CPU context ID/CPU_ID
4083  *
4084  * Update the peer extended stats. These are enhanced other
4085  * delay stats per msdu level.
4086  *
4087  * Return: void
4088  */
4089 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4090 					  struct dp_tx_desc_s *tx_desc,
4091 					  struct hal_tx_completion_status *ts,
4092 					  uint8_t ring_id)
4093 {
4094 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4095 	struct dp_soc *soc = NULL;
4096 	struct dp_peer_delay_stats *delay_stats = NULL;
4097 	uint8_t tid;
4098 
4099 	soc = pdev->soc;
4100 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4101 		return;
4102 
4103 	tid = ts->tid;
4104 	delay_stats = txrx_peer->delay_stats;
4105 
4106 	qdf_assert(delay_stats);
4107 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4108 
4109 	/*
4110 	 * For non-TID packets use the TID 9
4111 	 */
4112 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4113 		tid = CDP_MAX_DATA_TIDS - 1;
4114 
4115 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4116 				tx_desc, ts, txrx_peer->vdev);
4117 }
4118 #else
4119 static inline
4120 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4121 				   struct dp_tx_desc_s *tx_desc,
4122 				   struct hal_tx_completion_status *ts,
4123 				   uint8_t ring_id)
4124 {
4125 }
4126 #endif
4127 
4128 #ifdef WLAN_PEER_JITTER
4129 /*
4130  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4131  * @curr_delay: Current delay
4132  * @prev_Delay: Previous delay
4133  * @avg_jitter: Average Jitter
4134  * Return: Newly Computed Average Jitter
4135  */
4136 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4137 					    uint32_t prev_delay,
4138 					    uint32_t avg_jitter)
4139 {
4140 	uint32_t curr_jitter;
4141 	int32_t jitter_diff;
4142 
4143 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4144 	if (!avg_jitter)
4145 		return curr_jitter;
4146 
4147 	jitter_diff = curr_jitter - avg_jitter;
4148 	if (jitter_diff < 0)
4149 		avg_jitter = avg_jitter -
4150 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4151 	else
4152 		avg_jitter = avg_jitter +
4153 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4154 
4155 	return avg_jitter;
4156 }
4157 
4158 /*
4159  * dp_tx_jitter_get_avg_delay() - compute the average delay
4160  * @curr_delay: Current delay
4161  * @avg_Delay: Average delay
4162  * Return: Newly Computed Average Delay
4163  */
4164 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4165 					   uint32_t avg_delay)
4166 {
4167 	int32_t delay_diff;
4168 
4169 	if (!avg_delay)
4170 		return curr_delay;
4171 
4172 	delay_diff = curr_delay - avg_delay;
4173 	if (delay_diff < 0)
4174 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4175 					DP_AVG_DELAY_WEIGHT_DENOM);
4176 	else
4177 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4178 					DP_AVG_DELAY_WEIGHT_DENOM);
4179 
4180 	return avg_delay;
4181 }
4182 
4183 #ifdef WLAN_CONFIG_TX_DELAY
4184 /*
4185  * dp_tx_compute_cur_delay() - get the current delay
4186  * @soc: soc handle
4187  * @vdev: vdev structure for data path state
4188  * @ts: Tx completion status
4189  * @curr_delay: current delay
4190  * @tx_desc: tx descriptor
4191  * Return: void
4192  */
4193 static
4194 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4195 				   struct dp_vdev *vdev,
4196 				   struct hal_tx_completion_status *ts,
4197 				   uint32_t *curr_delay,
4198 				   struct dp_tx_desc_s *tx_desc)
4199 {
4200 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4201 
4202 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4203 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4204 							      curr_delay);
4205 	return status;
4206 }
4207 #else
4208 static
4209 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4210 				   struct dp_vdev *vdev,
4211 				   struct hal_tx_completion_status *ts,
4212 				   uint32_t *curr_delay,
4213 				   struct dp_tx_desc_s *tx_desc)
4214 {
4215 	int64_t current_timestamp, timestamp_hw_enqueue;
4216 
4217 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4218 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4219 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4220 
4221 	return QDF_STATUS_SUCCESS;
4222 }
4223 #endif
4224 
4225 /* dp_tx_compute_tid_jitter() - compute per tid per ring jitter
4226  * @jiiter - per tid per ring jitter stats
4227  * @ts: Tx completion status
4228  * @vdev - vdev structure for data path state
4229  * @tx_desc - tx descriptor
4230  * Return: void
4231  */
4232 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
4233 				     struct hal_tx_completion_status *ts,
4234 				     struct dp_vdev *vdev,
4235 				     struct dp_tx_desc_s *tx_desc)
4236 {
4237 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
4238 	struct dp_soc *soc = vdev->pdev->soc;
4239 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4240 
4241 	if (ts->status !=  HAL_TX_TQM_RR_FRAME_ACKED) {
4242 		jitter->tx_drop += 1;
4243 		return;
4244 	}
4245 
4246 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
4247 					 tx_desc);
4248 
4249 	if (QDF_IS_STATUS_SUCCESS(status)) {
4250 		avg_delay = jitter->tx_avg_delay;
4251 		avg_jitter = jitter->tx_avg_jitter;
4252 		prev_delay = jitter->tx_prev_delay;
4253 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
4254 							 prev_delay,
4255 							 avg_jitter);
4256 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
4257 		jitter->tx_avg_delay = avg_delay;
4258 		jitter->tx_avg_jitter = avg_jitter;
4259 		jitter->tx_prev_delay = curr_delay;
4260 		jitter->tx_total_success += 1;
4261 	} else if (status == QDF_STATUS_E_FAILURE) {
4262 		jitter->tx_avg_err += 1;
4263 	}
4264 }
4265 
4266 /* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
4267  * @txrx_peer: DP peer context
4268  * @tx_desc: Tx software descriptor
4269  * @ts: Tx completion status
4270  * @ring_id: Rx CPU context ID/CPU_ID
4271  * Return: void
4272  */
4273 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4274 					   struct dp_tx_desc_s *tx_desc,
4275 					   struct hal_tx_completion_status *ts,
4276 					   uint8_t ring_id)
4277 {
4278 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4279 	struct dp_soc *soc = pdev->soc;
4280 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4281 	uint8_t tid;
4282 	struct cdp_peer_tid_stats *rx_tid = NULL;
4283 
4284 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
4285 		return;
4286 
4287 	tid = ts->tid;
4288 	jitter_stats = txrx_peer->jitter_stats;
4289 	qdf_assert_always(jitter_stats);
4290 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
4291 	/*
4292 	 * For non-TID packets use the TID 9
4293 	 */
4294 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4295 		tid = CDP_MAX_DATA_TIDS - 1;
4296 
4297 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
4298 	dp_tx_compute_tid_jitter(rx_tid,
4299 				 ts, txrx_peer->vdev, tx_desc);
4300 }
4301 #else
4302 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4303 					   struct dp_tx_desc_s *tx_desc,
4304 					   struct hal_tx_completion_status *ts,
4305 					   uint8_t ring_id)
4306 {
4307 }
4308 #endif
4309 
4310 #ifdef HW_TX_DELAY_STATS_ENABLE
4311 /**
4312  * dp_update_tx_delay_stats() - update the delay stats
4313  * @vdev: vdev handle
4314  * @delay: delay in ms or us based on the flag delay_in_us
4315  * @tid: tid value
4316  * @mode: type of tx delay mode
4317  * @ring id: ring number
4318  * @delay_in_us: flag to indicate whether the delay is in ms or us
4319  *
4320  * Return: none
4321  */
4322 static inline
4323 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4324 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4325 {
4326 	struct cdp_tid_tx_stats *tstats =
4327 		&vdev->stats.tid_tx_stats[ring_id][tid];
4328 
4329 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4330 			      delay_in_us);
4331 }
4332 #else
4333 static inline
4334 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4335 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4336 {
4337 	struct cdp_tid_tx_stats *tstats =
4338 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4339 
4340 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4341 			      delay_in_us);
4342 }
4343 #endif
4344 
4345 /**
4346  * dp_tx_compute_delay() - Compute and fill in all timestamps
4347  *				to pass in correct fields
4348  *
4349  * @vdev: pdev handle
4350  * @tx_desc: tx descriptor
4351  * @tid: tid value
4352  * @ring_id: TCL or WBM ring number for transmit path
4353  * Return: none
4354  */
4355 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4356 			 uint8_t tid, uint8_t ring_id)
4357 {
4358 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4359 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4360 	uint32_t fwhw_transmit_delay_us;
4361 
4362 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4363 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4364 		return;
4365 
4366 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4367 		fwhw_transmit_delay_us =
4368 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4369 			qdf_ktime_to_us(tx_desc->timestamp);
4370 
4371 		/*
4372 		 * Delay between packet enqueued to HW and Tx completion in us
4373 		 */
4374 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4375 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4376 					 ring_id, true);
4377 		/*
4378 		 * For MCL, only enqueue to completion delay is required
4379 		 * so return if the vdev flag is enabled.
4380 		 */
4381 		return;
4382 	}
4383 
4384 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4385 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4386 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4387 					 timestamp_hw_enqueue);
4388 
4389 	/*
4390 	 * Delay between packet enqueued to HW and Tx completion in ms
4391 	 */
4392 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4393 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4394 				 false);
4395 
4396 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4397 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4398 	interframe_delay = (uint32_t)(timestamp_ingress -
4399 				      vdev->prev_tx_enq_tstamp);
4400 
4401 	/*
4402 	 * Delay in software enqueue
4403 	 */
4404 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4405 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4406 				 false);
4407 
4408 	/*
4409 	 * Update interframe delay stats calculated at hardstart receive point.
4410 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4411 	 * interframe delay will not be calculate correctly for 1st frame.
4412 	 * On the other side, this will help in avoiding extra per packet check
4413 	 * of !vdev->prev_tx_enq_tstamp.
4414 	 */
4415 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4416 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4417 				 false);
4418 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4419 }
4420 
4421 #ifdef DISABLE_DP_STATS
4422 static
4423 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4424 				   struct dp_txrx_peer *txrx_peer)
4425 {
4426 }
4427 #else
4428 static inline void
4429 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4430 {
4431 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4432 
4433 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4434 	if (subtype != QDF_PROTO_INVALID)
4435 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4436 					  1);
4437 }
4438 #endif
4439 
4440 #ifndef QCA_ENHANCED_STATS_SUPPORT
4441 #ifdef DP_PEER_EXTENDED_API
4442 static inline uint8_t
4443 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4444 {
4445 	return txrx_peer->mpdu_retry_threshold;
4446 }
4447 #else
4448 static inline uint8_t
4449 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4450 {
4451 	return 0;
4452 }
4453 #endif
4454 
4455 /**
4456  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4457  *
4458  * @ts: Tx compltion status
4459  * @txrx_peer: datapath txrx_peer handle
4460  *
4461  * Return: void
4462  */
4463 static inline void
4464 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4465 			     struct dp_txrx_peer *txrx_peer)
4466 {
4467 	uint8_t mcs, pkt_type, dst_mcs_idx;
4468 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4469 
4470 	mcs = ts->mcs;
4471 	pkt_type = ts->pkt_type;
4472 	/* do HW to SW pkt type conversion */
4473 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4474 		    hal_2_dp_pkt_type_map[pkt_type]);
4475 
4476 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4477 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4478 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4479 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4480 				       1);
4481 
4482 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4483 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4484 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4485 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4486 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4487 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4488 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4489 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4490 	if (ts->first_msdu) {
4491 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4492 					ts->transmit_cnt > 1);
4493 
4494 		if (!retry_threshold)
4495 			return;
4496 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4497 					qdf_do_div(ts->transmit_cnt,
4498 						   retry_threshold),
4499 					ts->transmit_cnt > retry_threshold);
4500 	}
4501 }
4502 #else
4503 static inline void
4504 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4505 			     struct dp_txrx_peer *txrx_peer)
4506 {
4507 }
4508 #endif
4509 
4510 /**
4511  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4512  *				per wbm ring
4513  *
4514  * @tx_desc: software descriptor head pointer
4515  * @ts: Tx completion status
4516  * @peer: peer handle
4517  * @ring_id: ring number
4518  *
4519  * Return: None
4520  */
4521 static inline void
4522 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4523 			struct hal_tx_completion_status *ts,
4524 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4525 {
4526 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4527 	uint8_t tid = ts->tid;
4528 	uint32_t length;
4529 	struct cdp_tid_tx_stats *tid_stats;
4530 
4531 	if (!pdev)
4532 		return;
4533 
4534 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4535 		tid = CDP_MAX_DATA_TIDS - 1;
4536 
4537 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4538 
4539 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4540 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4541 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4542 		return;
4543 	}
4544 
4545 	length = qdf_nbuf_len(tx_desc->nbuf);
4546 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4547 
4548 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4549 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4550 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4551 
4552 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4553 		tid_stats->tqm_status_cnt[ts->status]++;
4554 	}
4555 
4556 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4557 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4558 					   ts->transmit_cnt > 1);
4559 
4560 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4561 					   1, ts->transmit_cnt > 2);
4562 
4563 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4564 
4565 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4566 					   ts->msdu_part_of_amsdu);
4567 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4568 					   !ts->msdu_part_of_amsdu);
4569 
4570 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4571 							qdf_system_ticks();
4572 
4573 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4574 
4575 		return;
4576 	}
4577 
4578 	/*
4579 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4580 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
4581 	 * hw limitation there are no completions for failed cases.
4582 	 * Hence updating tx_failed from data path. Please note that
4583 	 * if tx_failed is fixed to be from ppdu, then this has to be
4584 	 * removed
4585 	 */
4586 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4587 
4588 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4589 				   ts->transmit_cnt > DP_RETRY_COUNT);
4590 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4591 
4592 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4593 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4594 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4595 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4596 					      length);
4597 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4598 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4599 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4600 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4601 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4602 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4603 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4604 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4605 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4606 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4607 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4608 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4609 					  tx.dropped.fw_rem_queue_disable, 1);
4610 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4611 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4612 					  tx.dropped.fw_rem_no_match, 1);
4613 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4614 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4615 					  tx.dropped.drop_threshold, 1);
4616 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4617 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4618 					  tx.dropped.drop_link_desc_na, 1);
4619 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4620 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4621 					  tx.dropped.invalid_drop, 1);
4622 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4623 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4624 					  tx.dropped.mcast_vdev_drop, 1);
4625 	} else {
4626 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4627 	}
4628 }
4629 
4630 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4631 /**
4632  * dp_tx_flow_pool_lock() - take flow pool lock
4633  * @soc: core txrx main context
4634  * @tx_desc: tx desc
4635  *
4636  * Return: None
4637  */
4638 static inline
4639 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4640 			  struct dp_tx_desc_s *tx_desc)
4641 {
4642 	struct dp_tx_desc_pool_s *pool;
4643 	uint8_t desc_pool_id;
4644 
4645 	desc_pool_id = tx_desc->pool_id;
4646 	pool = &soc->tx_desc[desc_pool_id];
4647 
4648 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4649 }
4650 
4651 /**
4652  * dp_tx_flow_pool_unlock() - release flow pool lock
4653  * @soc: core txrx main context
4654  * @tx_desc: tx desc
4655  *
4656  * Return: None
4657  */
4658 static inline
4659 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4660 			    struct dp_tx_desc_s *tx_desc)
4661 {
4662 	struct dp_tx_desc_pool_s *pool;
4663 	uint8_t desc_pool_id;
4664 
4665 	desc_pool_id = tx_desc->pool_id;
4666 	pool = &soc->tx_desc[desc_pool_id];
4667 
4668 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4669 }
4670 #else
4671 static inline
4672 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4673 {
4674 }
4675 
4676 static inline
4677 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4678 {
4679 }
4680 #endif
4681 
4682 /**
4683  * dp_tx_notify_completion() - Notify tx completion for this desc
4684  * @soc: core txrx main context
4685  * @vdev: datapath vdev handle
4686  * @tx_desc: tx desc
4687  * @netbuf:  buffer
4688  * @status: tx status
4689  *
4690  * Return: none
4691  */
4692 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4693 					   struct dp_vdev *vdev,
4694 					   struct dp_tx_desc_s *tx_desc,
4695 					   qdf_nbuf_t netbuf,
4696 					   uint8_t status)
4697 {
4698 	void *osif_dev;
4699 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4700 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4701 
4702 	qdf_assert(tx_desc);
4703 
4704 	if (!vdev ||
4705 	    !vdev->osif_vdev) {
4706 		return;
4707 	}
4708 
4709 	osif_dev = vdev->osif_vdev;
4710 	tx_compl_cbk = vdev->tx_comp;
4711 
4712 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4713 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4714 
4715 	if (tx_compl_cbk)
4716 		tx_compl_cbk(netbuf, osif_dev, flag);
4717 }
4718 
4719 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
4720  * @pdev: pdev handle
4721  * @tid: tid value
4722  * @txdesc_ts: timestamp from txdesc
4723  * @ppdu_id: ppdu id
4724  *
4725  * Return: none
4726  */
4727 #ifdef FEATURE_PERPKT_INFO
4728 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4729 					       struct dp_txrx_peer *txrx_peer,
4730 					       uint8_t tid,
4731 					       uint64_t txdesc_ts,
4732 					       uint32_t ppdu_id)
4733 {
4734 	uint64_t delta_ms;
4735 	struct cdp_tx_sojourn_stats *sojourn_stats;
4736 	struct dp_peer *primary_link_peer = NULL;
4737 	struct dp_soc *link_peer_soc = NULL;
4738 
4739 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4740 		return;
4741 
4742 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4743 			 tid >= CDP_DATA_TID_MAX))
4744 		return;
4745 
4746 	if (qdf_unlikely(!pdev->sojourn_buf))
4747 		return;
4748 
4749 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4750 							   txrx_peer->peer_id,
4751 							   DP_MOD_ID_TX_COMP);
4752 
4753 	if (qdf_unlikely(!primary_link_peer))
4754 		return;
4755 
4756 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4757 		qdf_nbuf_data(pdev->sojourn_buf);
4758 
4759 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4760 	sojourn_stats->cookie = (void *)
4761 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4762 							  primary_link_peer);
4763 
4764 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4765 				txdesc_ts;
4766 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4767 			    delta_ms);
4768 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4769 	sojourn_stats->num_msdus[tid] = 1;
4770 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4771 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4772 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4773 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4774 			     WDI_NO_VAL, pdev->pdev_id);
4775 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4776 	sojourn_stats->num_msdus[tid] = 0;
4777 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4778 
4779 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4780 }
4781 #else
4782 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4783 					       struct dp_txrx_peer *txrx_peer,
4784 					       uint8_t tid,
4785 					       uint64_t txdesc_ts,
4786 					       uint32_t ppdu_id)
4787 {
4788 }
4789 #endif
4790 
4791 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4792 /**
4793  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4794  * @soc: dp_soc handle
4795  * @desc: Tx Descriptor
4796  * @ts: HAL Tx completion descriptor contents
4797  *
4798  * This function is used to send tx completion to packet capture
4799  */
4800 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4801 				       struct dp_tx_desc_s *desc,
4802 				       struct hal_tx_completion_status *ts)
4803 {
4804 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4805 			     desc, ts->peer_id,
4806 			     WDI_NO_VAL, desc->pdev->pdev_id);
4807 }
4808 #endif
4809 
4810 /**
4811  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4812  * @soc: DP Soc handle
4813  * @tx_desc: software Tx descriptor
4814  * @ts : Tx completion status from HAL/HTT descriptor
4815  *
4816  * Return: none
4817  */
4818 void
4819 dp_tx_comp_process_desc(struct dp_soc *soc,
4820 			struct dp_tx_desc_s *desc,
4821 			struct hal_tx_completion_status *ts,
4822 			struct dp_txrx_peer *txrx_peer)
4823 {
4824 	uint64_t time_latency = 0;
4825 	uint16_t peer_id = DP_INVALID_PEER_ID;
4826 
4827 	/*
4828 	 * m_copy/tx_capture modes are not supported for
4829 	 * scatter gather packets
4830 	 */
4831 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4832 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4833 				qdf_ktime_to_ms(desc->timestamp));
4834 	}
4835 
4836 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4837 
4838 	if (dp_tx_pkt_tracepoints_enabled())
4839 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4840 				    desc->msdu_ext_desc ?
4841 				    desc->msdu_ext_desc->tso_desc : NULL,
4842 				    qdf_ktime_to_ms(desc->timestamp));
4843 
4844 	if (!(desc->msdu_ext_desc)) {
4845 		dp_tx_enh_unmap(soc, desc);
4846 		if (txrx_peer)
4847 			peer_id = txrx_peer->peer_id;
4848 
4849 		if (QDF_STATUS_SUCCESS ==
4850 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4851 			return;
4852 		}
4853 
4854 		if (QDF_STATUS_SUCCESS ==
4855 		    dp_get_completion_indication_for_stack(soc,
4856 							   desc->pdev,
4857 							   txrx_peer, ts,
4858 							   desc->nbuf,
4859 							   time_latency)) {
4860 			dp_send_completion_to_stack(soc,
4861 						    desc->pdev,
4862 						    ts->peer_id,
4863 						    ts->ppdu_id,
4864 						    desc->nbuf);
4865 			return;
4866 		}
4867 	}
4868 
4869 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4870 	dp_tx_comp_free_buf(soc, desc, false);
4871 }
4872 
4873 #ifdef DISABLE_DP_STATS
4874 /**
4875  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4876  * @soc: core txrx main context
4877  * @tx_desc: tx desc
4878  * @status: tx status
4879  *
4880  * Return: none
4881  */
4882 static inline
4883 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4884 				     struct dp_vdev *vdev,
4885 				     struct dp_tx_desc_s *tx_desc,
4886 				     uint8_t status)
4887 {
4888 }
4889 #else
4890 static inline
4891 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4892 				     struct dp_vdev *vdev,
4893 				     struct dp_tx_desc_s *tx_desc,
4894 				     uint8_t status)
4895 {
4896 	void *osif_dev;
4897 	ol_txrx_stats_rx_fp stats_cbk;
4898 	uint8_t pkt_type;
4899 
4900 	qdf_assert(tx_desc);
4901 
4902 	if (!vdev ||
4903 	    !vdev->osif_vdev ||
4904 	    !vdev->stats_cb)
4905 		return;
4906 
4907 	osif_dev = vdev->osif_vdev;
4908 	stats_cbk = vdev->stats_cb;
4909 
4910 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4911 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4912 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4913 			  &pkt_type);
4914 }
4915 #endif
4916 
4917 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
4918 QDF_STATUS
4919 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
4920 			  uint32_t delta_tsf,
4921 			  uint32_t *delay_us)
4922 {
4923 	uint32_t buffer_ts;
4924 	uint32_t delay;
4925 
4926 	if (!delay_us)
4927 		return QDF_STATUS_E_INVAL;
4928 
4929 	/* Tx_rate_stats_info_valid is 0 and tsf is invalid then */
4930 	if (!ts->valid)
4931 		return QDF_STATUS_E_INVAL;
4932 
4933 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4934 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4935 	 * valid up to 29 bits.
4936 	 */
4937 	buffer_ts = ts->buffer_timestamp << 10;
4938 
4939 	delay = ts->tsf - buffer_ts - delta_tsf;
4940 
4941 	if (qdf_unlikely(delay & 0x80000000)) {
4942 		dp_err_rl("delay = 0x%x (-ve)\n"
4943 			  "release_src = %d\n"
4944 			  "ppdu_id = 0x%x\n"
4945 			  "peer_id = 0x%x\n"
4946 			  "tid = 0x%x\n"
4947 			  "release_reason = %d\n"
4948 			  "tsf = %u (0x%x)\n"
4949 			  "buffer_timestamp = %u (0x%x)\n"
4950 			  "delta_tsf = %u (0x%x)\n",
4951 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
4952 			  ts->tid, ts->status, ts->tsf, ts->tsf,
4953 			  ts->buffer_timestamp, ts->buffer_timestamp,
4954 			  delta_tsf, delta_tsf);
4955 
4956 		delay = 0;
4957 		goto end;
4958 	}
4959 
4960 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
4961 	if (delay > 0x1000000) {
4962 		dp_info_rl("----------------------\n"
4963 			   "Tx completion status:\n"
4964 			   "----------------------\n"
4965 			   "release_src = %d\n"
4966 			   "ppdu_id = 0x%x\n"
4967 			   "release_reason = %d\n"
4968 			   "tsf = %u (0x%x)\n"
4969 			   "buffer_timestamp = %u (0x%x)\n"
4970 			   "delta_tsf = %u (0x%x)\n",
4971 			   ts->release_src, ts->ppdu_id, ts->status,
4972 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4973 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4974 		return QDF_STATUS_E_FAILURE;
4975 	}
4976 
4977 
4978 end:
4979 	*delay_us = delay;
4980 
4981 	return QDF_STATUS_SUCCESS;
4982 }
4983 
4984 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4985 		      uint32_t delta_tsf)
4986 {
4987 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4988 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4989 						     DP_MOD_ID_CDP);
4990 
4991 	if (!vdev) {
4992 		dp_err_rl("vdev %d does not exist", vdev_id);
4993 		return;
4994 	}
4995 
4996 	vdev->delta_tsf = delta_tsf;
4997 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4998 
4999 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5000 }
5001 #endif
5002 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5003 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5004 				      uint8_t vdev_id, bool enable)
5005 {
5006 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5007 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5008 						     DP_MOD_ID_CDP);
5009 
5010 	if (!vdev) {
5011 		dp_err_rl("vdev %d does not exist", vdev_id);
5012 		return QDF_STATUS_E_FAILURE;
5013 	}
5014 
5015 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5016 
5017 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5018 
5019 	return QDF_STATUS_SUCCESS;
5020 }
5021 
5022 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5023 			       uint32_t *val)
5024 {
5025 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5026 	struct dp_vdev *vdev;
5027 	uint32_t delay_accum;
5028 	uint32_t pkts_accum;
5029 
5030 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5031 	if (!vdev) {
5032 		dp_err_rl("vdev %d does not exist", vdev_id);
5033 		return QDF_STATUS_E_FAILURE;
5034 	}
5035 
5036 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5037 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5038 		return QDF_STATUS_E_FAILURE;
5039 	}
5040 
5041 	/* Average uplink delay based on current accumulated values */
5042 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5043 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5044 
5045 	*val = delay_accum / pkts_accum;
5046 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5047 		 delay_accum, pkts_accum);
5048 
5049 	/* Reset accumulated values to 0 */
5050 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5051 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5052 
5053 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5054 
5055 	return QDF_STATUS_SUCCESS;
5056 }
5057 
5058 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5059 				      struct hal_tx_completion_status *ts)
5060 {
5061 	uint32_t ul_delay;
5062 
5063 	if (qdf_unlikely(!vdev)) {
5064 		dp_info_rl("vdev is null or delete in progress");
5065 		return;
5066 	}
5067 
5068 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5069 		return;
5070 
5071 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5072 							  vdev->delta_tsf,
5073 							  &ul_delay)))
5074 		return;
5075 
5076 	ul_delay /= 1000; /* in unit of ms */
5077 
5078 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5079 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5080 }
5081 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5082 static inline
5083 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5084 			       struct hal_tx_completion_status *ts)
5085 {
5086 }
5087 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5088 
5089 /**
5090  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
5091  * @soc: DP soc handle
5092  * @tx_desc: software descriptor head pointer
5093  * @ts: Tx completion status
5094  * @txrx_peer: txrx peer handle
5095  * @ring_id: ring number
5096  *
5097  * Return: none
5098  */
5099 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
5100 				  struct dp_tx_desc_s *tx_desc,
5101 				  struct hal_tx_completion_status *ts,
5102 				  struct dp_txrx_peer *txrx_peer,
5103 				  uint8_t ring_id)
5104 {
5105 	uint32_t length;
5106 	qdf_ether_header_t *eh;
5107 	struct dp_vdev *vdev = NULL;
5108 	qdf_nbuf_t nbuf = tx_desc->nbuf;
5109 	enum qdf_dp_tx_rx_status dp_status;
5110 
5111 	if (!nbuf) {
5112 		dp_info_rl("invalid tx descriptor. nbuf NULL");
5113 		goto out;
5114 	}
5115 
5116 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
5117 	length = qdf_nbuf_len(nbuf);
5118 
5119 	dp_status = dp_tx_hw_to_qdf(ts->status);
5120 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
5121 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
5122 				 QDF_TRACE_DEFAULT_PDEV_ID,
5123 				 qdf_nbuf_data_addr(nbuf),
5124 				 sizeof(qdf_nbuf_data(nbuf)),
5125 				 tx_desc->id, ts->status, dp_status));
5126 
5127 	dp_tx_comp_debug("-------------------- \n"
5128 			 "Tx Completion Stats: \n"
5129 			 "-------------------- \n"
5130 			 "ack_frame_rssi = %d \n"
5131 			 "first_msdu = %d \n"
5132 			 "last_msdu = %d \n"
5133 			 "msdu_part_of_amsdu = %d \n"
5134 			 "rate_stats valid = %d \n"
5135 			 "bw = %d \n"
5136 			 "pkt_type = %d \n"
5137 			 "stbc = %d \n"
5138 			 "ldpc = %d \n"
5139 			 "sgi = %d \n"
5140 			 "mcs = %d \n"
5141 			 "ofdma = %d \n"
5142 			 "tones_in_ru = %d \n"
5143 			 "tsf = %d \n"
5144 			 "ppdu_id = %d \n"
5145 			 "transmit_cnt = %d \n"
5146 			 "tid = %d \n"
5147 			 "peer_id = %d\n"
5148 			 "tx_status = %d\n",
5149 			 ts->ack_frame_rssi, ts->first_msdu,
5150 			 ts->last_msdu, ts->msdu_part_of_amsdu,
5151 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
5152 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
5153 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
5154 			 ts->transmit_cnt, ts->tid, ts->peer_id,
5155 			 ts->status);
5156 
5157 	/* Update SoC level stats */
5158 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
5159 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
5160 
5161 	if (!txrx_peer) {
5162 		dp_info_rl("peer is null or deletion in progress");
5163 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
5164 		goto out;
5165 	}
5166 	vdev = txrx_peer->vdev;
5167 
5168 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
5169 	dp_tx_update_uplink_delay(soc, vdev, ts);
5170 
5171 	/* check tx complete notification */
5172 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
5173 		dp_tx_notify_completion(soc, vdev, tx_desc,
5174 					nbuf, ts->status);
5175 
5176 	/* Update per-packet stats for mesh mode */
5177 	if (qdf_unlikely(vdev->mesh_vdev) &&
5178 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
5179 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
5180 
5181 	/* Update peer level stats */
5182 	if (qdf_unlikely(txrx_peer->bss_peer &&
5183 			 vdev->opmode == wlan_op_mode_ap)) {
5184 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
5185 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5186 						      length);
5187 
5188 			if (txrx_peer->vdev->tx_encap_type ==
5189 				htt_cmn_pkt_type_ethernet &&
5190 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5191 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5192 							      tx.bcast, 1,
5193 							      length);
5194 			}
5195 		}
5196 	} else {
5197 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
5198 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5199 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5200 						      1, length);
5201 			if (qdf_unlikely(txrx_peer->in_twt)) {
5202 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5203 							      tx.tx_success_twt,
5204 							      1, length);
5205 			}
5206 		}
5207 	}
5208 
5209 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
5210 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5211 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
5212 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5213 				     ts, ts->tid);
5214 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5215 
5216 #ifdef QCA_SUPPORT_RDK_STATS
5217 	if (soc->peerstats_enabled)
5218 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5219 					    qdf_ktime_to_ms(tx_desc->timestamp),
5220 					    ts->ppdu_id);
5221 #endif
5222 
5223 out:
5224 	return;
5225 }
5226 
5227 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5228 	defined(QCA_ENHANCED_STATS_SUPPORT)
5229 /*
5230  * dp_tx_update_peer_basic_stats(): Update peer basic stats
5231  * @txrx_peer: Datapath txrx_peer handle
5232  * @length: Length of the packet
5233  * @tx_status: Tx status from TQM/FW
5234  * @update: enhanced flag value present in dp_pdev
5235  *
5236  * Return: none
5237  */
5238 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5239 				   uint32_t length, uint8_t tx_status,
5240 				   bool update)
5241 {
5242 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5243 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5244 
5245 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5246 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5247 	}
5248 }
5249 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5250 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5251 				   uint32_t length, uint8_t tx_status,
5252 				   bool update)
5253 {
5254 	if (!txrx_peer->hw_txrx_stats_en) {
5255 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5256 
5257 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5258 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5259 	}
5260 }
5261 
5262 #else
5263 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5264 				   uint32_t length, uint8_t tx_status,
5265 				   bool update)
5266 {
5267 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5268 
5269 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5270 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5271 }
5272 #endif
5273 
5274 /*
5275  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
5276  * @nbuf: skb buffer
5277  *
5278  * Return: none
5279  */
5280 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5281 static inline
5282 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5283 {
5284 	qdf_nbuf_t nbuf = NULL;
5285 
5286 	if (next)
5287 		nbuf = next->nbuf;
5288 	if (nbuf) {
5289 		/* prefetch skb->next and first few bytes of skb->cb */
5290 		qdf_prefetch(next->shinfo_addr);
5291 		qdf_prefetch(nbuf);
5292 		/* prefetch skb fields present in different cachelines */
5293 		qdf_prefetch(&nbuf->len);
5294 		qdf_prefetch(&nbuf->users);
5295 	}
5296 }
5297 #else
5298 static inline
5299 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5300 {
5301 }
5302 #endif
5303 
5304 /**
5305  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
5306  * @soc: core txrx main context
5307  * @desc: software descriptor
5308  *
5309  * Return: true when packet is reinjected
5310  */
5311 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5312 	defined(WLAN_MCAST_MLO)
5313 static inline bool
5314 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5315 {
5316 	struct dp_vdev *vdev = NULL;
5317 
5318 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5319 		if (!soc->arch_ops.dp_tx_mcast_handler)
5320 			return false;
5321 
5322 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5323 					     DP_MOD_ID_REINJECT);
5324 
5325 		if (qdf_unlikely(!vdev)) {
5326 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
5327 					   desc->id);
5328 			return false;
5329 		}
5330 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5331 				 qdf_nbuf_len(desc->nbuf));
5332 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5333 		dp_tx_desc_release(desc, desc->pool_id);
5334 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5335 		return true;
5336 	}
5337 
5338 	return false;
5339 }
5340 #else
5341 static inline bool
5342 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5343 {
5344 	return false;
5345 }
5346 #endif
5347 
5348 /**
5349  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
5350  * @soc: core txrx main context
5351  * @comp_head: software descriptor head pointer
5352  * @ring_id: ring number
5353  *
5354  * This function will process batch of descriptors reaped by dp_tx_comp_handler
5355  * and release the software descriptors after processing is complete
5356  *
5357  * Return: none
5358  */
5359 static void
5360 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5361 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5362 {
5363 	struct dp_tx_desc_s *desc;
5364 	struct dp_tx_desc_s *next;
5365 	struct hal_tx_completion_status ts;
5366 	struct dp_txrx_peer *txrx_peer = NULL;
5367 	uint16_t peer_id = DP_INVALID_PEER;
5368 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5369 
5370 	desc = comp_head;
5371 
5372 	while (desc) {
5373 		next = desc->next;
5374 		dp_tx_prefetch_next_nbuf_data(next);
5375 
5376 		if (peer_id != desc->peer_id) {
5377 			if (txrx_peer)
5378 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5379 							  DP_MOD_ID_TX_COMP);
5380 			peer_id = desc->peer_id;
5381 			txrx_peer =
5382 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5383 							   &txrx_ref_handle,
5384 							   DP_MOD_ID_TX_COMP);
5385 		}
5386 
5387 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5388 			desc = next;
5389 			continue;
5390 		}
5391 
5392 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
5393 			if (qdf_likely(txrx_peer))
5394 				dp_tx_update_peer_basic_stats(txrx_peer,
5395 							      desc->length,
5396 							      desc->tx_status,
5397 							      false);
5398 			qdf_nbuf_free(desc->nbuf);
5399 			dp_ppeds_tx_desc_free(soc, desc);
5400 			desc = next;
5401 			continue;
5402 		}
5403 
5404 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5405 			struct dp_pdev *pdev = desc->pdev;
5406 
5407 			if (qdf_likely(txrx_peer))
5408 				dp_tx_update_peer_basic_stats(txrx_peer,
5409 							      desc->length,
5410 							      desc->tx_status,
5411 							      false);
5412 			qdf_assert(pdev);
5413 			dp_tx_outstanding_dec(pdev);
5414 
5415 			/*
5416 			 * Calling a QDF WRAPPER here is creating significant
5417 			 * performance impact so avoided the wrapper call here
5418 			 */
5419 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5420 					       desc->id, DP_TX_COMP_UNMAP);
5421 			dp_tx_nbuf_unmap(soc, desc);
5422 			qdf_nbuf_free_simple(desc->nbuf);
5423 			dp_tx_desc_free(soc, desc, desc->pool_id);
5424 			desc = next;
5425 			continue;
5426 		}
5427 
5428 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5429 
5430 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5431 					     ring_id);
5432 
5433 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5434 
5435 		dp_tx_desc_release(desc, desc->pool_id);
5436 		desc = next;
5437 	}
5438 	if (txrx_peer)
5439 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5440 }
5441 
5442 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5443 static inline
5444 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5445 				   int max_reap_limit)
5446 {
5447 	bool limit_hit = false;
5448 
5449 	limit_hit =
5450 		(num_reaped >= max_reap_limit) ? true : false;
5451 
5452 	if (limit_hit)
5453 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5454 
5455 	return limit_hit;
5456 }
5457 
5458 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5459 {
5460 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5461 }
5462 
5463 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5464 {
5465 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5466 
5467 	return cfg->tx_comp_loop_pkt_limit;
5468 }
5469 #else
5470 static inline
5471 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5472 				   int max_reap_limit)
5473 {
5474 	return false;
5475 }
5476 
5477 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5478 {
5479 	return false;
5480 }
5481 
5482 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5483 {
5484 	return 0;
5485 }
5486 #endif
5487 
5488 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5489 static inline int
5490 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5491 				  int *max_reap_limit)
5492 {
5493 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5494 							       max_reap_limit);
5495 }
5496 #else
5497 static inline int
5498 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5499 				  int *max_reap_limit)
5500 {
5501 	return 0;
5502 }
5503 #endif
5504 
5505 #ifdef DP_TX_TRACKING
5506 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5507 {
5508 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5509 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5510 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5511 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5512 	}
5513 }
5514 #endif
5515 
5516 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5517 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5518 			    uint32_t quota)
5519 {
5520 	void *tx_comp_hal_desc;
5521 	void *last_prefetched_hw_desc = NULL;
5522 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5523 	hal_soc_handle_t hal_soc;
5524 	uint8_t buffer_src;
5525 	struct dp_tx_desc_s *tx_desc = NULL;
5526 	struct dp_tx_desc_s *head_desc = NULL;
5527 	struct dp_tx_desc_s *tail_desc = NULL;
5528 	uint32_t num_processed = 0;
5529 	uint32_t count;
5530 	uint32_t num_avail_for_reap = 0;
5531 	bool force_break = false;
5532 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5533 	int max_reap_limit, ring_near_full;
5534 
5535 	DP_HIST_INIT();
5536 
5537 more_data:
5538 
5539 	hal_soc = soc->hal_soc;
5540 	/* Re-initialize local variables to be re-used */
5541 	head_desc = NULL;
5542 	tail_desc = NULL;
5543 	count = 0;
5544 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5545 
5546 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5547 							   &max_reap_limit);
5548 
5549 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5550 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5551 		return 0;
5552 	}
5553 
5554 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
5555 
5556 	if (num_avail_for_reap >= quota)
5557 		num_avail_for_reap = quota;
5558 
5559 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5560 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5561 							    hal_ring_hdl,
5562 							    num_avail_for_reap);
5563 
5564 	/* Find head descriptor from completion ring */
5565 	while (qdf_likely(num_avail_for_reap--)) {
5566 
5567 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5568 		if (qdf_unlikely(!tx_comp_hal_desc))
5569 			break;
5570 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5571 							   tx_comp_hal_desc);
5572 
5573 		/* If this buffer was not released by TQM or FW, then it is not
5574 		 * Tx completion indication, assert */
5575 		if (qdf_unlikely(buffer_src !=
5576 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5577 				 (qdf_unlikely(buffer_src !=
5578 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5579 			uint8_t wbm_internal_error;
5580 
5581 			dp_err_rl(
5582 				"Tx comp release_src != TQM | FW but from %d",
5583 				buffer_src);
5584 			hal_dump_comp_desc(tx_comp_hal_desc);
5585 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5586 
5587 			/* When WBM sees NULL buffer_addr_info in any of
5588 			 * ingress rings it sends an error indication,
5589 			 * with wbm_internal_error=1, to a specific ring.
5590 			 * The WBM2SW ring used to indicate these errors is
5591 			 * fixed in HW, and that ring is being used as Tx
5592 			 * completion ring. These errors are not related to
5593 			 * Tx completions, and should just be ignored
5594 			 */
5595 			wbm_internal_error = hal_get_wbm_internal_error(
5596 							hal_soc,
5597 							tx_comp_hal_desc);
5598 
5599 			if (wbm_internal_error) {
5600 				dp_err_rl("Tx comp wbm_internal_error!!");
5601 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5602 
5603 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5604 								buffer_src)
5605 					dp_handle_wbm_internal_error(
5606 						soc,
5607 						tx_comp_hal_desc,
5608 						hal_tx_comp_get_buffer_type(
5609 							tx_comp_hal_desc));
5610 
5611 			} else {
5612 				dp_err_rl("Tx comp wbm_internal_error false");
5613 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5614 			}
5615 			continue;
5616 		}
5617 
5618 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5619 							       tx_comp_hal_desc,
5620 							       &tx_desc);
5621 		if (!tx_desc) {
5622 			dp_err("unable to retrieve tx_desc!");
5623 			QDF_BUG(0);
5624 			continue;
5625 		}
5626 		tx_desc->buffer_src = buffer_src;
5627 
5628 		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
5629 			goto add_to_pool2;
5630 
5631 		/*
5632 		 * If the release source is FW, process the HTT status
5633 		 */
5634 		if (qdf_unlikely(buffer_src ==
5635 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5636 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5637 
5638 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5639 					htt_tx_status);
5640 			/* Collect hw completion contents */
5641 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5642 					      &tx_desc->comp, 1);
5643 			soc->arch_ops.dp_tx_process_htt_completion(
5644 							soc,
5645 							tx_desc,
5646 							htt_tx_status,
5647 							ring_id);
5648 		} else {
5649 			tx_desc->tx_status =
5650 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5651 			tx_desc->buffer_src = buffer_src;
5652 			/*
5653 			 * If the fast completion mode is enabled extended
5654 			 * metadata from descriptor is not copied
5655 			 */
5656 			if (qdf_likely(tx_desc->flags &
5657 						DP_TX_DESC_FLAG_SIMPLE))
5658 				goto add_to_pool;
5659 
5660 			/*
5661 			 * If the descriptor is already freed in vdev_detach,
5662 			 * continue to next descriptor
5663 			 */
5664 			if (qdf_unlikely
5665 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5666 				 !tx_desc->flags)) {
5667 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5668 						   tx_desc->id);
5669 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5670 				dp_tx_desc_check_corruption(tx_desc);
5671 				continue;
5672 			}
5673 
5674 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5675 				dp_tx_comp_info_rl("pdev in down state %d",
5676 						   tx_desc->id);
5677 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5678 				dp_tx_comp_free_buf(soc, tx_desc, false);
5679 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5680 				goto next_desc;
5681 			}
5682 
5683 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5684 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5685 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
5686 						 tx_desc->flags, tx_desc->id);
5687 				qdf_assert_always(0);
5688 			}
5689 
5690 			/* Collect hw completion contents */
5691 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5692 					      &tx_desc->comp, 1);
5693 add_to_pool:
5694 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5695 
5696 add_to_pool2:
5697 			/* First ring descriptor on the cycle */
5698 			if (!head_desc) {
5699 				head_desc = tx_desc;
5700 				tail_desc = tx_desc;
5701 			}
5702 
5703 			tail_desc->next = tx_desc;
5704 			tx_desc->next = NULL;
5705 			tail_desc = tx_desc;
5706 		}
5707 next_desc:
5708 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5709 
5710 		/*
5711 		 * Processed packet count is more than given quota
5712 		 * stop to processing
5713 		 */
5714 
5715 		count++;
5716 
5717 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5718 					       num_avail_for_reap,
5719 					       hal_ring_hdl,
5720 					       &last_prefetched_hw_desc,
5721 					       &last_prefetched_sw_desc);
5722 
5723 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5724 			break;
5725 	}
5726 
5727 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5728 
5729 	/* Process the reaped descriptors */
5730 	if (head_desc)
5731 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5732 
5733 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5734 
5735 	/*
5736 	 * If we are processing in near-full condition, there are 3 scenario
5737 	 * 1) Ring entries has reached critical state
5738 	 * 2) Ring entries are still near high threshold
5739 	 * 3) Ring entries are below the safe level
5740 	 *
5741 	 * One more loop will move the state to normal processing and yield
5742 	 */
5743 	if (ring_near_full)
5744 		goto more_data;
5745 
5746 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5747 
5748 		if (num_processed >= quota)
5749 			force_break = true;
5750 
5751 		if (!force_break &&
5752 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5753 						  hal_ring_hdl)) {
5754 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5755 			if (!hif_exec_should_yield(soc->hif_handle,
5756 						   int_ctx->dp_intr_id))
5757 				goto more_data;
5758 		}
5759 	}
5760 	DP_TX_HIST_STATS_PER_PDEV();
5761 
5762 	return num_processed;
5763 }
5764 
5765 #ifdef FEATURE_WLAN_TDLS
5766 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5767 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5768 {
5769 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5770 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5771 						     DP_MOD_ID_TDLS);
5772 
5773 	if (!vdev) {
5774 		dp_err("vdev handle for id %d is NULL", vdev_id);
5775 		return NULL;
5776 	}
5777 
5778 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5779 		vdev->is_tdls_frame = true;
5780 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5781 
5782 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5783 }
5784 #endif
5785 
5786 /**
5787  * dp_tx_vdev_attach() - attach vdev to dp tx
5788  * @vdev: virtual device instance
5789  *
5790  * Return: QDF_STATUS_SUCCESS: success
5791  *         QDF_STATUS_E_RESOURCES: Error return
5792  */
5793 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5794 {
5795 	int pdev_id;
5796 	/*
5797 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5798 	 */
5799 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5800 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5801 
5802 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5803 				       vdev->vdev_id);
5804 
5805 	pdev_id =
5806 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5807 						       vdev->pdev->pdev_id);
5808 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5809 
5810 	/*
5811 	 * Set HTT Extension Valid bit to 0 by default
5812 	 */
5813 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5814 
5815 	dp_tx_vdev_update_search_flags(vdev);
5816 
5817 	return QDF_STATUS_SUCCESS;
5818 }
5819 
5820 #ifndef FEATURE_WDS
5821 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5822 {
5823 	return false;
5824 }
5825 #endif
5826 
5827 /**
5828  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5829  * @vdev: virtual device instance
5830  *
5831  * Return: void
5832  *
5833  */
5834 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5835 {
5836 	struct dp_soc *soc = vdev->pdev->soc;
5837 
5838 	/*
5839 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
5840 	 * for TDLS link
5841 	 *
5842 	 * Enable AddrY (SA based search) only for non-WDS STA and
5843 	 * ProxySTA VAP (in HKv1) modes.
5844 	 *
5845 	 * In all other VAP modes, only DA based search should be
5846 	 * enabled
5847 	 */
5848 	if (vdev->opmode == wlan_op_mode_sta &&
5849 	    vdev->tdls_link_connected)
5850 		vdev->hal_desc_addr_search_flags =
5851 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5852 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5853 		 !dp_tx_da_search_override(vdev))
5854 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5855 	else
5856 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5857 
5858 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
5859 		vdev->search_type = soc->sta_mode_search_policy;
5860 	else
5861 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5862 }
5863 
5864 static inline bool
5865 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5866 			  struct dp_vdev *vdev,
5867 			  struct dp_tx_desc_s *tx_desc)
5868 {
5869 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
5870 		return false;
5871 
5872 	/*
5873 	 * if vdev is given, then only check whether desc
5874 	 * vdev match. if vdev is NULL, then check whether
5875 	 * desc pdev match.
5876 	 */
5877 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
5878 		(tx_desc->pdev == pdev);
5879 }
5880 
5881 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5882 /**
5883  * dp_tx_desc_flush() - release resources associated
5884  *                      to TX Desc
5885  *
5886  * @dp_pdev: Handle to DP pdev structure
5887  * @vdev: virtual device instance
5888  * NULL: no specific Vdev is required and check all allcated TX desc
5889  * on this pdev.
5890  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
5891  *
5892  * @force_free:
5893  * true: flush the TX desc.
5894  * false: only reset the Vdev in each allocated TX desc
5895  * that associated to current Vdev.
5896  *
5897  * This function will go through the TX desc pool to flush
5898  * the outstanding TX data or reset Vdev to NULL in associated TX
5899  * Desc.
5900  */
5901 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5902 		      bool force_free)
5903 {
5904 	uint8_t i;
5905 	uint32_t j;
5906 	uint32_t num_desc, page_id, offset;
5907 	uint16_t num_desc_per_page;
5908 	struct dp_soc *soc = pdev->soc;
5909 	struct dp_tx_desc_s *tx_desc = NULL;
5910 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5911 
5912 	if (!vdev && !force_free) {
5913 		dp_err("Reset TX desc vdev, Vdev param is required!");
5914 		return;
5915 	}
5916 
5917 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
5918 		tx_desc_pool = &soc->tx_desc[i];
5919 		if (!(tx_desc_pool->pool_size) ||
5920 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
5921 		    !(tx_desc_pool->desc_pages.cacheable_pages))
5922 			continue;
5923 
5924 		/*
5925 		 * Add flow pool lock protection in case pool is freed
5926 		 * due to all tx_desc is recycled when handle TX completion.
5927 		 * this is not necessary when do force flush as:
5928 		 * a. double lock will happen if dp_tx_desc_release is
5929 		 *    also trying to acquire it.
5930 		 * b. dp interrupt has been disabled before do force TX desc
5931 		 *    flush in dp_pdev_deinit().
5932 		 */
5933 		if (!force_free)
5934 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
5935 		num_desc = tx_desc_pool->pool_size;
5936 		num_desc_per_page =
5937 			tx_desc_pool->desc_pages.num_element_per_page;
5938 		for (j = 0; j < num_desc; j++) {
5939 			page_id = j / num_desc_per_page;
5940 			offset = j % num_desc_per_page;
5941 
5942 			if (qdf_unlikely(!(tx_desc_pool->
5943 					 desc_pages.cacheable_pages)))
5944 				break;
5945 
5946 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5947 
5948 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5949 				/*
5950 				 * Free TX desc if force free is
5951 				 * required, otherwise only reset vdev
5952 				 * in this TX desc.
5953 				 */
5954 				if (force_free) {
5955 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5956 					dp_tx_comp_free_buf(soc, tx_desc,
5957 							    false);
5958 					dp_tx_desc_release(tx_desc, i);
5959 				} else {
5960 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5961 				}
5962 			}
5963 		}
5964 		if (!force_free)
5965 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5966 	}
5967 }
5968 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5969 /**
5970  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5971  *
5972  * @soc: Handle to DP soc structure
5973  * @tx_desc: pointer of one TX desc
5974  * @desc_pool_id: TX Desc pool id
5975  */
5976 static inline void
5977 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5978 		      uint8_t desc_pool_id)
5979 {
5980 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5981 
5982 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5983 
5984 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5985 }
5986 
5987 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5988 		      bool force_free)
5989 {
5990 	uint8_t i, num_pool;
5991 	uint32_t j;
5992 	uint32_t num_desc, page_id, offset;
5993 	uint16_t num_desc_per_page;
5994 	struct dp_soc *soc = pdev->soc;
5995 	struct dp_tx_desc_s *tx_desc = NULL;
5996 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5997 
5998 	if (!vdev && !force_free) {
5999 		dp_err("Reset TX desc vdev, Vdev param is required!");
6000 		return;
6001 	}
6002 
6003 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6004 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6005 
6006 	for (i = 0; i < num_pool; i++) {
6007 		tx_desc_pool = &soc->tx_desc[i];
6008 		if (!tx_desc_pool->desc_pages.cacheable_pages)
6009 			continue;
6010 
6011 		num_desc_per_page =
6012 			tx_desc_pool->desc_pages.num_element_per_page;
6013 		for (j = 0; j < num_desc; j++) {
6014 			page_id = j / num_desc_per_page;
6015 			offset = j % num_desc_per_page;
6016 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6017 
6018 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6019 				if (force_free) {
6020 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6021 					dp_tx_comp_free_buf(soc, tx_desc,
6022 							    false);
6023 					dp_tx_desc_release(tx_desc, i);
6024 				} else {
6025 					dp_tx_desc_reset_vdev(soc, tx_desc,
6026 							      i);
6027 				}
6028 			}
6029 		}
6030 	}
6031 }
6032 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6033 
6034 /**
6035  * dp_tx_vdev_detach() - detach vdev from dp tx
6036  * @vdev: virtual device instance
6037  *
6038  * Return: QDF_STATUS_SUCCESS: success
6039  *         QDF_STATUS_E_RESOURCES: Error return
6040  */
6041 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
6042 {
6043 	struct dp_pdev *pdev = vdev->pdev;
6044 
6045 	/* Reset TX desc associated to this Vdev as NULL */
6046 	dp_tx_desc_flush(pdev, vdev, false);
6047 
6048 	return QDF_STATUS_SUCCESS;
6049 }
6050 
6051 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6052 /* Pools will be allocated dynamically */
6053 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6054 					   int num_desc)
6055 {
6056 	uint8_t i;
6057 
6058 	for (i = 0; i < num_pool; i++) {
6059 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
6060 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
6061 	}
6062 
6063 	return QDF_STATUS_SUCCESS;
6064 }
6065 
6066 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6067 					  uint32_t num_desc)
6068 {
6069 	return QDF_STATUS_SUCCESS;
6070 }
6071 
6072 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6073 {
6074 }
6075 
6076 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6077 {
6078 	uint8_t i;
6079 
6080 	for (i = 0; i < num_pool; i++)
6081 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
6082 }
6083 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6084 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6085 					   uint32_t num_desc)
6086 {
6087 	uint8_t i, count;
6088 
6089 	/* Allocate software Tx descriptor pools */
6090 	for (i = 0; i < num_pool; i++) {
6091 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
6092 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6093 				  FL("Tx Desc Pool alloc %d failed %pK"),
6094 				  i, soc);
6095 			goto fail;
6096 		}
6097 	}
6098 	return QDF_STATUS_SUCCESS;
6099 
6100 fail:
6101 	for (count = 0; count < i; count++)
6102 		dp_tx_desc_pool_free(soc, count);
6103 
6104 	return QDF_STATUS_E_NOMEM;
6105 }
6106 
6107 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6108 					  uint32_t num_desc)
6109 {
6110 	uint8_t i;
6111 	for (i = 0; i < num_pool; i++) {
6112 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
6113 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6114 				  FL("Tx Desc Pool init %d failed %pK"),
6115 				  i, soc);
6116 			return QDF_STATUS_E_NOMEM;
6117 		}
6118 	}
6119 	return QDF_STATUS_SUCCESS;
6120 }
6121 
6122 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6123 {
6124 	uint8_t i;
6125 
6126 	for (i = 0; i < num_pool; i++)
6127 		dp_tx_desc_pool_deinit(soc, i);
6128 }
6129 
6130 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6131 {
6132 	uint8_t i;
6133 
6134 	for (i = 0; i < num_pool; i++)
6135 		dp_tx_desc_pool_free(soc, i);
6136 }
6137 
6138 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6139 
6140 /**
6141  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
6142  * @soc: core txrx main context
6143  * @num_pool: number of pools
6144  *
6145  */
6146 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
6147 {
6148 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
6149 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
6150 }
6151 
6152 /**
6153  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
6154  * @soc: core txrx main context
6155  * @num_pool: number of pools
6156  *
6157  */
6158 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
6159 {
6160 	dp_tx_tso_desc_pool_free(soc, num_pool);
6161 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
6162 }
6163 
6164 /**
6165  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
6166  * @soc: core txrx main context
6167  *
6168  * This function frees all tx related descriptors as below
6169  * 1. Regular TX descriptors (static pools)
6170  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6171  * 3. TSO descriptors
6172  *
6173  */
6174 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6175 {
6176 	uint8_t num_pool;
6177 
6178 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6179 
6180 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6181 	dp_tx_ext_desc_pool_free(soc, num_pool);
6182 	dp_tx_delete_static_pools(soc, num_pool);
6183 }
6184 
6185 /**
6186  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
6187  * @soc: core txrx main context
6188  *
6189  * This function de-initializes all tx related descriptors as below
6190  * 1. Regular TX descriptors (static pools)
6191  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6192  * 3. TSO descriptors
6193  *
6194  */
6195 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6196 {
6197 	uint8_t num_pool;
6198 
6199 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6200 
6201 	dp_tx_flow_control_deinit(soc);
6202 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6203 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6204 	dp_tx_deinit_static_pools(soc, num_pool);
6205 }
6206 
6207 /**
6208  * dp_tso_attach() - TSO attach handler
6209  * @txrx_soc: Opaque Dp handle
6210  *
6211  * Reserve TSO descriptor buffers
6212  *
6213  * Return: QDF_STATUS_E_FAILURE on failure or
6214  * QDF_STATUS_SUCCESS on success
6215  */
6216 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6217 					 uint8_t num_pool,
6218 					 uint32_t num_desc)
6219 {
6220 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6221 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6222 		return QDF_STATUS_E_FAILURE;
6223 	}
6224 
6225 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6226 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6227 		       num_pool, soc);
6228 		return QDF_STATUS_E_FAILURE;
6229 	}
6230 	return QDF_STATUS_SUCCESS;
6231 }
6232 
6233 /**
6234  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6235  * @soc: DP soc handle
6236  * @num_pool: Number of pools
6237  * @num_desc: Number of descriptors
6238  *
6239  * Initialize TSO descriptor pools
6240  *
6241  * Return: QDF_STATUS_E_FAILURE on failure or
6242  * QDF_STATUS_SUCCESS on success
6243  */
6244 
6245 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6246 					uint8_t num_pool,
6247 					uint32_t num_desc)
6248 {
6249 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6250 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6251 		return QDF_STATUS_E_FAILURE;
6252 	}
6253 
6254 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6255 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6256 		       num_pool, soc);
6257 		return QDF_STATUS_E_FAILURE;
6258 	}
6259 	return QDF_STATUS_SUCCESS;
6260 }
6261 
6262 /**
6263  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
6264  * @soc: core txrx main context
6265  *
6266  * This function allocates memory for following descriptor pools
6267  * 1. regular sw tx descriptor pools (static pools)
6268  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6269  * 3. TSO descriptor pools
6270  *
6271  * Return: QDF_STATUS_SUCCESS: success
6272  *         QDF_STATUS_E_RESOURCES: Error return
6273  */
6274 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6275 {
6276 	uint8_t num_pool;
6277 	uint32_t num_desc;
6278 	uint32_t num_ext_desc;
6279 
6280 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6281 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6282 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6283 
6284 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6285 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6286 		  __func__, num_pool, num_desc);
6287 
6288 	if ((num_pool > MAX_TXDESC_POOLS) ||
6289 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6290 		goto fail1;
6291 
6292 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6293 		goto fail1;
6294 
6295 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
6296 		goto fail2;
6297 
6298 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6299 		return QDF_STATUS_SUCCESS;
6300 
6301 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6302 		goto fail3;
6303 
6304 	return QDF_STATUS_SUCCESS;
6305 
6306 fail3:
6307 	dp_tx_ext_desc_pool_free(soc, num_pool);
6308 fail2:
6309 	dp_tx_delete_static_pools(soc, num_pool);
6310 fail1:
6311 	return QDF_STATUS_E_RESOURCES;
6312 }
6313 
6314 /**
6315  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
6316  * @soc: core txrx main context
6317  *
6318  * This function initializes the following TX descriptor pools
6319  * 1. regular sw tx descriptor pools (static pools)
6320  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6321  * 3. TSO descriptor pools
6322  *
6323  * Return: QDF_STATUS_SUCCESS: success
6324  *	   QDF_STATUS_E_RESOURCES: Error return
6325  */
6326 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6327 {
6328 	uint8_t num_pool;
6329 	uint32_t num_desc;
6330 	uint32_t num_ext_desc;
6331 
6332 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6333 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6334 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6335 
6336 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6337 		goto fail1;
6338 
6339 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6340 		goto fail2;
6341 
6342 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6343 		return QDF_STATUS_SUCCESS;
6344 
6345 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6346 		goto fail3;
6347 
6348 	dp_tx_flow_control_init(soc);
6349 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6350 	return QDF_STATUS_SUCCESS;
6351 
6352 fail3:
6353 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6354 fail2:
6355 	dp_tx_deinit_static_pools(soc, num_pool);
6356 fail1:
6357 	return QDF_STATUS_E_RESOURCES;
6358 }
6359 
6360 /**
6361  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
6362  * @txrx_soc: dp soc handle
6363  *
6364  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6365  *			QDF_STATUS_E_FAILURE
6366  */
6367 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6368 {
6369 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6370 	uint8_t num_pool;
6371 	uint32_t num_desc;
6372 	uint32_t num_ext_desc;
6373 
6374 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6375 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6376 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6377 
6378 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6379 		return QDF_STATUS_E_FAILURE;
6380 
6381 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6382 		return QDF_STATUS_E_FAILURE;
6383 
6384 	return QDF_STATUS_SUCCESS;
6385 }
6386 
6387 /**
6388  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
6389  * @txrx_soc: dp soc handle
6390  *
6391  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6392  */
6393 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6394 {
6395 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6396 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6397 
6398 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6399 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6400 
6401 	return QDF_STATUS_SUCCESS;
6402 }
6403 
6404 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6405 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6406 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6407 			  qdf_nbuf_t nbuf)
6408 {
6409 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6410 		uint64_t tsf_time;
6411 
6412 		if (vdev->get_tsf_time) {
6413 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6414 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6415 		}
6416 	}
6417 }
6418 
6419 void dp_pkt_get_timestamp(uint64_t *time)
6420 {
6421 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6422 		*time = qdf_get_log_timestamp();
6423 }
6424 #endif
6425 
6426