xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 572d9ff24ecf529ae4f1386400340d7c62e013d6)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject */
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 #ifdef WLAN_PEER_JITTER
72 #define DP_AVG_JITTER_WEIGHT_DENOM 4
73 #define DP_AVG_DELAY_WEIGHT_DENOM 3
74 #endif
75 
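/*
 * Abstract the HTT TCL metadata accessors: map to the *_V2_* HTT macros when
 * QCA_DP_TX_FW_METADATA_V2 is defined, otherwise to the legacy definitions,
 * so the rest of this file stays metadata-version agnostic.
 */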
76 #ifdef QCA_DP_TX_FW_METADATA_V2
77 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
78 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
82 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
85 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
86 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
87 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
88 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
89 #define DP_TCL_METADATA_TYPE_PEER_BASED \
90 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
91 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
92 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
93 #else
94 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
95 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
102 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
103 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
104 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
105 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
106 #define DP_TCL_METADATA_TYPE_PEER_BASED \
107 	HTT_TCL_METADATA_TYPE_PEER_BASED
108 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
109 	HTT_TCL_METADATA_TYPE_VDEV_BASED
110 #endif
111 
112 /* Mapping between hal encrypt type and cdp_sec_type */
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
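	/*
	 * Record frags 1..num_frags-1 for this segment; frag 0 is recorded
	 * only once, when this is the last segment in the list, using a
	 * 0xFFFFFFFF sentinel cookie.
	 */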
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
258 
259 /**
260  * dp_is_tput_high() - Check if throughput is high
261  *
262  * @soc - core txrx main context
263  *
264  * This check is based on the RTPM tput policy variable, which indicates that
265  * RTPM should be avoided because of high throughput.
266  */
267 static inline int dp_is_tput_high(struct dp_soc *soc)
268 {
269 	return dp_get_rtpm_tput_policy_requirement(soc);
270 }
271 
272 #if defined(FEATURE_TSO)
273 /**
274  * dp_tx_tso_unmap_segment() - Unmap TSO segment
275  *
276  * @soc - core txrx main context
277  * @seg_desc - tso segment descriptor
278  * @num_seg_desc - tso number segment descriptor
279  */
280 static void dp_tx_tso_unmap_segment(
281 		struct dp_soc *soc,
282 		struct qdf_tso_seg_elem_t *seg_desc,
283 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
284 {
285 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
286 	if (qdf_unlikely(!seg_desc)) {
287 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
288 			 __func__, __LINE__);
289 		qdf_assert(0);
290 	} else if (qdf_unlikely(!num_seg_desc)) {
291 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
292 			 __func__, __LINE__);
293 		qdf_assert(0);
294 	} else {
295 		bool is_last_seg;
296 		/* no tso segment left to do dma unmap */
297 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
298 			return;
299 
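		/*
		 * Pass is_last_seg as true only when this is the final
		 * outstanding segment of the jumbo frame
		 * (tso_cmn_num_seg drops to zero below).
		 */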
300 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
301 					true : false;
302 		qdf_nbuf_unmap_tso_segment(soc->osdev,
303 					   seg_desc, is_last_seg);
304 		num_seg_desc->num_seg.tso_cmn_num_seg--;
305 	}
306 }
307 
308 /**
309  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
310  *                            back to the freelist
311  *
312  * @soc - soc device handle
313  * @tx_desc - Tx software descriptor
314  */
315 static void dp_tx_tso_desc_release(struct dp_soc *soc,
316 				   struct dp_tx_desc_s *tx_desc)
317 {
318 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
319 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
320 		dp_tx_err("TSO desc is NULL!");
321 		qdf_assert(0);
322 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
323 		dp_tx_err("TSO num desc is NULL!");
324 		qdf_assert(0);
325 	} else {
326 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
327 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
328 				msdu_ext_desc->tso_num_desc;
329 
330 		/* Add the tso num segment into the free list */
331 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
332 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
333 					    tx_desc->msdu_ext_desc->
334 					    tso_num_desc);
335 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
336 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
337 		}
338 
339 		/* Add the tso segment into the free list */
340 		dp_tx_tso_desc_free(soc,
341 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
342 				    tso_desc);
343 		tx_desc->msdu_ext_desc->tso_desc = NULL;
344 	}
345 }
346 #else
347 static void dp_tx_tso_unmap_segment(
348 		struct dp_soc *soc,
349 		struct qdf_tso_seg_elem_t *seg_desc,
350 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
351 
352 {
353 }
354 
355 static void dp_tx_tso_desc_release(struct dp_soc *soc,
356 				   struct dp_tx_desc_s *tx_desc)
357 {
358 }
359 #endif
360 
361 /**
362  * dp_tx_desc_release() - Release Tx Descriptor
363  * @tx_desc : Tx Descriptor
364  * @desc_pool_id: Descriptor Pool ID
365  *
366  * Deallocate all resources attached to Tx descriptor and free the Tx
367  * descriptor.
368  *
369  * Return: None
370  */
371 void
372 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
373 {
374 	struct dp_pdev *pdev = tx_desc->pdev;
375 	struct dp_soc *soc;
376 	uint8_t comp_status = 0;
377 
378 	qdf_assert(pdev);
379 
380 	soc = pdev->soc;
381 
382 	dp_tx_outstanding_dec(pdev);
383 
384 	if (tx_desc->msdu_ext_desc) {
385 		if (tx_desc->frm_type == dp_tx_frm_tso)
386 			dp_tx_tso_desc_release(soc, tx_desc);
387 
388 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
389 			dp_tx_me_free_buf(tx_desc->pdev,
390 					  tx_desc->msdu_ext_desc->me_buffer);
391 
392 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
393 	}
394 
395 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
396 		qdf_atomic_dec(&soc->num_tx_exception);
397 
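	/*
	 * A release reason is available only for TQM-released descriptors;
	 * anything else is reported as an FW release.
	 */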
398 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
399 				tx_desc->buffer_src)
400 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
401 							     soc->hal_soc);
402 	else
403 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
404 
405 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
406 		    tx_desc->id, comp_status,
407 		    qdf_atomic_read(&pdev->num_tx_outstanding));
408 
409 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
410 	return;
411 }
412 
413 /**
414  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
415  * @vdev: DP vdev Handle
416  * @nbuf: skb
417  * @msdu_info: msdu_info required to create HTT metadata
418  *
419  * Prepares and fills HTT metadata in the frame pre-header for special frames
420  * that should be transmitted using varying transmit parameters.
421  * There are 2 VDEV modes that currently need this special metadata -
422  *  1) Mesh Mode
423  *  2) DSRC Mode
424  *
425  * Return: HTT metadata size
426  *
427  */
428 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
429 					  struct dp_tx_msdu_info_s *msdu_info)
430 {
431 	uint32_t *meta_data = msdu_info->meta_data;
432 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
433 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
434 
435 	uint8_t htt_desc_size;
436 
437 	/* Size rounded off to a multiple of 8 bytes */
438 	uint8_t htt_desc_size_aligned;
439 
440 	uint8_t *hdr = NULL;
441 
442 	/*
443 	 * Metadata - HTT MSDU Extension header
444 	 */
445 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
446 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
447 
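	/*
	 * Metadata is prepended only for frames that need per-packet tx
	 * parameters: mesh vdevs, tx-sniffer frames, frames carrying valid
	 * key flags, or frames already marked for FW exception handling.
	 */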
448 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
449 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
450 							   meta_data[0]) ||
451 	    msdu_info->exception_fw) {
452 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
453 				 htt_desc_size_aligned)) {
454 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
455 							 htt_desc_size_aligned);
456 			if (!nbuf) {
457 				/*
458 				 * qdf_nbuf_realloc_headroom won't do skb_clone
459 				 * as skb_realloc_headroom does, so no free is
460 				 * needed here.
461 				 */
462 				DP_STATS_INC(vdev,
463 					     tx_i.dropped.headroom_insufficient,
464 					     1);
465 				qdf_print(" %s[%d] skb_realloc_headroom failed",
466 					  __func__, __LINE__);
467 				return 0;
468 			}
469 		}
470 		/* Fill and add HTT metaheader */
471 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
472 		if (!hdr) {
473 			dp_tx_err("Error in filling HTT metadata");
474 
475 			return 0;
476 		}
477 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
478 
479 	} else if (vdev->opmode == wlan_op_mode_ocb) {
480 		/* Todo - Add support for DSRC */
481 	}
482 
483 	return htt_desc_size_aligned;
484 }
485 
486 /**
487  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
488  * @tso_seg: TSO segment to process
489  * @ext_desc: Pointer to MSDU extension descriptor
490  *
491  * Return: void
492  */
493 #if defined(FEATURE_TSO)
494 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
495 		void *ext_desc)
496 {
497 	uint8_t num_frag;
498 	uint32_t tso_flags;
499 
500 	/*
501 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
502 	 * tcp_flag_mask
503 	 *
504 	 * Checksum enable flags are set in TCL descriptor and not in Extension
505 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
506 	 */
507 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
508 
509 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
510 
511 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
512 		tso_seg->tso_flags.ip_len);
513 
514 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
515 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
516 
517 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
518 		uint32_t lo = 0;
519 		uint32_t hi = 0;
520 
521 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
522 				  (tso_seg->tso_frags[num_frag].length));
523 
524 		qdf_dmaaddr_to_32s(
525 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
526 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
527 			tso_seg->tso_frags[num_frag].length);
528 	}
529 
530 	return;
531 }
532 #else
533 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
534 		void *ext_desc)
535 {
536 	return;
537 }
538 #endif
539 
540 #if defined(FEATURE_TSO)
541 /**
542  * dp_tx_free_tso_seg_list() - Loop through the tso segments
543  *                             allocated and free them
544  *
545  * @soc: soc handle
546  * @free_seg: list of tso segments
547  * @msdu_info: msdu descriptor
548  *
549  * Return - void
550  */
551 static void dp_tx_free_tso_seg_list(
552 		struct dp_soc *soc,
553 		struct qdf_tso_seg_elem_t *free_seg,
554 		struct dp_tx_msdu_info_s *msdu_info)
555 {
556 	struct qdf_tso_seg_elem_t *next_seg;
557 
558 	while (free_seg) {
559 		next_seg = free_seg->next;
560 		dp_tx_tso_desc_free(soc,
561 				    msdu_info->tx_queue.desc_pool_id,
562 				    free_seg);
563 		free_seg = next_seg;
564 	}
565 }
566 
567 /**
568  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
569  *                                 allocated and free them
570  *
571  * @soc:  soc handle
572  * @free_num_seg: list of tso number segments
573  * @msdu_info: msdu descriptor
574  * Return - void
575  */
576 static void dp_tx_free_tso_num_seg_list(
577 		struct dp_soc *soc,
578 		struct qdf_tso_num_seg_elem_t *free_num_seg,
579 		struct dp_tx_msdu_info_s *msdu_info)
580 {
581 	struct qdf_tso_num_seg_elem_t *next_num_seg;
582 
583 	while (free_num_seg) {
584 		next_num_seg = free_num_seg->next;
585 		dp_tso_num_seg_free(soc,
586 				    msdu_info->tx_queue.desc_pool_id,
587 				    free_num_seg);
588 		free_num_seg = next_num_seg;
589 	}
590 }
591 
592 /**
593  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
594  *                              do dma unmap for each segment
595  *
596  * @soc: soc handle
597  * @free_seg: list of tso segments
598  * @num_seg_desc: tso number segment descriptor
599  *
600  * Return - void
601  */
602 static void dp_tx_unmap_tso_seg_list(
603 		struct dp_soc *soc,
604 		struct qdf_tso_seg_elem_t *free_seg,
605 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
606 {
607 	struct qdf_tso_seg_elem_t *next_seg;
608 
609 	if (qdf_unlikely(!num_seg_desc)) {
610 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
611 		return;
612 	}
613 
614 	while (free_seg) {
615 		next_seg = free_seg->next;
616 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
617 		free_seg = next_seg;
618 	}
619 }
620 
621 #ifdef FEATURE_TSO_STATS
622 /**
623  * dp_tso_get_stats_idx() - Retrieve the tso packet id
624  * @pdev - pdev handle
625  *
626  * Return: id
627  */
628 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
629 {
630 	uint32_t stats_idx;
631 
632 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
633 						% CDP_MAX_TSO_PACKETS);
634 	return stats_idx;
635 }
636 #else
637 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
638 {
639 	return 0;
640 }
641 #endif /* FEATURE_TSO_STATS */
642 
643 /**
644  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
645  *				     free the tso segments descriptor and
646  *				     tso num segments descriptor
647  *
648  * @soc:  soc handle
649  * @msdu_info: msdu descriptor
650  * @tso_seg_unmap: flag to show if dma unmap is necessary
651  *
652  * Return - void
653  */
654 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
655 					  struct dp_tx_msdu_info_s *msdu_info,
656 					  bool tso_seg_unmap)
657 {
658 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
659 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
660 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
661 					tso_info->tso_num_seg_list;
662 
663 	/* do dma unmap for each segment */
664 	if (tso_seg_unmap)
665 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
666 
667 	/* Free all tso num seg descriptors; typically there is only one */
668 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
669 
670 	/* free all tso segment descriptor */
671 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
672 }
673 
674 /**
675  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
676  * @vdev: virtual device handle
677  * @msdu: network buffer
678  * @msdu_info: meta data associated with the msdu
679  *
680  * Return: QDF_STATUS_SUCCESS success
681  */
682 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
683 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
684 {
685 	struct qdf_tso_seg_elem_t *tso_seg;
686 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
687 	struct dp_soc *soc = vdev->pdev->soc;
688 	struct dp_pdev *pdev = vdev->pdev;
689 	struct qdf_tso_info_t *tso_info;
690 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
691 	tso_info = &msdu_info->u.tso_info;
692 	tso_info->curr_seg = NULL;
693 	tso_info->tso_seg_list = NULL;
694 	tso_info->num_segs = num_seg;
695 	msdu_info->frm_type = dp_tx_frm_tso;
696 	tso_info->tso_num_seg_list = NULL;
697 
698 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
699 
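	/*
	 * Pre-allocate one TSO segment descriptor per segment and chain them;
	 * qdf_nbuf_get_tso_info() fills them in further below.
	 */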
700 	while (num_seg) {
701 		tso_seg = dp_tx_tso_desc_alloc(
702 				soc, msdu_info->tx_queue.desc_pool_id);
703 		if (tso_seg) {
704 			tso_seg->next = tso_info->tso_seg_list;
705 			tso_info->tso_seg_list = tso_seg;
706 			num_seg--;
707 		} else {
708 			dp_err_rl("Failed to alloc tso seg desc");
709 			DP_STATS_INC_PKT(vdev->pdev,
710 					 tso_stats.tso_no_mem_dropped, 1,
711 					 qdf_nbuf_len(msdu));
712 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
713 
714 			return QDF_STATUS_E_NOMEM;
715 		}
716 	}
717 
718 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
719 
720 	tso_num_seg = dp_tso_num_seg_alloc(soc,
721 			msdu_info->tx_queue.desc_pool_id);
722 
723 	if (tso_num_seg) {
724 		tso_num_seg->next = tso_info->tso_num_seg_list;
725 		tso_info->tso_num_seg_list = tso_num_seg;
726 	} else {
727 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
728 			 __func__);
729 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
730 
731 		return QDF_STATUS_E_NOMEM;
732 	}
733 
734 	msdu_info->num_seg =
735 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
736 
737 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
738 			msdu_info->num_seg);
739 
740 	if (!(msdu_info->num_seg)) {
741 		/*
742 		 * Free allocated TSO seg desc and number seg desc,
743 		 * do unmap for segments if dma map has done.
744 		 */
745 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
746 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
747 
748 		return QDF_STATUS_E_INVAL;
749 	}
750 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
751 			      msdu, 0, DP_TX_DESC_MAP);
752 
753 	tso_info->curr_seg = tso_info->tso_seg_list;
754 
755 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
756 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
757 			     msdu, msdu_info->num_seg);
758 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
759 				    tso_info->msdu_stats_idx);
760 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
761 	return QDF_STATUS_SUCCESS;
762 }
763 #else
764 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
765 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
766 {
767 	return QDF_STATUS_E_NOMEM;
768 }
769 #endif
770 
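/*
 * The meta_data[] words in dp_tx_msdu_info_s must be large enough to hold a
 * full struct htt_tx_msdu_desc_ext2_t.
 */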
771 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
772 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
773 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
774 
775 /**
776  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
777  * @vdev: DP Vdev handle
778  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
779  * @desc_pool_id: Descriptor Pool ID
780  *
781  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
782  */
783 static
784 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
785 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
786 {
787 	uint8_t i;
788 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
789 	struct dp_tx_seg_info_s *seg_info;
790 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
791 	struct dp_soc *soc = vdev->pdev->soc;
792 
793 	/* Allocate an extension descriptor */
794 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
795 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
796 
797 	if (!msdu_ext_desc) {
798 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
799 		return NULL;
800 	}
801 
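	/*
	 * For mesh frames destined to FW, append the HTT metadata after the
	 * base extension descriptor bytes inside the cached descriptor.
	 */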
802 	if (msdu_info->exception_fw &&
803 			qdf_unlikely(vdev->mesh_vdev)) {
804 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
805 				&msdu_info->meta_data[0],
806 				sizeof(struct htt_tx_msdu_desc_ext2_t));
807 		qdf_atomic_inc(&soc->num_tx_exception);
808 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
809 	}
810 
811 	switch (msdu_info->frm_type) {
812 	case dp_tx_frm_sg:
813 	case dp_tx_frm_me:
814 	case dp_tx_frm_raw:
815 		seg_info = msdu_info->u.sg_info.curr_seg;
816 		/* Update the buffer pointers in MSDU Extension Descriptor */
817 		for (i = 0; i < seg_info->frag_cnt; i++) {
818 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
819 				seg_info->frags[i].paddr_lo,
820 				seg_info->frags[i].paddr_hi,
821 				seg_info->frags[i].len);
822 		}
823 
824 		break;
825 
826 	case dp_tx_frm_tso:
827 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
828 				&cached_ext_desc[0]);
829 		break;
830 
831 
832 	default:
833 		break;
834 	}
835 
836 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
837 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
838 
839 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
840 			msdu_ext_desc->vaddr);
841 
842 	return msdu_ext_desc;
843 }
844 
845 /**
846  * dp_tx_trace_pkt() - Trace TX packet at DP layer
847  * @soc: DP soc handle
848  * @skb: skb to be traced
849  * @msdu_id: msdu_id of the packet
850  * @vdev_id: vdev_id of the packet
851  *
852  * Return: None
853  */
854 #ifdef DP_DISABLE_TX_PKT_TRACE
855 static void dp_tx_trace_pkt(struct dp_soc *soc,
856 			    qdf_nbuf_t skb, uint16_t msdu_id,
857 			    uint8_t vdev_id)
858 {
859 }
860 #else
861 static void dp_tx_trace_pkt(struct dp_soc *soc,
862 			    qdf_nbuf_t skb, uint16_t msdu_id,
863 			    uint8_t vdev_id)
864 {
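	/* Skip per-packet DP tracing under high throughput */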
865 	if (dp_is_tput_high(soc))
866 		return;
867 
868 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
869 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
870 	DPTRACE(qdf_dp_trace_ptr(skb,
871 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
872 				 QDF_TRACE_DEFAULT_PDEV_ID,
873 				 qdf_nbuf_data_addr(skb),
874 				 sizeof(qdf_nbuf_data(skb)),
875 				 msdu_id, vdev_id, 0));
876 
877 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
878 
879 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
880 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
881 				      msdu_id, QDF_TX));
882 }
883 #endif
884 
885 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
886 /**
887  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
888  *				      exception by the upper layer (OS_IF)
889  * @soc: DP soc handle
890  * @nbuf: packet to be transmitted
891  *
892  * Returns: 1 if the packet is marked as exception,
893  *	    0 if the packet is not marked as exception.
894  */
895 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
896 						 qdf_nbuf_t nbuf)
897 {
898 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
899 }
900 #else
901 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
902 						 qdf_nbuf_t nbuf)
903 {
904 	return 0;
905 }
906 #endif
907 
908 #ifdef DP_TRAFFIC_END_INDICATION
909 /**
910  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
911  *                                          as indication to fw to inform that
912  *                                          data stream has ended
913  * @vdev: DP vdev handle
914  * @nbuf: original buffer from network stack
915  *
916  * Return: NULL on failure,
917  *         nbuf on success
918  */
919 static inline qdf_nbuf_t
920 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
921 				     qdf_nbuf_t nbuf)
922 {
923 	/* Packet length should be enough to copy up to the L3 header */
924 	uint8_t end_nbuf_len = 64;
925 	uint8_t htt_desc_size_aligned;
926 	uint8_t htt_desc_size;
927 	qdf_nbuf_t end_nbuf;
928 
929 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
930 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
931 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
932 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
933 
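		/*
		 * Reuse a queued indication buffer when available; otherwise
		 * allocate one with headroom reserved for the HTT metadata.
		 */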
934 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
935 		if (!end_nbuf) {
936 			end_nbuf = qdf_nbuf_alloc(NULL,
937 						  (htt_desc_size_aligned +
938 						  end_nbuf_len),
939 						  htt_desc_size_aligned,
940 						  8, false);
941 			if (!end_nbuf) {
942 				dp_err("Packet allocation failed");
943 				goto out;
944 			}
945 		} else {
946 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
947 		}
948 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
949 			     end_nbuf_len);
950 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
951 
952 		return end_nbuf;
953 	}
954 out:
955 	return NULL;
956 }
957 
958 /**
959  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
960  *                                           via exception path.
961  * @vdev: DP vdev handle
962  * @end_nbuf: skb to send as indication
963  * @msdu_info: msdu_info of original nbuf
964  * @peer_id: peer id
965  *
966  * Return: None
967  */
968 static inline void
969 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
970 				      qdf_nbuf_t end_nbuf,
971 				      struct dp_tx_msdu_info_s *msdu_info,
972 				      uint16_t peer_id)
973 {
974 	struct dp_tx_msdu_info_s e_msdu_info = {0};
975 	qdf_nbuf_t nbuf;
976 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
977 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
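	/*
	 * Inherit the queue and tid of the original frame and force the
	 * exception path so FW receives the end-indication metadata.
	 */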
978 	e_msdu_info.tx_queue = msdu_info->tx_queue;
979 	e_msdu_info.tid = msdu_info->tid;
980 	e_msdu_info.exception_fw = 1;
981 	desc_ext->host_tx_desc_pool = 1;
982 	desc_ext->traffic_end_indication = 1;
983 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
984 				      peer_id, NULL);
985 	if (nbuf) {
986 		dp_err("Traffic end indication packet tx failed");
987 		qdf_nbuf_free(nbuf);
988 	}
989 }
990 
991 /**
992  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
993  *                                                mark it as a traffic end
994  *                                                indication packet.
995  * @tx_desc: Tx descriptor pointer
996  * @msdu_info: msdu_info structure pointer
997  *
998  * Return: None
999  */
1000 static inline void
1001 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1002 					   struct dp_tx_msdu_info_s *msdu_info)
1003 {
1004 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1005 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1006 
1007 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1008 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1009 }
1010 
1011 /**
1012  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
1013  *                                              freeing it, when it is
1014  *                                              associated with a traffic end
1015  *                                              indication flagged descriptor.
1016  * @soc: dp soc handle
1017  * @desc: Tx descriptor pointer
1018  * @nbuf: buffer pointer
1019  *
1020  * Return: True if packet gets enqueued else false
1021  */
1022 static bool
1023 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1024 					 struct dp_tx_desc_s *desc,
1025 					 qdf_nbuf_t nbuf)
1026 {
1027 	struct dp_vdev *vdev = NULL;
1028 
1029 	if (qdf_unlikely((desc->flags &
1030 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1031 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1032 					     DP_MOD_ID_TX_COMP);
1033 		if (vdev) {
1034 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1035 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1036 			return true;
1037 		}
1038 	}
1039 	return false;
1040 }
1041 
1042 /**
1043  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1044  *                                             enable/disable status
1045  * @vdev: dp vdev handle
1046  *
1047  * Return: True if the feature is enabled, else false
1048  */
1049 static inline bool
1050 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1051 {
1052 	return qdf_unlikely(vdev->traffic_end_ind_en);
1053 }
1054 
1055 static inline qdf_nbuf_t
1056 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1057 			       struct dp_tx_msdu_info_s *msdu_info,
1058 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1059 {
1060 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1061 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1062 
1063 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1064 
1065 	if (qdf_unlikely(end_nbuf))
1066 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1067 						      msdu_info, peer_id);
1068 	return nbuf;
1069 }
1070 #else
1071 static inline qdf_nbuf_t
1072 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1073 				     qdf_nbuf_t nbuf)
1074 {
1075 	return NULL;
1076 }
1077 
1078 static inline void
1079 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1080 				      qdf_nbuf_t end_nbuf,
1081 				      struct dp_tx_msdu_info_s *msdu_info,
1082 				      uint16_t peer_id)
1083 {}
1084 
1085 static inline void
1086 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1087 					   struct dp_tx_msdu_info_s *msdu_info)
1088 {}
1089 
1090 static inline bool
1091 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1092 					 struct dp_tx_desc_s *desc,
1093 					 qdf_nbuf_t nbuf)
1094 {
1095 	return false;
1096 }
1097 
1098 static inline bool
1099 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1100 {
1101 	return false;
1102 }
1103 
1104 static inline qdf_nbuf_t
1105 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1106 			       struct dp_tx_msdu_info_s *msdu_info,
1107 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1108 {
1109 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1110 }
1111 #endif
1112 
1113 /**
1114  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1115  * @vdev: DP vdev handle
1116  * @nbuf: skb
1117  * @desc_pool_id: Descriptor pool ID
1118  * @msdu_info: MSDU info holding the metadata to be sent to the fw
1119  * @tx_exc_metadata: Handle that holds exception path metadata
1120  * Allocate and prepare Tx descriptor with msdu information.
1121  *
1122  * Return: Pointer to Tx Descriptor on success,
1123  *         NULL on failure
1124  */
1125 static
1126 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1127 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1128 		struct dp_tx_msdu_info_s *msdu_info,
1129 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1130 {
1131 	uint8_t align_pad;
1132 	uint8_t is_exception = 0;
1133 	uint8_t htt_hdr_size;
1134 	struct dp_tx_desc_s *tx_desc;
1135 	struct dp_pdev *pdev = vdev->pdev;
1136 	struct dp_soc *soc = pdev->soc;
1137 
1138 	if (dp_tx_limit_check(vdev))
1139 		return NULL;
1140 
1141 	/* Allocate software Tx descriptor */
1142 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1143 
1144 	if (qdf_unlikely(!tx_desc)) {
1145 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1146 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1147 		return NULL;
1148 	}
1149 
1150 	dp_tx_outstanding_inc(pdev);
1151 
1152 	/* Initialize the SW tx descriptor */
1153 	tx_desc->nbuf = nbuf;
1154 	tx_desc->frm_type = dp_tx_frm_std;
1155 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1156 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1157 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1158 	tx_desc->vdev_id = vdev->vdev_id;
1159 	tx_desc->pdev = pdev;
1160 	tx_desc->msdu_ext_desc = NULL;
1161 	tx_desc->pkt_offset = 0;
1162 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1163 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1164 
1165 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1166 
1167 	if (qdf_unlikely(vdev->multipass_en)) {
1168 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1169 			goto failure;
1170 	}
1171 
1172 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1173 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1174 		is_exception = 1;
1175 	/*
1176 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1177 	 * transmitted using varying transmit parameters (tx spec) which include
1178 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
1179 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1180 	 * These frames are sent as exception packets to firmware.
1181 	 *
1182 	 * HW requirement is that metadata should always point to a
1183 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1184 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1185 	 *  to get 8-byte aligned start address along with align_pad added
1186 	 *
1187 	 *  |-----------------------------|
1188 	 *  |                             |
1189 	 *  |-----------------------------| <-----Buffer Pointer Address given
1190 	 *  |                             |  ^    in HW descriptor (aligned)
1191 	 *  |       HTT Metadata          |  |
1192 	 *  |                             |  |
1193 	 *  |                             |  | Packet Offset given in descriptor
1194 	 *  |                             |  |
1195 	 *  |-----------------------------|  |
1196 	 *  |       Alignment Pad         |  v
1197 	 *  |-----------------------------| <----- Actual buffer start address
1198 	 *  |        SKB Data             |           (Unaligned)
1199 	 *  |                             |
1200 	 *  |                             |
1201 	 *  |                             |
1202 	 *  |                             |
1203 	 *  |                             |
1204 	 *  |-----------------------------|
1205 	 */
1206 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1207 				(vdev->opmode == wlan_op_mode_ocb) ||
1208 				(tx_exc_metadata &&
1209 				tx_exc_metadata->is_tx_sniffer)) {
1210 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1211 
1212 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1213 			DP_STATS_INC(vdev,
1214 				     tx_i.dropped.headroom_insufficient, 1);
1215 			goto failure;
1216 		}
1217 
1218 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1219 			dp_tx_err("qdf_nbuf_push_head failed");
1220 			goto failure;
1221 		}
1222 
1223 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1224 				msdu_info);
1225 		if (htt_hdr_size == 0)
1226 			goto failure;
1227 
1228 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1229 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1230 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1231 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1232 							   msdu_info);
1233 		is_exception = 1;
1234 		tx_desc->length -= tx_desc->pkt_offset;
1235 	}
1236 
1237 #if !TQM_BYPASS_WAR
1238 	if (is_exception || tx_exc_metadata)
1239 #endif
1240 	{
1241 		/* Temporary WAR due to TQM VP issues */
1242 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1243 		qdf_atomic_inc(&soc->num_tx_exception);
1244 	}
1245 
1246 	return tx_desc;
1247 
1248 failure:
1249 	dp_tx_desc_release(tx_desc, desc_pool_id);
1250 	return NULL;
1251 }
1252 
1253 /**
1254  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1255  * @vdev: DP vdev handle
1256  * @nbuf: skb
1257  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1258  * @desc_pool_id : Descriptor Pool ID
1259  *
1260  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1261  * information. For frames with fragments, allocate and prepare
1262  * an MSDU extension descriptor
1263  *
1264  * Return: Pointer to Tx Descriptor on success,
1265  *         NULL on failure
1266  */
1267 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1268 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1269 		uint8_t desc_pool_id)
1270 {
1271 	struct dp_tx_desc_s *tx_desc;
1272 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1273 	struct dp_pdev *pdev = vdev->pdev;
1274 	struct dp_soc *soc = pdev->soc;
1275 
1276 	if (dp_tx_limit_check(vdev))
1277 		return NULL;
1278 
1279 	/* Allocate software Tx descriptor */
1280 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1281 	if (!tx_desc) {
1282 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1283 		return NULL;
1284 	}
1285 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1286 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1287 
1288 	dp_tx_outstanding_inc(pdev);
1289 
1290 	/* Initialize the SW tx descriptor */
1291 	tx_desc->nbuf = nbuf;
1292 	tx_desc->frm_type = msdu_info->frm_type;
1293 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1294 	tx_desc->vdev_id = vdev->vdev_id;
1295 	tx_desc->pdev = pdev;
1296 	tx_desc->pkt_offset = 0;
1297 
1298 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1299 
1300 	/* Handle scattered frames - TSO/SG/ME */
1301 	/* Allocate and prepare an extension descriptor for scattered frames */
1302 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1303 	if (!msdu_ext_desc) {
1304 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1305 		goto failure;
1306 	}
1307 
1308 #if TQM_BYPASS_WAR
1309 	/* Temporary WAR due to TQM VP issues */
1310 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1311 	qdf_atomic_inc(&soc->num_tx_exception);
1312 #endif
1313 	if (qdf_unlikely(msdu_info->exception_fw))
1314 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1315 
1316 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1317 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1318 
1319 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1320 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1321 
1322 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1323 
1324 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1325 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1326 	else
1327 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1328 
1329 	return tx_desc;
1330 failure:
1331 	dp_tx_desc_release(tx_desc, desc_pool_id);
1332 	return NULL;
1333 }
1334 
1335 /**
1336  * dp_tx_prepare_raw() - Prepare RAW packet TX
1337  * @vdev: DP vdev handle
1338  * @nbuf: buffer pointer
1339  * @seg_info: Pointer to Segment info Descriptor to be prepared
1340  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1341  *     descriptor
1342  *
1343  * Return: nbuf on success, NULL on failure
1344  */
1345 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1346 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1347 {
1348 	qdf_nbuf_t curr_nbuf = NULL;
1349 	uint16_t total_len = 0;
1350 	qdf_dma_addr_t paddr;
1351 	int32_t i;
1352 	int32_t mapped_buf_num = 0;
1353 
1354 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1355 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1356 
1357 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1358 
1359 	/* Continue only if frames are of DATA type */
1360 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1361 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1362 		dp_tx_debug("Pkt. recd is not of data type");
1363 		goto error;
1364 	}
1365 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1366 	if (vdev->raw_mode_war &&
1367 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1368 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1369 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1370 
1371 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1372 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1373 		/*
1374 		 * Number of nbuf's must not exceed the size of the frags
1375 		 * array in seg_info.
1376 		 */
1377 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1378 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1379 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1380 			goto error;
1381 		}
1382 		if (QDF_STATUS_SUCCESS !=
1383 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1384 						   curr_nbuf,
1385 						   QDF_DMA_TO_DEVICE,
1386 						   curr_nbuf->len)) {
1387 			dp_tx_err("%s dma map error ", __func__);
1388 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1389 			goto error;
1390 		}
1391 		/* Update the count of mapped nbuf's */
1392 		mapped_buf_num++;
1393 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1394 		seg_info->frags[i].paddr_lo = paddr;
1395 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1396 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1397 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1398 		total_len += qdf_nbuf_len(curr_nbuf);
1399 	}
1400 
1401 	seg_info->frag_cnt = i;
1402 	seg_info->total_len = total_len;
1403 	seg_info->next = NULL;
1404 
1405 	sg_info->curr_seg = seg_info;
1406 
1407 	msdu_info->frm_type = dp_tx_frm_raw;
1408 	msdu_info->num_seg = 1;
1409 
1410 	return nbuf;
1411 
1412 error:
1413 	i = 0;
1414 	while (nbuf) {
1415 		curr_nbuf = nbuf;
1416 		if (i < mapped_buf_num) {
1417 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1418 						     QDF_DMA_TO_DEVICE,
1419 						     curr_nbuf->len);
1420 			i++;
1421 		}
1422 		nbuf = qdf_nbuf_next(nbuf);
1423 		qdf_nbuf_free(curr_nbuf);
1424 	}
1425 	return NULL;
1426 
1427 }
1428 
1429 /**
1430  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1431  * @soc: DP soc handle
1432  * @nbuf: Buffer pointer
1433  *
1434  * unmap the chain of nbufs that belong to this RAW frame.
1435  *
1436  * Return: None
1437  */
1438 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1439 				    qdf_nbuf_t nbuf)
1440 {
1441 	qdf_nbuf_t cur_nbuf = nbuf;
1442 
1443 	do {
1444 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1445 					     QDF_DMA_TO_DEVICE,
1446 					     cur_nbuf->len);
1447 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1448 	} while (cur_nbuf);
1449 }
1450 
1451 #ifdef VDEV_PEER_PROTOCOL_COUNT
1452 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1453 					       qdf_nbuf_t nbuf)
1454 {
1455 	qdf_nbuf_t nbuf_local;
1456 	struct dp_vdev *vdev_local = vdev_hdl;
1457 
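	/*
	 * Update protocol counters only when tracking is enabled on the vdev
	 * and the frame is a linear, non-TSO, non-raw packet.
	 */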
1458 	do {
1459 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1460 			break;
1461 		nbuf_local = nbuf;
1462 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1463 			 htt_cmn_pkt_type_raw))
1464 			break;
1465 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1466 			break;
1467 		else if (qdf_nbuf_is_tso((nbuf_local)))
1468 			break;
1469 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1470 						       (nbuf_local),
1471 						       NULL, 1, 0);
1472 	} while (0);
1473 }
1474 #endif
1475 
1476 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1477 /**
1478  * dp_tx_update_stats() - Update soc level tx stats
1479  * @soc: DP soc handle
1480  * @tx_desc: TX descriptor reference
1481  * @ring_id: TCL ring id
1482  *
1483  * Returns: none
1484  */
1485 void dp_tx_update_stats(struct dp_soc *soc,
1486 			struct dp_tx_desc_s *tx_desc,
1487 			uint8_t ring_id)
1488 {
1489 	uint32_t stats_len = 0;
1490 
1491 	if (tx_desc->frm_type == dp_tx_frm_tso)
1492 		stats_len  = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1493 	else
1494 		stats_len = qdf_nbuf_len(tx_desc->nbuf);
1495 
1496 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1497 }
1498 
1499 int
1500 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1501 			 struct dp_tx_desc_s *tx_desc,
1502 			 uint8_t tid,
1503 			 struct dp_tx_msdu_info_s *msdu_info,
1504 			 uint8_t ring_id)
1505 {
1506 	struct dp_swlm *swlm = &soc->swlm;
1507 	union swlm_data swlm_query_data;
1508 	struct dp_swlm_tcl_data tcl_data;
1509 	QDF_STATUS status;
1510 	int ret;
1511 
1512 	if (!swlm->is_enabled)
1513 		return msdu_info->skip_hp_update;
1514 
1515 	tcl_data.nbuf = tx_desc->nbuf;
1516 	tcl_data.tid = tid;
1517 	tcl_data.ring_id = ring_id;
1518 	if (tx_desc->frm_type == dp_tx_frm_tso) {
1519 		tcl_data.pkt_len  =
1520 			tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1521 	} else {
1522 		tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
1523 	}
1524 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1525 	swlm_query_data.tcl_data = &tcl_data;
1526 
1527 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1528 	if (QDF_IS_STATUS_ERROR(status)) {
1529 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1530 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1531 		return 0;
1532 	}
1533 
1534 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1535 	if (ret) {
1536 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1537 	} else {
1538 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1539 	}
1540 
1541 	return ret;
1542 }
1543 
1544 void
1545 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1546 		      int coalesce)
1547 {
1548 	if (coalesce)
1549 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1550 	else
1551 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1552 }
1553 
1554 static inline void
1555 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1556 {
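	/* Defer the TCL HP update until the last segment of this MSDU */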
1557 	if (((i + 1) < msdu_info->num_seg))
1558 		msdu_info->skip_hp_update = 1;
1559 	else
1560 		msdu_info->skip_hp_update = 0;
1561 }
1562 
1563 static inline void
1564 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1565 {
1566 	hal_ring_handle_t hal_ring_hdl =
1567 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1568 
1569 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1570 		dp_err("Fillmore: SRNG access start failed");
1571 		return;
1572 	}
1573 
1574 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1575 }
1576 
1577 static inline void
1578 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1579 			 QDF_STATUS status,
1580 			 struct dp_tx_msdu_info_s *msdu_info)
1581 {
1582 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1583 		dp_flush_tcp_hp(soc,
1584 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1585 	}
1586 }
1587 #else
1588 static inline void
1589 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1590 {
1591 }
1592 
1593 static inline void
1594 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1595 			 QDF_STATUS status,
1596 			 struct dp_tx_msdu_info_s *msdu_info)
1597 {
1598 }
1599 #endif
1600 
1601 #ifdef FEATURE_RUNTIME_PM
1602 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1603 {
1604 	int ret;
1605 
1606 	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
1607 	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
1608 	return ret;
1609 }
1610 /**
1611  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1612  * @soc: Datapath soc handle
1613  * @hal_ring_hdl: HAL ring handle
1614  * @coalesce: Coalesce the current write or not
1615  *
1616  * Wrapper for HAL ring access end for data transmission for
1617  * FEATURE_RUNTIME_PM
1618  *
1619  * Returns: none
1620  */
1621 void
1622 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1623 			      hal_ring_handle_t hal_ring_hdl,
1624 			      int coalesce)
1625 {
1626 	int ret;
1627 
1628 	/*
1629 	 * Avoid runtime get and put APIs under high throughput scenarios.
1630 	 */
1631 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1632 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1633 		return;
1634 	}
1635 
1636 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1637 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1638 		if (hif_system_pm_state_check(soc->hif_handle) ||
1639 					qdf_unlikely(soc->is_tx_pause)) {
1640 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1641 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1642 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1643 		} else {
1644 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1645 		}
1646 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1647 	} else {
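		/*
		 * Runtime-PM get failed: reap without updating HP, flag the
		 * ring for a later flush and account the pending update.
		 */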
1648 		dp_runtime_get(soc);
1649 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1650 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1651 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1652 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1653 		dp_runtime_put(soc);
1654 	}
1655 }
1656 #else
1657 
1658 #ifdef DP_POWER_SAVE
1659 void
1660 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1661 			      hal_ring_handle_t hal_ring_hdl,
1662 			      int coalesce)
1663 {
1664 	if (hif_system_pm_state_check(soc->hif_handle) ||
1665 					qdf_unlikely(soc->is_tx_pause)) {
1666 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1667 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1668 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1669 	} else {
1670 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1671 	}
1672 }
1673 #endif
1674 
1675 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1676 {
1677 	return 0;
1678 }
1679 #endif
1680 
1681 /**
1682  * dp_tx_get_tid() - Obtain TID to be used for this frame
1683  * @vdev: DP vdev handle
1684  * @nbuf: skb
1685  *
1686  * Extract the DSCP or PCP information from frame and map into TID value.
1687  *
1688  * Return: void
1689  */
1690 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1691 			  struct dp_tx_msdu_info_s *msdu_info)
1692 {
1693 	uint8_t tos = 0, dscp_tid_override = 0;
1694 	uint8_t *hdr_ptr, *L3datap;
1695 	uint8_t is_mcast = 0;
1696 	qdf_ether_header_t *eh = NULL;
1697 	qdf_ethervlan_header_t *evh = NULL;
1698 	uint16_t   ether_type;
1699 	qdf_llc_t *llcHdr;
1700 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1701 
1702 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1703 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1704 		eh = (qdf_ether_header_t *)nbuf->data;
1705 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1706 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1707 	} else {
1708 		qdf_dot3_qosframe_t *qos_wh =
1709 			(qdf_dot3_qosframe_t *) nbuf->data;
1710 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1711 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1712 		return;
1713 	}
1714 
1715 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1716 	ether_type = eh->ether_type;
1717 
1718 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1719 	/*
1720 	 * Check if packet is dot3 or eth2 type.
1721 	 */
1722 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1723 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1724 				sizeof(*llcHdr));
1725 
1726 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1727 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1728 				sizeof(*llcHdr);
1729 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1730 					+ sizeof(*llcHdr) +
1731 					sizeof(qdf_net_vlanhdr_t));
1732 		} else {
1733 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1734 				sizeof(*llcHdr);
1735 		}
1736 	} else {
1737 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1738 			evh = (qdf_ethervlan_header_t *) eh;
1739 			ether_type = evh->ether_type;
1740 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1741 		}
1742 	}
1743 
1744 	/*
1745 	 * Find priority from IP TOS DSCP field
1746 	 */
1747 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1748 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1749 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1750 			/* Only for unicast frames */
1751 			if (!is_mcast) {
1752 				/* send it on VO queue */
1753 				msdu_info->tid = DP_VO_TID;
1754 			}
1755 		} else {
1756 			/*
1757 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1758 			 * from TOS byte.
1759 			 */
1760 			tos = ip->ip_tos;
1761 			dscp_tid_override = 1;
1762 
1763 		}
1764 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1765 		/* TODO
1766 		 * use flowlabel
1767 		 * IGMP/MLD cases to be handled in phase 2
1768 		 */
1769 		unsigned long ver_pri_flowlabel;
1770 		unsigned long pri;
1771 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1772 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1773 			DP_IPV6_PRIORITY_SHIFT;
1774 		tos = pri;
1775 		dscp_tid_override = 1;
1776 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1777 		msdu_info->tid = DP_VO_TID;
1778 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1779 		/* Only for unicast frames */
1780 		if (!is_mcast) {
1781 			/* send ucast arp on VO queue */
1782 			msdu_info->tid = DP_VO_TID;
1783 		}
1784 	}
1785 
1786 	/*
1787 	 * Assign all MCAST packets to BE
1788 	 */
1789 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1790 		if (is_mcast) {
1791 			tos = 0;
1792 			dscp_tid_override = 1;
1793 		}
1794 	}
1795 
1796 	if (dscp_tid_override == 1) {
1797 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1798 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1799 	}
1800 
1801 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1802 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1803 
1804 	return;
1805 }
1806 
1807 /**
1808  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1809  * @vdev: DP vdev handle
1810  * @nbuf: skb
 * @msdu_info: msdu_info structure updated with the classified TID
1811  *
1812  * Software based TID classification is required when more than 2 DSCP-TID
1813  * mapping tables are needed.
1814  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1815  *
1816  * Return: void
1817  */
1818 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1819 				      struct dp_tx_msdu_info_s *msdu_info)
1820 {
1821 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1822 
1823 	/*
1824 	 * skip_sw_tid_classification flag will be set in the below cases:
1825 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1826 	 * 2. hlos_tid_override enabled for vdev
1827 	 * 3. mesh mode enabled for vdev
1828 	 */
1829 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1830 		/* Update tid in msdu_info from skb priority */
1831 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1832 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1833 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1834 
1835 			if (tid == DP_TX_INVALID_QOS_TAG)
1836 				return;
1837 
1838 			msdu_info->tid = tid;
1839 			return;
1840 		}
1841 		return;
1842 	}
1843 
1844 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1845 }
1846 
1847 #ifdef FEATURE_WLAN_TDLS
1848 /**
1849  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1850  * @soc: datapath SOC
1851  * @vdev: datapath vdev
1852  * @tx_desc: TX descriptor
1853  *
1854  * Return: None
1855  */
1856 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1857 				    struct dp_vdev *vdev,
1858 				    struct dp_tx_desc_s *tx_desc)
1859 {
1860 	if (vdev) {
1861 		if (vdev->is_tdls_frame) {
1862 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1863 			vdev->is_tdls_frame = false;
1864 		}
1865 	}
1866 }
1867 
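/**
 * dp_htt_tx_comp_get_status() - Extract HTT tx status from WBM completion
 * @soc: datapath SOC
 * @htt_desc: HTT status words read from the completion descriptor
 *
 * Return: HTT_TX_FW2WBM_TX_STATUS_* value for the SOC architecture in use
 */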
1868 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1869 {
1870 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1871 
1872 	switch (soc->arch_id) {
1873 	case CDP_ARCH_TYPE_LI:
1874 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1875 		break;
1876 
1877 	case CDP_ARCH_TYPE_BE:
1878 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1879 		break;
1880 
1881 	default:
1882 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1883 		QDF_BUG(0);
1884 	}
1885 
1886 	return tx_status;
1887 }
1888 
1889 /**
1890  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1891  * @soc: dp_soc handle
1892  * @tx_desc: TX descriptor
1894  *
1895  * Return: None
1896  */
1897 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1898 					 struct dp_tx_desc_s *tx_desc)
1899 {
1900 	uint8_t tx_status = 0;
1901 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1902 
1903 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1904 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1905 						     DP_MOD_ID_TDLS);
1906 
1907 	if (qdf_unlikely(!vdev)) {
1908 		dp_err_rl("vdev is null!");
1909 		goto error;
1910 	}
1911 
1912 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1913 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1914 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1915 
1916 	if (vdev->tx_non_std_data_callback.func) {
1917 		qdf_nbuf_set_next(nbuf, NULL);
1918 		vdev->tx_non_std_data_callback.func(
1919 				vdev->tx_non_std_data_callback.ctxt,
1920 				nbuf, tx_status);
1921 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1922 		return;
1923 	} else {
1924 		dp_err_rl("callback func is null");
1925 	}
1926 
1927 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1928 error:
1929 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1930 	qdf_nbuf_free(nbuf);
1931 }
1932 
1933 /**
1934  * dp_tx_msdu_single_map() - do nbuf map
1935  * @vdev: DP vdev handle
1936  * @tx_desc: DP TX descriptor pointer
1937  * @nbuf: skb pointer
1938  *
1939  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1940  * operation done in other component.
1941  *
1942  * Return: QDF_STATUS
1943  */
1944 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1945 					       struct dp_tx_desc_s *tx_desc,
1946 					       qdf_nbuf_t nbuf)
1947 {
1948 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1949 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1950 						  nbuf,
1951 						  QDF_DMA_TO_DEVICE,
1952 						  nbuf->len);
1953 	else
1954 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1955 					   QDF_DMA_TO_DEVICE);
1956 }
1957 #else
1958 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1959 					   struct dp_vdev *vdev,
1960 					   struct dp_tx_desc_s *tx_desc)
1961 {
1962 }
1963 
1964 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1965 						struct dp_tx_desc_s *tx_desc)
1966 {
1967 }
1968 
1969 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1970 					       struct dp_tx_desc_s *tx_desc,
1971 					       qdf_nbuf_t nbuf)
1972 {
1973 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1974 					  nbuf,
1975 					  QDF_DMA_TO_DEVICE,
1976 					  nbuf->len);
1977 }
1978 #endif
1979 
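/**
 * dp_tx_nbuf_map_regular() - DMA map the nbuf for transmit
 * @vdev: DP vdev handle
 * @tx_desc: TX descriptor for this nbuf
 * @nbuf: skb to be mapped
 *
 * Return: DMA address of the mapped buffer, or 0 on mapping failure
 */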
1980 static inline
1981 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1982 				      struct dp_tx_desc_s *tx_desc,
1983 				      qdf_nbuf_t nbuf)
1984 {
1985 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1986 
1987 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1988 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1989 		return 0;
1990 
1991 	return qdf_nbuf_mapped_paddr_get(nbuf);
1992 }
1993 
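/**
 * dp_tx_nbuf_unmap_regular() - DMA unmap the nbuf held by a TX descriptor
 * @soc: datapath SOC
 * @desc: TX descriptor whose nbuf is to be unmapped
 *
 * Return: None
 */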
1994 static inline
1995 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1996 {
1997 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1998 					   desc->nbuf,
1999 					   desc->dma_addr,
2000 					   QDF_DMA_TO_DEVICE,
2001 					   desc->length);
2002 }
2003 
2004 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
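/**
 * is_nbuf_frm_rmnet() - Check if the nbuf was received from an rmnet device
 * @nbuf: skb to check
 * @msdu_info: MSDU info updated with the flattened payload on success
 *
 * For rmnet-originated skbs, the linear data is copied just in front of
 * the first frag so the frame can be sent as one contiguous buffer.
 *
 * Return: true if the nbuf is from rmnet and msdu_info was updated,
 *         false otherwise
 */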
2005 static inline bool
2006 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2007 {
2008 	struct net_device *ingress_dev;
2009 	skb_frag_t *frag;
2010 	uint16_t buf_len = 0;
2011 	uint16_t linear_data_len = 0;
2012 	uint8_t *payload_addr = NULL;
2013 
2014 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2015 
2016 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2017 		dev_put(ingress_dev);
2018 		frag = &(skb_shinfo(nbuf)->frags[0]);
2019 		buf_len = skb_frag_size(frag);
2020 		payload_addr = (uint8_t *)skb_frag_address(frag);
2021 		linear_data_len = skb_headlen(nbuf);
2022 
2023 		buf_len += linear_data_len;
2024 		payload_addr = payload_addr - linear_data_len;
2025 		memcpy(payload_addr, nbuf->data, linear_data_len);
2026 
2027 		msdu_info->frm_type = dp_tx_frm_rmnet;
2028 		msdu_info->buf_len = buf_len;
2029 		msdu_info->payload_addr = payload_addr;
2030 
2031 		return true;
2032 	}
2033 	dev_put(ingress_dev);
2034 	return false;
2035 }
2036 
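/**
 * dp_tx_rmnet_nbuf_map() - Get the physical address of an rmnet payload
 * @msdu_info: MSDU info holding the flattened rmnet payload
 * @tx_desc: TX descriptor updated with the length and RMNET flag
 *
 * Return: physical address of the payload
 */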
2037 static inline
2038 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2039 				    struct dp_tx_desc_s *tx_desc)
2040 {
2041 	qdf_dma_addr_t paddr;
2042 
2043 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2044 	tx_desc->length  = msdu_info->buf_len;
2045 
2046 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2047 				 (void *)(msdu_info->payload_addr +
2048 					  msdu_info->buf_len));
2049 
2050 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2051 	return paddr;
2052 }
2053 #else
2054 static inline bool
2055 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2056 {
2057 	return false;
2058 }
2059 
2060 static inline
2061 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2062 				    struct dp_tx_desc_s *tx_desc)
2063 {
2064 	return 0;
2065 }
2066 #endif
2067 
2068 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
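/**
 * dp_tx_nbuf_map() - Get the DMA address to be programmed for the nbuf
 * @vdev: DP vdev handle
 * @tx_desc: TX descriptor for this nbuf
 * @nbuf: skb
 *
 * For simple (fast path) descriptors the cache is cleaned and the physical
 * address is derived directly; otherwise a regular DMA map is performed.
 *
 * Return: DMA/physical address of the buffer, or 0 on failure
 */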
2069 static inline
2070 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2071 			      struct dp_tx_desc_s *tx_desc,
2072 			      qdf_nbuf_t nbuf)
2073 {
2074 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
2075 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2076 					 (void *)(nbuf->data + nbuf->len));
2077 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2078 	} else {
2079 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2080 	}
2081 }
2082 
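/**
 * dp_tx_nbuf_unmap() - Unmap the nbuf if it was DMA mapped
 * @soc: datapath SOC
 * @desc: TX descriptor
 *
 * SIMPLE and RMNET descriptors were never DMA mapped, so only the
 * remaining descriptor types are unmapped here.
 *
 * Return: None
 */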
2083 static inline
2084 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2085 		      struct dp_tx_desc_s *desc)
2086 {
2087 	if (qdf_unlikely(!(desc->flags &
2088 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2089 		return dp_tx_nbuf_unmap_regular(soc, desc);
2090 }
2091 #else
2092 static inline
2093 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2094 			      struct dp_tx_desc_s *tx_desc,
2095 			      qdf_nbuf_t nbuf)
2096 {
2097 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2098 }
2099 
2100 static inline
2101 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2102 		      struct dp_tx_desc_s *desc)
2103 {
2104 	return dp_tx_nbuf_unmap_regular(soc, desc);
2105 }
2106 #endif
2107 
2108 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
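/**
 * dp_tx_enh_unmap() - Unmap the nbuf and mark the descriptor as unmapped
 * @soc: datapath SOC
 * @desc: TX descriptor
 *
 * Setting DP_TX_DESC_FLAG_UNMAP_DONE ensures the later completion
 * handling does not unmap the same buffer again.
 *
 * Return: None
 */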
2109 static inline
2110 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2111 {
2112 	dp_tx_nbuf_unmap(soc, desc);
2113 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2114 }
2115 
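/**
 * dp_tx_unmap() - Unmap the nbuf unless it has already been unmapped
 * @soc: datapath SOC
 * @desc: TX descriptor
 *
 * Return: None
 */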
2116 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2117 {
2118 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2119 		dp_tx_nbuf_unmap(soc, desc);
2120 }
2121 #else
2122 static inline
2123 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2124 {
2125 }
2126 
2127 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2128 {
2129 	dp_tx_nbuf_unmap(soc, desc);
2130 }
2131 #endif
2132 
2133 #ifdef MESH_MODE_SUPPORT
2134 /**
2135  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2136  * @soc: datapath SOC
2137  * @vdev: datapath vdev
2138  * @tx_desc: TX descriptor
2139  *
2140  * Return: None
2141  */
2142 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2143 					   struct dp_vdev *vdev,
2144 					   struct dp_tx_desc_s *tx_desc)
2145 {
2146 	if (qdf_unlikely(vdev->mesh_vdev))
2147 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2148 }
2149 
2150 /**
2151  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2152  * @soc: dp_soc handle
2153  * @tx_desc: TX descriptor
2154  * @delayed_free: delay the nbuf free
2155  *
2156  * Return: nbuf to be freed later
2157  */
2158 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2159 						   struct dp_tx_desc_s *tx_desc,
2160 						   bool delayed_free)
2161 {
2162 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2163 	struct dp_vdev *vdev = NULL;
2164 
2165 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2166 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2167 		if (vdev)
2168 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2169 
2170 		if (delayed_free)
2171 			return nbuf;
2172 
2173 		qdf_nbuf_free(nbuf);
2174 	} else {
2175 		if (vdev && vdev->osif_tx_free_ext) {
2176 			vdev->osif_tx_free_ext((nbuf));
2177 		} else {
2178 			if (delayed_free)
2179 				return nbuf;
2180 
2181 			qdf_nbuf_free(nbuf);
2182 		}
2183 	}
2184 
2185 	if (vdev)
2186 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2187 
2188 	return NULL;
2189 }
2190 #else
2191 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2192 					   struct dp_vdev *vdev,
2193 					   struct dp_tx_desc_s *tx_desc)
2194 {
2195 }
2196 
2197 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2198 						   struct dp_tx_desc_s *tx_desc,
2199 						   bool delayed_free)
2200 {
2201 	return NULL;
2202 }
2203 #endif
2204 
2205 /**
2206  * dp_tx_frame_is_drop() - checks if the packet is loopback
2207  * @vdev: DP vdev handle
2208  * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
2209  *
2210  * Return: 1 if frame needs to be dropped else 0
2211  */
2212 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2213 {
2214 	struct dp_pdev *pdev = NULL;
2215 	struct dp_ast_entry *src_ast_entry = NULL;
2216 	struct dp_ast_entry *dst_ast_entry = NULL;
2217 	struct dp_soc *soc = NULL;
2218 
2219 	qdf_assert(vdev);
2220 	pdev = vdev->pdev;
2221 	qdf_assert(pdev);
2222 	soc = pdev->soc;
2223 
2224 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2225 				(soc, dstmac, vdev->pdev->pdev_id);
2226 
2227 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2228 				(soc, srcmac, vdev->pdev->pdev_id);
2229 	if (dst_ast_entry && src_ast_entry) {
2230 		if (dst_ast_entry->peer_id ==
2231 				src_ast_entry->peer_id)
2232 			return 1;
2233 	}
2234 
2235 	return 0;
2236 }
2237 
2238 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2239 	defined(WLAN_MCAST_MLO)
2240 /* MLO peer id for reinject*/
2241 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2242 /* MLO vdev id inc offset */
2243 #define DP_MLO_VDEV_ID_OFFSET 0x80
2244 
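/**
 * dp_tx_bypass_reinjection() - Send the reinjected frame via the FW path
 * @soc: datapath SOC
 * @tx_desc: TX descriptor
 *
 * Sets DP_TX_DESC_FLAG_TO_FW (if not already set) and accounts the frame
 * in num_tx_exception.
 *
 * Return: None
 */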
2245 static inline void
2246 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2247 {
2248 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2249 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2250 		qdf_atomic_inc(&soc->num_tx_exception);
2251 	}
2252 }
2253 
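/**
 * dp_tx_update_mcast_param() - Update TCL metadata for MLO mcast reinject
 * @peer_id: peer id of the destination peer
 * @htt_tcl_metadata: pointer to the TCL metadata to be updated
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info holding the global sequence number
 *
 * For DP_MLO_MCAST_REINJECT_PEER_ID the metadata is switched to the
 * global-sequence based type and the vdev id is offset for MLO; for all
 * other peer ids only msdu_info->vdev_id is filled in.
 *
 * Return: None
 */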
2254 static inline void
2255 dp_tx_update_mcast_param(uint16_t peer_id,
2256 			 uint16_t *htt_tcl_metadata,
2257 			 struct dp_vdev *vdev,
2258 			 struct dp_tx_msdu_info_s *msdu_info)
2259 {
2260 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2261 		*htt_tcl_metadata = 0;
2262 		DP_TX_TCL_METADATA_TYPE_SET(
2263 				*htt_tcl_metadata,
2264 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2265 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2266 						    msdu_info->gsn);
2267 
2268 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2269 		if (qdf_unlikely(vdev->nawds_enabled))
2270 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2271 							*htt_tcl_metadata, 1);
2272 	} else {
2273 		msdu_info->vdev_id = vdev->vdev_id;
2274 	}
2275 }
2276 #else
2277 static inline void
2278 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2279 {
2280 }
2281 
2282 static inline void
2283 dp_tx_update_mcast_param(uint16_t peer_id,
2284 			 uint16_t *htt_tcl_metadata,
2285 			 struct dp_vdev *vdev,
2286 			 struct dp_tx_msdu_info_s *msdu_info)
2287 {
2288 }
2289 #endif
2290 
2291 #ifdef DP_TX_SW_DROP_STATS_INC
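/**
 * tx_sw_drop_stats_inc() - Increment per-pdev EAPOL TX SW drop statistics
 * @pdev: DP pdev handle
 * @nbuf: dropped skb
 * @drop_code: reason the frame was dropped in SW
 *
 * Only EAPOL frames are accounted here; other frames are ignored.
 *
 * Return: None
 */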
2292 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2293 				 qdf_nbuf_t nbuf,
2294 				 enum cdp_tx_sw_drop drop_code)
2295 {
2296 	/* EAPOL Drop stats */
2297 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2298 		switch (drop_code) {
2299 		case TX_DESC_ERR:
2300 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2301 			break;
2302 		case TX_HAL_RING_ACCESS_ERR:
2303 			DP_STATS_INC(pdev,
2304 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2305 			break;
2306 		case TX_DMA_MAP_ERR:
2307 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2308 			break;
2309 		case TX_HW_ENQUEUE:
2310 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2311 			break;
2312 		case TX_SW_ENQUEUE:
2313 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2314 			break;
2315 		default:
2316 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2317 			break;
2318 		}
2319 	}
2320 }
2321 #else
2322 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2323 				 qdf_nbuf_t nbuf,
2324 				 enum cdp_tx_sw_drop drop_code)
2325 {
2326 }
2327 #endif
2328 
2329 /**
2330  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2331  * @vdev: DP vdev handle
2332  * @nbuf: skb
2333  * @msdu_info: MSDU information (TID, FW metadata, Tx queue to use)
2336  * @peer_id: peer_id of the peer in case of NAWDS frames
2337  * @tx_exc_metadata: Handle that holds exception path metadata
2338  *
2339  * Return: NULL on success,
2340  *         nbuf when it fails to send
2341  */
2342 qdf_nbuf_t
2343 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2344 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2345 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2346 {
2347 	struct dp_pdev *pdev = vdev->pdev;
2348 	struct dp_soc *soc = pdev->soc;
2349 	struct dp_tx_desc_s *tx_desc;
2350 	QDF_STATUS status;
2351 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2352 	uint16_t htt_tcl_metadata = 0;
2353 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2354 	uint8_t tid = msdu_info->tid;
2355 	struct cdp_tid_tx_stats *tid_stats = NULL;
2356 	qdf_dma_addr_t paddr;
2357 
2358 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2359 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2360 			msdu_info, tx_exc_metadata);
2361 	if (!tx_desc) {
2362 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
2363 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
2364 		drop_code = TX_DESC_ERR;
2365 		goto fail_return;
2366 	}
2367 
2368 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2369 
2370 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2371 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2372 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2373 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2374 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2375 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2376 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2377 					       peer_id);
2378 		dp_tx_bypass_reinjection(soc, tx_desc);
2379 	} else
2380 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2381 
2382 	if (msdu_info->exception_fw)
2383 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2384 
2385 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2386 					 !pdev->enhanced_stats_en);
2387 
2388 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2389 
2390 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
2391 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
2392 	else
2393 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2394 
2395 	if (!paddr) {
2396 		/* Handle failure */
2397 		dp_err("qdf_nbuf_map failed");
2398 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2399 		drop_code = TX_DMA_MAP_ERR;
2400 		goto release_desc;
2401 	}
2402 
2403 	tx_desc->dma_addr = paddr;
2404 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2405 			       tx_desc->id, DP_TX_DESC_MAP);
2406 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2407 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2408 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2409 					     htt_tcl_metadata,
2410 					     tx_exc_metadata, msdu_info);
2411 
2412 	if (status != QDF_STATUS_SUCCESS) {
2413 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2414 			     tx_desc, tx_q->ring_id);
2415 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2416 				       tx_desc->id, DP_TX_DESC_UNMAP);
2417 		dp_tx_nbuf_unmap(soc, tx_desc);
2418 		drop_code = TX_HW_ENQUEUE;
2419 		goto release_desc;
2420 	}
2421 
2423 	return NULL;
2424 
2425 release_desc:
2426 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2428 
2429 fail_return:
2430 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2431 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
2432 	tid_stats = &pdev->stats.tid_stats.
2433 		    tid_tx_stats[tx_q->ring_id][tid];
2434 	tid_stats->swdrop_cnt[drop_code]++;
2435 	return nbuf;
2436 }
2437 
2438 /**
2439  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2440  * @soc: Soc handle
2441  * @desc: software Tx descriptor to be processed
2442  * @delayed_free: defer freeing of nbuf
2443  *
2444  * Return: nbuf to be freed later
2445  */
2446 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
2447 			       bool delayed_free)
2448 {
2449 	qdf_nbuf_t nbuf = desc->nbuf;
2450 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2451 
2452 	/* nbuf already freed in vdev detach path */
2453 	if (!nbuf)
2454 		return NULL;
2455 
2456 	/* If it is TDLS mgmt, don't unmap or free the frame */
2457 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
2458 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
2459 		return NULL;
2460 	}
2461 
2462 	/* 0 : MSDU buffer, 1 : MLE */
2463 	if (desc->msdu_ext_desc) {
2464 		/* TSO free */
2465 		if (hal_tx_ext_desc_get_tso_enable(
2466 					desc->msdu_ext_desc->vaddr)) {
2467 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2468 					       desc->id, DP_TX_COMP_MSDU_EXT);
2469 			dp_tx_tso_seg_history_add(soc,
2470 						  desc->msdu_ext_desc->tso_desc,
2471 						  desc->nbuf, desc->id, type);
2472 			/* unmap each TSO seg before freeing the nbuf */
2473 			dp_tx_tso_unmap_segment(soc,
2474 						desc->msdu_ext_desc->tso_desc,
2475 						desc->msdu_ext_desc->
2476 						tso_num_desc);
2477 			goto nbuf_free;
2478 		}
2479 
2480 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2481 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2482 			qdf_dma_addr_t iova;
2483 			uint32_t frag_len;
2484 			uint32_t i;
2485 
2486 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2487 						     QDF_DMA_TO_DEVICE,
2488 						     qdf_nbuf_headlen(nbuf));
2489 
2490 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2491 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2492 							      &iova,
2493 							      &frag_len);
2494 				if (!iova || !frag_len)
2495 					break;
2496 
2497 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2498 						   QDF_DMA_TO_DEVICE);
2499 			}
2500 
2501 			goto nbuf_free;
2502 		}
2503 	}
2504 	/* If it's an ME frame, don't unmap the cloned nbufs */
2505 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2506 		goto nbuf_free;
2507 
2508 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2509 	dp_tx_unmap(soc, desc);
2510 
2511 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2512 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
2513 
2514 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
2515 		return NULL;
2516 
2517 nbuf_free:
2518 	if (delayed_free)
2519 		return nbuf;
2520 
2521 	qdf_nbuf_free(nbuf);
2522 
2523 	return NULL;
2524 }
2525 
2526 /**
2527  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2528  * @soc: DP soc handle
2529  * @nbuf: skb
2530  * @msdu_info: MSDU info
2531  *
2532  * Return: None
2533  */
2534 static inline void
2535 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2536 		   struct dp_tx_msdu_info_s *msdu_info)
2537 {
2538 	uint32_t cur_idx;
2539 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2540 
2541 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2542 				     qdf_nbuf_headlen(nbuf));
2543 
2544 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2545 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2546 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2547 				    seg->frags[cur_idx].paddr_hi) << 32),
2548 				   seg->frags[cur_idx].len,
2549 				   QDF_DMA_TO_DEVICE);
2550 }
2551 
2552 /**
2553  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2554  * @vdev: DP vdev handle
2555  * @nbuf: skb
2556  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2557  *
2558  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2559  *
2560  * Return: NULL on success,
2561  *         nbuf when it fails to send
2562  */
2563 #if QDF_LOCK_STATS
2564 noinline
2565 #else
2566 #endif
2567 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2568 				    struct dp_tx_msdu_info_s *msdu_info)
2569 {
2570 	uint32_t i;
2571 	struct dp_pdev *pdev = vdev->pdev;
2572 	struct dp_soc *soc = pdev->soc;
2573 	struct dp_tx_desc_s *tx_desc;
2574 	bool is_cce_classified = false;
2575 	QDF_STATUS status;
2576 	uint16_t htt_tcl_metadata = 0;
2577 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2578 	struct cdp_tid_tx_stats *tid_stats = NULL;
2579 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2580 
2581 	if (msdu_info->frm_type == dp_tx_frm_me)
2582 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2583 
2584 	i = 0;
2585 	/* Print statement to track i and num_seg */
2586 	/*
2587 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2588 	 * descriptors using information in msdu_info
2589 	 */
2590 	while (i < msdu_info->num_seg) {
2591 		/*
2592 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2593 		 * descriptor
2594 		 */
2595 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2596 				tx_q->desc_pool_id);
2597 
2598 		if (!tx_desc) {
2599 			if (msdu_info->frm_type == dp_tx_frm_me) {
2600 				prep_desc_fail++;
2601 				dp_tx_me_free_buf(pdev,
2602 					(void *)(msdu_info->u.sg_info
2603 						.curr_seg->frags[0].vaddr));
2604 				if (prep_desc_fail == msdu_info->num_seg) {
2605 					/*
2606 					 * Unmap is needed only if descriptor
2607 					 * preparation failed for all segments.
2608 					 */
2609 					qdf_nbuf_unmap(soc->osdev,
2610 						       msdu_info->u.sg_info.
2611 						       curr_seg->nbuf,
2612 						       QDF_DMA_TO_DEVICE);
2613 				}
2614 				/*
2615 				 * Free the nbuf for the current segment
2616 				 * and make it point to the next in the list.
2617 				 * For ME (mcast enhancement), there are as many
2618 				 * segments as there are clients.
2619 				 */
2620 				qdf_nbuf_free(msdu_info->u.sg_info
2621 					      .curr_seg->nbuf);
2622 				if (msdu_info->u.sg_info.curr_seg->next) {
2623 					msdu_info->u.sg_info.curr_seg =
2624 						msdu_info->u.sg_info
2625 						.curr_seg->next;
2626 					nbuf = msdu_info->u.sg_info
2627 					       .curr_seg->nbuf;
2628 				}
2629 				i++;
2630 				continue;
2631 			}
2632 
2633 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2634 				dp_tx_tso_seg_history_add(
2635 						soc,
2636 						msdu_info->u.tso_info.curr_seg,
2637 						nbuf, 0, DP_TX_DESC_UNMAP);
2638 				dp_tx_tso_unmap_segment(soc,
2639 							msdu_info->u.tso_info.
2640 							curr_seg,
2641 							msdu_info->u.tso_info.
2642 							tso_num_seg_list);
2643 
2644 				if (msdu_info->u.tso_info.curr_seg->next) {
2645 					msdu_info->u.tso_info.curr_seg =
2646 					msdu_info->u.tso_info.curr_seg->next;
2647 					i++;
2648 					continue;
2649 				}
2650 			}
2651 
2652 			if (msdu_info->frm_type == dp_tx_frm_sg)
2653 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2654 
2655 			goto done;
2656 		}
2657 
2658 		if (msdu_info->frm_type == dp_tx_frm_me) {
2659 			tx_desc->msdu_ext_desc->me_buffer =
2660 				(struct dp_tx_me_buf_t *)msdu_info->
2661 				u.sg_info.curr_seg->frags[0].vaddr;
2662 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2663 		}
2664 
2665 		if (is_cce_classified)
2666 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2667 
2668 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2669 		if (msdu_info->exception_fw) {
2670 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2671 		}
2672 
2673 		dp_tx_is_hp_update_required(i, msdu_info);
2674 
2675 		/*
2676 		 * For frames with multiple segments (TSO, ME), jump to next
2677 		 * segment.
2678 		 */
2679 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2680 			if (msdu_info->u.tso_info.curr_seg->next) {
2681 				msdu_info->u.tso_info.curr_seg =
2682 					msdu_info->u.tso_info.curr_seg->next;
2683 
2684 				/*
2685 				 * If this is a jumbo nbuf, then increment the
2686 				 * number of nbuf users for each additional
2687 				 * segment of the msdu. This will ensure that
2688 				 * the skb is freed only after receiving tx
2689 				 * completion for all segments of an nbuf
2690 				 */
2691 				qdf_nbuf_inc_users(nbuf);
2692 
2693 				/* Check with MCL if this is needed */
2694 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2695 				 */
2696 			}
2697 		}
2698 
2699 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2700 					 &htt_tcl_metadata,
2701 					 vdev,
2702 					 msdu_info);
2703 		/*
2704 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2705 		 */
2706 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2707 						     htt_tcl_metadata,
2708 						     NULL, msdu_info);
2709 
2710 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2711 
2712 		if (status != QDF_STATUS_SUCCESS) {
2713 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2714 				   tx_desc, tx_q->ring_id);
2715 
2716 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2717 			tid_stats = &pdev->stats.tid_stats.
2718 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2719 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2720 
2721 			if (msdu_info->frm_type == dp_tx_frm_me) {
2722 				hw_enq_fail++;
2723 				if (hw_enq_fail == msdu_info->num_seg) {
2724 					/*
2725 					 * Unmap is needed only if enqueue
2726 					 * failed for all segments.
2727 					 */
2728 					qdf_nbuf_unmap(soc->osdev,
2729 						       msdu_info->u.sg_info.
2730 						       curr_seg->nbuf,
2731 						       QDF_DMA_TO_DEVICE);
2732 				}
2733 				/*
2734 				 * Free the nbuf for the current segment
2735 				 * and make it point to the next in the list.
2736 				 * For ME (mcast enhancement), there are as many
2737 				 * segments as there are clients.
2738 				 */
2739 				qdf_nbuf_free(msdu_info->u.sg_info
2740 					      .curr_seg->nbuf);
2741 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2742 				if (msdu_info->u.sg_info.curr_seg->next) {
2743 					msdu_info->u.sg_info.curr_seg =
2744 						msdu_info->u.sg_info
2745 						.curr_seg->next;
2746 					nbuf = msdu_info->u.sg_info
2747 					       .curr_seg->nbuf;
2748 				} else
2749 					break;
2750 				i++;
2751 				continue;
2752 			}
2753 
2754 			/*
2755 			 * For TSO frames, the nbuf users increment done for
2756 			 * the current segment has to be reverted, since the
2757 			 * hw enqueue for this segment failed
2758 			 */
2759 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2760 			    msdu_info->u.tso_info.curr_seg) {
2761 				/*
2762 				 * unmap and free current,
2763 				 * retransmit remaining segments
2764 				 */
2765 				dp_tx_comp_free_buf(soc, tx_desc, false);
2766 				i++;
2767 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2768 				continue;
2769 			}
2770 
2771 			if (msdu_info->frm_type == dp_tx_frm_sg)
2772 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2773 
2774 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2775 			goto done;
2776 		}
2777 
2778 		/*
2779 		 * TODO
2780 		 * if tso_info structure can be modified to have curr_seg
2781 		 * as first element, following 2 blocks of code (for TSO and SG)
2782 		 * can be combined into 1
2783 		 */
2784 
2785 		/*
2786 		 * For Multicast-Unicast converted packets,
2787 		 * each converted frame (for a client) is represented as
2788 		 * 1 segment
2789 		 */
2790 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2791 				(msdu_info->frm_type == dp_tx_frm_me)) {
2792 			if (msdu_info->u.sg_info.curr_seg->next) {
2793 				msdu_info->u.sg_info.curr_seg =
2794 					msdu_info->u.sg_info.curr_seg->next;
2795 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2796 			} else
2797 				break;
2798 		}
2799 		i++;
2800 	}
2801 
2802 	nbuf = NULL;
2803 
2804 done:
2805 	return nbuf;
2806 }
2807 
2808 /**
2809  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2810  *                     for SG frames
2811  * @vdev: DP vdev handle
2812  * @nbuf: skb
2813  * @seg_info: Pointer to Segment info Descriptor to be prepared
2814  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2815  *
2816  * Return: NULL on success,
2817  *         nbuf when it fails to send
2818  */
2819 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2820 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2821 {
2822 	uint32_t cur_frag, nr_frags, i;
2823 	qdf_dma_addr_t paddr;
2824 	struct dp_tx_sg_info_s *sg_info;
2825 
2826 	sg_info = &msdu_info->u.sg_info;
2827 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2828 
2829 	if (QDF_STATUS_SUCCESS !=
2830 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2831 					   QDF_DMA_TO_DEVICE,
2832 					   qdf_nbuf_headlen(nbuf))) {
2833 		dp_tx_err("dma map error");
2834 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2835 		qdf_nbuf_free(nbuf);
2836 		return NULL;
2837 	}
2838 
2839 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2840 	seg_info->frags[0].paddr_lo = paddr;
2841 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2842 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2843 	seg_info->frags[0].vaddr = (void *) nbuf;
2844 
2845 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2846 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2847 							    nbuf, 0,
2848 							    QDF_DMA_TO_DEVICE,
2849 							    cur_frag)) {
2850 			dp_tx_err("frag dma map error");
2851 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2852 			goto map_err;
2853 		}
2854 
2855 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2856 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2857 		seg_info->frags[cur_frag + 1].paddr_hi =
2858 			((uint64_t) paddr) >> 32;
2859 		seg_info->frags[cur_frag + 1].len =
2860 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2861 	}
2862 
2863 	seg_info->frag_cnt = (cur_frag + 1);
2864 	seg_info->total_len = qdf_nbuf_len(nbuf);
2865 	seg_info->next = NULL;
2866 
2867 	sg_info->curr_seg = seg_info;
2868 
2869 	msdu_info->frm_type = dp_tx_frm_sg;
2870 	msdu_info->num_seg = 1;
2871 
2872 	return nbuf;
2873 map_err:
2874 	/* restore paddr into nbuf before calling unmap */
2875 	qdf_nbuf_mapped_paddr_set(nbuf,
2876 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2877 				  ((uint64_t)
2878 				  seg_info->frags[0].paddr_hi) << 32));
2879 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2880 				     QDF_DMA_TO_DEVICE,
2881 				     seg_info->frags[0].len);
2882 	for (i = 1; i <= cur_frag; i++) {
2883 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2884 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2885 				   seg_info->frags[i].paddr_hi) << 32),
2886 				   seg_info->frags[i].len,
2887 				   QDF_DMA_TO_DEVICE);
2888 	}
2889 	qdf_nbuf_free(nbuf);
2890 	return NULL;
2891 }
2892 
2893 /**
2894  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2895  * @vdev: DP vdev handle
2896  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2897  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2898  *
2899  * Return: None
2901  */
2902 static
2903 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2904 				    struct dp_tx_msdu_info_s *msdu_info,
2905 				    uint16_t ppdu_cookie)
2906 {
2907 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2908 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2909 
2910 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2911 
2912 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2913 				(msdu_info->meta_data[5], 1);
2914 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2915 				(msdu_info->meta_data[5], 1);
2916 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2917 				(msdu_info->meta_data[6], ppdu_cookie);
2918 
2919 	msdu_info->exception_fw = 1;
2920 	msdu_info->is_tx_sniffer = 1;
2921 }
2922 
2923 #ifdef MESH_MODE_SUPPORT
2924 
2925 /**
2926  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2927  *				and prepare msdu_info for mesh frames.
2928  * @vdev: DP vdev handle
2929  * @nbuf: skb
2930  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2931  *
2932  * Return: NULL on failure,
2933  *         nbuf when extracted successfully
2934  */
2935 static
2936 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2937 				struct dp_tx_msdu_info_s *msdu_info)
2938 {
2939 	struct meta_hdr_s *mhdr;
2940 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2941 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2942 
2943 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2944 
2945 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2946 		msdu_info->exception_fw = 0;
2947 		goto remove_meta_hdr;
2948 	}
2949 
2950 	msdu_info->exception_fw = 1;
2951 
2952 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2953 
2954 	meta_data->host_tx_desc_pool = 1;
2955 	meta_data->update_peer_cache = 1;
2956 	meta_data->learning_frame = 1;
2957 
2958 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2959 		meta_data->power = mhdr->power;
2960 
2961 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2962 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2963 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2964 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2965 
2966 		meta_data->dyn_bw = 1;
2967 
2968 		meta_data->valid_pwr = 1;
2969 		meta_data->valid_mcs_mask = 1;
2970 		meta_data->valid_nss_mask = 1;
2971 		meta_data->valid_preamble_type  = 1;
2972 		meta_data->valid_retries = 1;
2973 		meta_data->valid_bw_info = 1;
2974 	}
2975 
2976 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2977 		meta_data->encrypt_type = 0;
2978 		meta_data->valid_encrypt_type = 1;
2979 		meta_data->learning_frame = 0;
2980 	}
2981 
2982 	meta_data->valid_key_flags = 1;
2983 	meta_data->key_flags = (mhdr->keyix & 0x3);
2984 
2985 remove_meta_hdr:
2986 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2987 		dp_tx_err("qdf_nbuf_pull_head failed");
2988 		qdf_nbuf_free(nbuf);
2989 		return NULL;
2990 	}
2991 
2992 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2993 
2994 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2995 		   " tid %d to_fw %d",
2996 		   msdu_info->meta_data[0],
2997 		   msdu_info->meta_data[1],
2998 		   msdu_info->meta_data[2],
2999 		   msdu_info->meta_data[3],
3000 		   msdu_info->meta_data[4],
3001 		   msdu_info->meta_data[5],
3002 		   msdu_info->tid, msdu_info->exception_fw);
3003 
3004 	return nbuf;
3005 }
3006 #else
3007 static
3008 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3009 				struct dp_tx_msdu_info_s *msdu_info)
3010 {
3011 	return nbuf;
3012 }
3013 
3014 #endif
3015 
3016 /**
3017  * dp_check_exc_metadata() - Checks if parameters are valid
3018  * @tx_exc: holds all exception path parameters
3019  *
3020  * Return: true when all the parameters are valid else false
3021  *
3022  */
3023 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3024 {
3025 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3026 			    HTT_INVALID_TID);
3027 	bool invalid_encap_type =
3028 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3029 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3030 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3031 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3032 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3033 			       tx_exc->ppdu_cookie == 0);
3034 
3035 	if (tx_exc->is_intrabss_fwd)
3036 		return true;
3037 
3038 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3039 	    invalid_cookie) {
3040 		return false;
3041 	}
3042 
3043 	return true;
3044 }
3045 
3046 #ifdef ATH_SUPPORT_IQUE
3047 /**
3048  * dp_tx_mcast_enhance() - Multicast enhancement on TX
3049  * @vdev: vdev handle
3050  * @nbuf: skb
3051  *
3052  * Return: true if the frame should go through the regular tx path,
3053  *         false if it was consumed by mcast-to-ucast conversion
3054  */
3055 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3056 {
3057 	qdf_ether_header_t *eh;
3058 
3059 	/* Mcast to Ucast Conversion*/
3060 	if (qdf_likely(!vdev->mcast_enhancement_en))
3061 		return true;
3062 
3063 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3064 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3065 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3066 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3067 		qdf_nbuf_set_next(nbuf, NULL);
3068 
3069 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
3070 				 qdf_nbuf_len(nbuf));
3071 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3072 				QDF_STATUS_SUCCESS) {
3073 			return false;
3074 		}
3075 
3076 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3077 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3078 					QDF_STATUS_SUCCESS) {
3079 				return false;
3080 			}
3081 		}
3082 	}
3083 
3084 	return true;
3085 }
3086 #else
3087 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3088 {
3089 	return true;
3090 }
3091 #endif
3092 
3093 /**
3094  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3095  * @nbuf: qdf_nbuf_t
3096  * @vdev: struct dp_vdev *
3097  *
3098  * Allow packet for processing only if it is for peer client which is
3099  * connected with same vap. Drop packet if client is connected to
3100  * different vap.
3101  *
3102  * Return: QDF_STATUS
3103  */
3104 static inline QDF_STATUS
3105 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3106 {
3107 	struct dp_ast_entry *dst_ast_entry = NULL;
3108 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3109 
3110 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3111 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3112 		return QDF_STATUS_SUCCESS;
3113 
3114 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3115 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3116 							eh->ether_dhost,
3117 							vdev->vdev_id);
3118 
3119 	/* If there is no ast entry, return failure */
3120 	if (qdf_unlikely(!dst_ast_entry)) {
3121 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3122 		return QDF_STATUS_E_FAILURE;
3123 	}
3124 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3125 
3126 	return QDF_STATUS_SUCCESS;
3127 }
3128 
3129 /**
3130  * dp_tx_nawds_handler() - NAWDS handler
3131  *
3132  * @soc: DP soc handle
3133  * @vdev: DP vdev handle
3134  * @msdu_info: msdu_info required to create HTT metadata
3135  * @nbuf: skb
 * @sa_peer_id: peer id of the source (transmitting) peer
3136  *
3137  * This API transmits the multicast frame to each NAWDS-enabled
3138  * peer, using that peer's id.
3139  *
3140  * Return: none
3141  */
3142 
3143 static inline
3144 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3145 			 struct dp_tx_msdu_info_s *msdu_info,
3146 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3147 {
3148 	struct dp_peer *peer = NULL;
3149 	qdf_nbuf_t nbuf_clone = NULL;
3150 	uint16_t peer_id = DP_INVALID_PEER;
3151 	struct dp_txrx_peer *txrx_peer;
3152 
3153 	/* This check avoids pkt forwarding which is entered
3154 	 * in the ast table but still doesn't have valid peerid.
3155 	 */
3156 	if (sa_peer_id == HTT_INVALID_PEER)
3157 		return;
3158 
3159 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3160 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3161 		txrx_peer = dp_get_txrx_peer(peer);
3162 		if (!txrx_peer)
3163 			continue;
3164 
3165 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3166 			peer_id = peer->peer_id;
3167 
3168 			if (!dp_peer_is_primary_link_peer(peer))
3169 				continue;
3170 
3171 			/* Multicast packets needs to be
3172 			 * dropped in case of intra bss forwarding
3173 			 */
3174 			if (sa_peer_id == txrx_peer->peer_id) {
3175 				dp_tx_debug("multicast packet");
3176 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3177 							  tx.nawds_mcast_drop,
3178 							  1);
3179 				continue;
3180 			}
3181 
3182 			nbuf_clone = qdf_nbuf_clone(nbuf);
3183 
3184 			if (!nbuf_clone) {
3185 				QDF_TRACE(QDF_MODULE_ID_DP,
3186 					  QDF_TRACE_LEVEL_ERROR,
3187 					  FL("nbuf clone failed"));
3188 				break;
3189 			}
3190 
3191 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3192 							    msdu_info, peer_id,
3193 							    NULL);
3194 
3195 			if (nbuf_clone) {
3196 				dp_tx_debug("pkt send failed");
3197 				qdf_nbuf_free(nbuf_clone);
3198 			} else {
3199 				if (peer_id != DP_INVALID_PEER)
3200 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3201 								      tx.nawds_mcast,
3202 								      1, qdf_nbuf_len(nbuf));
3203 			}
3204 		}
3205 	}
3206 
3207 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3208 }
3209 
3210 /**
3211  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
3212  * @soc: DP soc handle
3213  * @vdev_id: id of DP vdev handle
3214  * @nbuf: skb
3215  * @tx_exc_metadata: Handle that holds exception path meta data
3216  *
3217  * Entry point for Core Tx layer (DP_TX) invoked from
3218  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3219  *
3220  * Return: NULL on success,
3221  *         nbuf when it fails to send
3222  */
3223 qdf_nbuf_t
3224 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3225 		     qdf_nbuf_t nbuf,
3226 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3227 {
3228 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3229 	qdf_ether_header_t *eh = NULL;
3230 	struct dp_tx_msdu_info_s msdu_info;
3231 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3232 						     DP_MOD_ID_TX_EXCEPTION);
3233 
3234 	if (qdf_unlikely(!vdev))
3235 		goto fail;
3236 
3237 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3238 
3239 	if (!tx_exc_metadata)
3240 		goto fail;
3241 
3242 	msdu_info.tid = tx_exc_metadata->tid;
3243 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3244 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3245 			 QDF_MAC_ADDR_REF(nbuf->data));
3246 
3247 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3248 
3249 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3250 		dp_tx_err("Invalid parameters in exception path");
3251 		goto fail;
3252 	}
3253 
3254 	/* for peer based metadata check if peer is valid */
3255 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3256 		struct dp_peer *peer = NULL;
3257 
3258 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3259 					      tx_exc_metadata->peer_id,
3260 					      DP_MOD_ID_TX_EXCEPTION);
3261 		if (qdf_unlikely(!peer)) {
3262 			DP_STATS_INC(vdev,
3263 				     tx_i.dropped.invalid_peer_id_in_exc_path,
3264 				     1);
3265 			goto fail;
3266 		}
3267 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3268 	}
3269 	/* Basic sanity checks for unsupported packets */
3270 
3271 	/* MESH mode */
3272 	if (qdf_unlikely(vdev->mesh_vdev)) {
3273 		dp_tx_err("Mesh mode is not supported in exception path");
3274 		goto fail;
3275 	}
3276 
3277 	/*
3278 	 * Classify the frame and call corresponding
3279 	 * "prepare" function which extracts the segment (TSO)
3280 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3281 	 * into MSDU_INFO structure which is later used to fill
3282 	 * SW and HW descriptors.
3283 	 */
3284 	if (qdf_nbuf_is_tso(nbuf)) {
3285 		dp_verbose_debug("TSO frame %pK", vdev);
3286 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3287 				 qdf_nbuf_len(nbuf));
3288 
3289 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3290 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3291 					 qdf_nbuf_len(nbuf));
3292 			goto fail;
3293 		}
3294 
3295 		goto send_multiple;
3296 	}
3297 
3298 	/* SG */
3299 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3300 		struct dp_tx_seg_info_s seg_info = {0};
3301 
3302 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3303 		if (!nbuf)
3304 			goto fail;
3305 
3306 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3307 
3308 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3309 				 qdf_nbuf_len(nbuf));
3310 
3311 		goto send_multiple;
3312 	}
3313 
3314 	if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
3315 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3316 				 qdf_nbuf_len(nbuf));
3317 
3318 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3319 					       tx_exc_metadata->ppdu_cookie);
3320 	}
3321 
3322 	/*
3323 	 * Get HW Queue to use for this frame.
3324 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3325 	 * dedicated for data and 1 for command.
3326 	 * "queue_id" maps to one hardware ring.
3327 	 *  With each ring, we also associate a unique Tx descriptor pool
3328 	 *  to minimize lock contention for these resources.
3329 	 */
3330 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3331 
3332 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3333 		if (qdf_unlikely(vdev->nawds_enabled)) {
3334 			/*
3335 			 * This is a multicast packet
3336 			 */
3337 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3338 					    tx_exc_metadata->peer_id);
3339 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3340 					 1, qdf_nbuf_len(nbuf));
3341 		}
3342 
3343 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3344 					      DP_INVALID_PEER, NULL);
3345 	} else {
3346 		/*
3347 		 * Check exception descriptors
3348 		 */
3349 		if (dp_tx_exception_limit_check(vdev))
3350 			goto fail;
3351 
3352 		/*  Single linear frame */
3353 		/*
3354 		 * If nbuf is a simple linear frame, use send_single function to
3355 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3356 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3357 		 */
3358 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3359 					      tx_exc_metadata->peer_id,
3360 					      tx_exc_metadata);
3361 	}
3362 
3363 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3364 	return nbuf;
3365 
3366 send_multiple:
3367 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3368 
3369 fail:
3370 	if (vdev)
3371 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3372 	dp_verbose_debug("pkt send failed");
3373 	return nbuf;
3374 }
3375 
3376 /**
3377  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3378  *      in exception path in special case to avoid regular exception path chk.
3379  * @soc: DP soc handle
3380  * @vdev_id: id of DP vdev handle
3381  * @nbuf: skb
3382  * @tx_exc_metadata: Handle that holds exception path meta data
3383  *
3384  * Entry point for Core Tx layer (DP_TX) invoked from
3385  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3386  *
3387  * Return: NULL on success,
3388  *         nbuf when it fails to send
3389  */
3390 qdf_nbuf_t
3391 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3392 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3393 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3394 {
3395 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3396 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3397 						     DP_MOD_ID_TX_EXCEPTION);
3398 
3399 	if (qdf_unlikely(!vdev))
3400 		goto fail;
3401 
3402 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3403 			== QDF_STATUS_E_FAILURE)) {
3404 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3405 		goto fail;
3406 	}
3407 
3408 	/* Drop the reference here; it is taken again inside dp_tx_send_exception */
3409 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3410 
3411 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3412 
3413 fail:
3414 	if (vdev)
3415 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3416 	dp_verbose_debug("pkt send failed");
3417 	return nbuf;
3418 }
3419 
3420 /**
3421  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3422  * @soc: DP soc handle
3423  * @vdev_id: DP vdev handle
3424  * @nbuf: skb
3425  *
3426  * Entry point for Core Tx layer (DP_TX) invoked from
3427  * hard_start_xmit in OSIF/HDD
3428  *
3429  * Return: NULL on success,
3430  *         nbuf when it fails to send
3431  */
3432 #ifdef MESH_MODE_SUPPORT
3433 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3434 			   qdf_nbuf_t nbuf)
3435 {
3436 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3437 	struct meta_hdr_s *mhdr;
3438 	qdf_nbuf_t nbuf_mesh = NULL;
3439 	qdf_nbuf_t nbuf_clone = NULL;
3440 	struct dp_vdev *vdev;
3441 	uint8_t no_enc_frame = 0;
3442 
3443 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3444 	if (!nbuf_mesh) {
3445 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3446 				"qdf_nbuf_unshare failed");
3447 		return nbuf;
3448 	}
3449 
3450 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3451 	if (!vdev) {
3452 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3453 				"vdev is NULL for vdev_id %d", vdev_id);
3454 		return nbuf;
3455 	}
3456 
3457 	nbuf = nbuf_mesh;
3458 
3459 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3460 
3461 	if ((vdev->sec_type != cdp_sec_type_none) &&
3462 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3463 		no_enc_frame = 1;
3464 
3465 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3466 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3467 
3468 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3469 		       !no_enc_frame) {
3470 		nbuf_clone = qdf_nbuf_clone(nbuf);
3471 		if (!nbuf_clone) {
3472 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3473 				"qdf_nbuf_clone failed");
3474 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3475 			return nbuf;
3476 		}
3477 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3478 	}
3479 
3480 	if (nbuf_clone) {
3481 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3482 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3483 		} else {
3484 			qdf_nbuf_free(nbuf_clone);
3485 		}
3486 	}
3487 
3488 	if (no_enc_frame)
3489 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3490 	else
3491 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3492 
3493 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3494 	if ((!nbuf) && no_enc_frame) {
3495 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3496 	}
3497 
3498 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3499 	return nbuf;
3500 }
3501 
3502 #else
3503 
3504 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3505 			   qdf_nbuf_t nbuf)
3506 {
3507 	return dp_tx_send(soc, vdev_id, nbuf);
3508 }
3509 
3510 #endif
3511 
3512 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
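/**
 * dp_tx_prefetch_nbuf_data() - Prefetch the nbuf length and data pointer
 * @nbuf: skb to prefetch (may be NULL)
 *
 * Return: None
 */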
3513 static inline
3514 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3515 {
3516 	if (nbuf) {
3517 		qdf_prefetch(&nbuf->len);
3518 		qdf_prefetch(&nbuf->data);
3519 	}
3520 }
3521 #else
3522 static inline
3523 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3524 {
3525 }
3526 #endif
3527 
3528 #ifdef DP_UMAC_HW_RESET_SUPPORT
3529 /*
3530  * dp_tx_drop() - Drop the frame on a given VAP
3531  * @soc: DP soc handle
3532  * @vdev_id: id of DP vdev handle
3533  * @nbuf: skb
3534  *
3535  * Drop all the incoming packets
3536  *
3537  * Return: nbuf
3538  *
3539  */
3540 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3541 		      qdf_nbuf_t nbuf)
3542 {
3543 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3544 	struct dp_vdev *vdev = NULL;
3545 
3546 	vdev = soc->vdev_id_map[vdev_id];
3547 	if (qdf_unlikely(!vdev))
3548 		return nbuf;
3549 
3550 	DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
3551 	return nbuf;
3552 }
3553 
3554 /*
3555  * dp_tx_exc_drop() - Drop the frame on a given VAP
3556  * @soc: DP soc handle
3557  * @vdev_id: id of DP vdev handle
3558  * @nbuf: skb
3559  * @tx_exc_metadata: Handle that holds exception path meta data
3560  *
3561  * Drop all the incoming packets
3562  *
3563  * Return: nbuf
3564  *
3565  */
3566 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3567 			  qdf_nbuf_t nbuf,
3568 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
3569 {
3570 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
3571 }
3572 #endif
3573 
3574 /*
3575  * dp_tx_send() - Transmit a frame on a given VAP
3576  * @soc: DP soc handle
3577  * @vdev_id: id of DP vdev handle
3578  * @nbuf: skb
3579  *
3580  * Entry point for Core Tx layer (DP_TX) invoked from
3581  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
3582  * forwarding cases
3583  *
3584  * Return: NULL on success,
3585  *         nbuf when it fails to send
3586  */
3587 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3588 		      qdf_nbuf_t nbuf)
3589 {
3590 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3591 	uint16_t peer_id = HTT_INVALID_PEER;
3592 	/*
3593 	 * A memzero call incurs additional function call overhead, so
3594 	 * clear the structure with a static initializer on the stack instead.
3595 	 */
3596 	struct dp_tx_msdu_info_s msdu_info = {0};
3597 	struct dp_vdev *vdev = NULL;
3598 	qdf_nbuf_t end_nbuf = NULL;
3599 
3600 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3601 		return nbuf;
3602 
3603 	/*
3604 	 * dp_vdev_get_ref_by_id() performs an atomic operation, so avoid
3605 	 * using it in the per-packet path.
3606 	 *
3607 	 * In this path the vdev memory is already protected by the
3608 	 * netdev TX lock.
3609 	 */
3610 	vdev = soc->vdev_id_map[vdev_id];
3611 	if (qdf_unlikely(!vdev))
3612 		return nbuf;
3613 
3614 	/*
3615 	 * Set Default Host TID value to invalid TID
3616 	 * (TID override disabled)
3617 	 */
3618 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3619 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
3620 
3621 	if (qdf_unlikely(vdev->mesh_vdev)) {
3622 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3623 								&msdu_info);
3624 		if (!nbuf_mesh) {
3625 			dp_verbose_debug("Extracting mesh metadata failed");
3626 			return nbuf;
3627 		}
3628 		nbuf = nbuf_mesh;
3629 	}
3630 
3631 	/*
3632 	 * Get HW Queue to use for this frame.
3633 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3634 	 * dedicated for data and 1 for command.
3635 	 * "queue_id" maps to one hardware ring.
3636 	 *  With each ring, we also associate a unique Tx descriptor pool
3637 	 *  to minimize lock contention for these resources.
3638 	 */
3639 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3640 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
3641 		     1);
3642 
3643 	/*
3644 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3645 	 *  Table 1 - Default DSCP-TID mapping table
3646 	 *  Table 2 - DSCP-TID override table
3647 	 *
3648 	 * If we need a different DSCP-TID mapping for this vap,
3649 	 * call tid_classify to extract DSCP/ToS from frame and
3650 	 * map to a TID and store in msdu_info. This is later used
3651 	 * to fill in TCL Input descriptor (per-packet TID override).
3652 	 */
3653 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3654 
3655 	/*
3656 	 * Classify the frame and call corresponding
3657 	 * "prepare" function which extracts the segment (TSO)
3658 	 * and fragmentation information (for TSO, SG, ME, or Raw)
3659 	 * into MSDU_INFO structure which is later used to fill
3660 	 * SW and HW descriptors.
3661 	 */
3662 	if (qdf_nbuf_is_tso(nbuf)) {
3663 		dp_verbose_debug("TSO frame %pK", vdev);
3664 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3665 				 qdf_nbuf_len(nbuf));
3666 
3667 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3668 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3669 					 qdf_nbuf_len(nbuf));
3670 			return nbuf;
3671 		}
3672 
3673 		goto send_multiple;
3674 	}
3675 
3676 	/* SG */
3677 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3678 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3679 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3680 				return nbuf;
3681 		} else {
3682 			struct dp_tx_seg_info_s seg_info = {0};
3683 
3684 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
3685 				goto send_single;
3686 
3687 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3688 						&msdu_info);
3689 			if (!nbuf)
3690 				return NULL;
3691 
3692 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3693 
3694 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3695 					 qdf_nbuf_len(nbuf));
3696 
3697 			goto send_multiple;
3698 		}
3699 	}
3700 
3701 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3702 		return NULL;
3703 
3704 	/* RAW */
3705 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3706 		struct dp_tx_seg_info_s seg_info = {0};
3707 
3708 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3709 		if (!nbuf)
3710 			return NULL;
3711 
3712 		dp_verbose_debug("Raw frame %pK", vdev);
3713 
3714 		goto send_multiple;
3715 
3716 	}
3717 
3718 	if (qdf_unlikely(vdev->nawds_enabled)) {
3719 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3720 					  qdf_nbuf_data(nbuf);
3721 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3722 			uint16_t sa_peer_id = DP_INVALID_PEER;
3723 
3724 			if (!soc->ast_offload_support) {
3725 				struct dp_ast_entry *ast_entry = NULL;
3726 
3727 				qdf_spin_lock_bh(&soc->ast_lock);
3728 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3729 					(soc,
3730 					 (uint8_t *)(eh->ether_shost),
3731 					 vdev->pdev->pdev_id);
3732 				if (ast_entry)
3733 					sa_peer_id = ast_entry->peer_id;
3734 				qdf_spin_unlock_bh(&soc->ast_lock);
3735 			}
3736 
3737 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3738 					    sa_peer_id);
3739 		}
3740 		peer_id = DP_INVALID_PEER;
3741 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3742 				 1, qdf_nbuf_len(nbuf));
3743 	}
3744 
3745 send_single:
3746 	/*  Single linear frame */
3747 	/*
3748 	 * If nbuf is a simple linear frame, use send_single function to
3749 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3750 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3751 	 */
3752 	dp_tx_prefetch_nbuf_data(nbuf);
3753 
3754 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
3755 					      peer_id, end_nbuf);
3756 	return nbuf;
3757 
3758 send_multiple:
3759 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3760 
3761 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3762 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3763 
3764 	return nbuf;
3765 }
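
/*
 * Dispatch summary for dp_tx_send() above (a sketch of the existing
 * flow, not new behaviour): TSO, non-linear SG (barring the linearize
 * and rmnet special cases) and raw frames are segmented by their
 * respective prepare routines and sent via dp_tx_send_msdu_multiple();
 * all other frames fall through to the single-MSDU path, optionally
 * after NAWDS multicast handling.
 */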
3766 
3767 /**
3768  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3769  *      case to avoid check in per-packet path.
3770  * @soc: DP soc handle
3771  * @vdev_id: id of DP vdev handle
3772  * @nbuf: skb
3773  *
3774  * Entry point for Core Tx layer (DP_TX) invoked from
3775  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3776  * with special condition to avoid per-packet check in dp_tx_send
3777  *
3778  * Return: NULL on success,
3779  *         nbuf when it fails to send
3780  */
3781 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3782 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3783 {
3784 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3785 	struct dp_vdev *vdev = NULL;
3786 
3787 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3788 		return nbuf;
3789 
3790 	/*
3791 	 * dp_vdev_get_ref_by_id() performs an atomic operation, so avoid
3792 	 * using it in the per-packet path.
3793 	 *
3794 	 * In this path the vdev memory is already protected by the
3795 	 * netdev TX lock.
3796 	 */
3797 	vdev = soc->vdev_id_map[vdev_id];
3798 	if (qdf_unlikely(!vdev))
3799 		return nbuf;
3800 
3801 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3802 			== QDF_STATUS_E_FAILURE)) {
3803 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3804 		return nbuf;
3805 	}
3806 
3807 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3808 }
3809 
3810 #ifdef UMAC_SUPPORT_PROXY_ARP
3811 /**
3812  * dp_tx_proxy_arp() - Tx proxy arp handler
3813  * @vdev: datapath vdev handle
3814  * @nbuf: sk buffer
3815  *
3816  * Return: status
3817  */
3818 static inline
3819 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3820 {
3821 	if (vdev->osif_proxy_arp)
3822 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3823 
3824 	/*
3825 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3826 	 * osif_proxy_arp has a valid function pointer assigned
3827 	 * to it
3828 	 */
3829 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3830 
3831 	return QDF_STATUS_NOT_INITIALIZED;
3832 }
3833 #else
3834 /**
3835  * dp_tx_proxy_arp() - Tx proxy arp handler
3836  * @vdev: datapath vdev handle
3837  * @nbuf: sk buffer
3838  *
3839  * This function always returns QDF_STATUS_SUCCESS when
3840  * UMAC_SUPPORT_PROXY_ARP is not defined.
3841  *
3842  * Return: status
3843  */
3844 static inline
3845 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3846 {
3847 	return QDF_STATUS_SUCCESS;
3848 }
3849 #endif
3850 
3851 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3852 #ifdef WLAN_MCAST_MLO
3853 static bool
3854 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3855 		       struct dp_tx_desc_s *tx_desc,
3856 		       qdf_nbuf_t nbuf,
3857 		       uint8_t reinject_reason)
3858 {
3859 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3860 		if (soc->arch_ops.dp_tx_mcast_handler)
3861 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3862 
3863 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3864 		return true;
3865 	}
3866 
3867 	return false;
3868 }
3869 #else /* WLAN_MCAST_MLO */
3870 static inline bool
3871 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3872 		       struct dp_tx_desc_s *tx_desc,
3873 		       qdf_nbuf_t nbuf,
3874 		       uint8_t reinject_reason)
3875 {
3876 	return false;
3877 }
3878 #endif /* WLAN_MCAST_MLO */
3879 #else
3880 static inline bool
3881 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3882 		       struct dp_tx_desc_s *tx_desc,
3883 		       qdf_nbuf_t nbuf,
3884 		       uint8_t reinject_reason)
3885 {
3886 	return false;
3887 }
3888 #endif
3889 
3890 /**
3891  * dp_tx_reinject_handler() - Tx Reinject Handler
3892  * @soc: datapath soc handle
3893  * @vdev: datapath vdev handle
3894  * @tx_desc: software descriptor head pointer
3895  * @status : Tx completion status from HTT descriptor
3896  * @reinject_reason : reinject reason from HTT descriptor
3897  *
3898  * This function reinjects frames back to Target.
3899  * Todo - Host queue needs to be added
3900  *
3901  * Return: none
3902  */
3903 void dp_tx_reinject_handler(struct dp_soc *soc,
3904 			    struct dp_vdev *vdev,
3905 			    struct dp_tx_desc_s *tx_desc,
3906 			    uint8_t *status,
3907 			    uint8_t reinject_reason)
3908 {
3909 	struct dp_peer *peer = NULL;
3910 	uint32_t peer_id = HTT_INVALID_PEER;
3911 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3912 	qdf_nbuf_t nbuf_copy = NULL;
3913 	struct dp_tx_msdu_info_s msdu_info;
3914 #ifdef WDS_VENDOR_EXTENSION
3915 	int is_mcast = 0, is_ucast = 0;
3916 	int num_peers_3addr = 0;
3917 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3918 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3919 #endif
3920 	struct dp_txrx_peer *txrx_peer;
3921 
3922 	qdf_assert(vdev);
3923 
3924 	dp_tx_debug("Tx reinject path");
3925 
3926 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3927 			qdf_nbuf_len(tx_desc->nbuf));
3928 
3929 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3930 		return;
3931 
3932 #ifdef WDS_VENDOR_EXTENSION
3933 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3934 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3935 	} else {
3936 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3937 	}
3938 	is_ucast = !is_mcast;
3939 
3940 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3941 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3942 		txrx_peer = dp_get_txrx_peer(peer);
3943 
3944 		if (!txrx_peer || txrx_peer->bss_peer)
3945 			continue;
3946 
3947 		/* Detect WDS peers that use 3-addr framing for mcast.
3948 		 * If there are any, the bss_peer is used to send the
3949 		 * mcast frame using 3-addr format. All WDS-enabled
3950 		 * peers that use 4-addr framing for mcast frames will
3951 		 * be duplicated and sent as 4-addr frames below.
3952 		 */
3953 		if (!txrx_peer->wds_enabled ||
3954 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3955 			num_peers_3addr = 1;
3956 			break;
3957 		}
3958 	}
3959 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3960 #endif
3961 
3962 	if (qdf_unlikely(vdev->mesh_vdev)) {
3963 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3964 	} else {
3965 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3966 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3967 			txrx_peer = dp_get_txrx_peer(peer);
3968 			if (!txrx_peer)
3969 				continue;
3970 
3971 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3972 #ifdef WDS_VENDOR_EXTENSION
3973 			/*
3974 			 * . if 3-addr STA, then send on BSS Peer
3975 			 * . if Peer WDS enabled and accept 4-addr mcast,
3976 			 * send mcast on that peer only
3977 			 * . if Peer WDS enabled and accept 4-addr ucast,
3978 			 * send ucast on that peer only
3979 			 */
3980 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3981 			 (txrx_peer->wds_enabled &&
3982 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3983 			 (is_ucast &&
3984 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3985 #else
3986 			(txrx_peer->bss_peer &&
3987 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3988 #endif
3989 				peer_id = DP_INVALID_PEER;
3990 
3991 				nbuf_copy = qdf_nbuf_copy(nbuf);
3992 
3993 				if (!nbuf_copy) {
3994 					dp_tx_debug("nbuf copy failed");
3995 					break;
3996 				}
3997 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3998 				dp_tx_get_queue(vdev, nbuf,
3999 						&msdu_info.tx_queue);
4000 
4001 				nbuf_copy = dp_tx_send_msdu_single(vdev,
4002 						nbuf_copy,
4003 						&msdu_info,
4004 						peer_id,
4005 						NULL);
4006 
4007 				if (nbuf_copy) {
4008 					dp_tx_debug("pkt send failed");
4009 					qdf_nbuf_free(nbuf_copy);
4010 				}
4011 			}
4012 		}
4013 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4014 
4015 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
4016 					     QDF_DMA_TO_DEVICE, nbuf->len);
4017 		qdf_nbuf_free(nbuf);
4018 	}
4019 
4020 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4021 }
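
/*
 * Reinject summary (a sketch of the handler above): MLO multicast
 * reinjects are handed off to the arch-specific mcast handler and
 * mesh frames are simply freed. For all other cases a copy of the
 * frame is sent to each eligible peer (the BSS peer, or specific WDS
 * peers when WDS_VENDOR_EXTENSION is enabled) before the original
 * nbuf is unmapped and freed and the Tx descriptor is released.
 */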
4022 
4023 /**
4024  * dp_tx_inspect_handler() - Tx Inspect Handler
4025  * @soc: datapath soc handle
4026  * @vdev: datapath vdev handle
4027  * @tx_desc: software descriptor head pointer
4028  * @status : Tx completion status from HTT descriptor
4029  *
4030  * Handles Tx frames sent back to Host for inspection
4031  * (ProxyARP)
4032  *
4033  * Return: none
4034  */
4035 void dp_tx_inspect_handler(struct dp_soc *soc,
4036 			   struct dp_vdev *vdev,
4037 			   struct dp_tx_desc_s *tx_desc,
4038 			   uint8_t *status)
4039 {
4040 
4041 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4042 			"%s Tx inspect path",
4043 			__func__);
4044 
4045 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
4046 			 qdf_nbuf_len(tx_desc->nbuf));
4047 
4048 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4049 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4050 }
4051 
4052 #ifdef MESH_MODE_SUPPORT
4053 /**
4054  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
4055  *                                         in mesh meta header
4056  * @tx_desc: software descriptor head pointer
4057  * @ts: pointer to tx completion stats
4058  * Return: none
4059  */
4060 static
4061 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4062 		struct hal_tx_completion_status *ts)
4063 {
4064 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4065 
4066 	if (!tx_desc->msdu_ext_desc) {
4067 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4068 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4069 				"netbuf %pK offset %d",
4070 				netbuf, tx_desc->pkt_offset);
4071 			return;
4072 		}
4073 	}
4074 }
4075 
4076 #else
4077 static
4078 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4079 		struct hal_tx_completion_status *ts)
4080 {
4081 }
4082 
4083 #endif
4084 
4085 #ifdef CONFIG_SAWF
4086 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4087 					 struct dp_vdev *vdev,
4088 					 struct dp_txrx_peer *txrx_peer,
4089 					 struct dp_tx_desc_s *tx_desc,
4090 					 struct hal_tx_completion_status *ts,
4091 					 uint8_t tid)
4092 {
4093 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4094 					   ts, tid);
4095 }
4096 
4097 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4098 				    uint32_t nw_delay,
4099 				    uint32_t sw_delay,
4100 				    uint32_t hw_delay)
4101 {
4102 	dp_peer_tid_delay_avg(tx_delay,
4103 			      nw_delay,
4104 			      sw_delay,
4105 			      hw_delay);
4106 }
4107 #else
4108 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4109 					 struct dp_vdev *vdev,
4110 					 struct dp_txrx_peer *txrx_peer,
4111 					 struct dp_tx_desc_s *tx_desc,
4112 					 struct hal_tx_completion_status *ts,
4113 					 uint8_t tid)
4114 {
4115 }
4116 
4117 static inline void
4118 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4119 			uint32_t nw_delay, uint32_t sw_delay,
4120 			uint32_t hw_delay)
4121 {
4122 }
4123 #endif
4124 
4125 #ifdef QCA_PEER_EXT_STATS
4126 #ifdef WLAN_CONFIG_TX_DELAY
4127 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4128 				    struct dp_tx_desc_s *tx_desc,
4129 				    struct hal_tx_completion_status *ts,
4130 				    struct dp_vdev *vdev)
4131 {
4132 	struct dp_soc *soc = vdev->pdev->soc;
4133 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4134 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4135 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4136 
4137 	if (!ts->valid)
4138 		return;
4139 
4140 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4141 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4142 
4143 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4144 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4145 
4146 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4147 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4148 							  &fwhw_transmit_delay))
4149 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4150 					     fwhw_transmit_delay);
4151 
4152 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4153 				fwhw_transmit_delay);
4154 }
4155 #else
4156 /*
4157  * dp_tx_compute_tid_delay() - Compute per TID delay
4158  * @stats: Per TID delay stats
4159  * @tx_desc: Software Tx descriptor
4160  * @ts: Tx completion status
4161  * @vdev: vdev
4162  *
4163  * Compute the software enqueue and hw enqueue delays and
4164  * update the respective histograms
4165  *
4166  * Return: void
4167  */
4168 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4169 				    struct dp_tx_desc_s *tx_desc,
4170 				    struct hal_tx_completion_status *ts,
4171 				    struct dp_vdev *vdev)
4172 {
4173 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4174 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4175 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4176 
4177 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4178 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4179 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4180 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4181 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4182 					 timestamp_hw_enqueue);
4183 
4184 	/*
4185 	 * Update the Tx software enqueue and HW enqueue-to-completion delays.
4186 	 */
4187 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4188 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4189 }
4190 #endif
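
/*
 * Delay breakdown computed by both variants above (a sketch; units
 * are us when WLAN_CONFIG_TX_DELAY is set and ms otherwise, with the
 * HW delay then taken from the arch-specific hook):
 *   sw_enqueue_delay    = t(HW enqueue)    - t(host ingress)
 *   fwhw_transmit_delay = t(Tx completion) - t(HW enqueue)
 * Both values feed the per-TID histograms via dp_hist_update_stats().
 */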
4191 
4192 /*
4193  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4194  * @txrx_peer: DP peer context
4195  * @tx_desc: Tx software descriptor
4196  * @ts: Tx completion status
4197  * @ring_id: Rx CPU context ID/CPU_ID
4198  *
4199  * Update the peer extended stats. These are enhanced delay stats
4200  * tracked at the per-MSDU level.
4201  *
4202  * Return: void
4203  */
4204 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4205 					  struct dp_tx_desc_s *tx_desc,
4206 					  struct hal_tx_completion_status *ts,
4207 					  uint8_t ring_id)
4208 {
4209 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4210 	struct dp_soc *soc = NULL;
4211 	struct dp_peer_delay_stats *delay_stats = NULL;
4212 	uint8_t tid;
4213 
4214 	soc = pdev->soc;
4215 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4216 		return;
4217 
4218 	tid = ts->tid;
4219 	delay_stats = txrx_peer->delay_stats;
4220 
4221 	qdf_assert(delay_stats);
4222 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4223 
4224 	/*
4225 	 * For non-TID packets use the TID 9
4226 	 */
4227 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4228 		tid = CDP_MAX_DATA_TIDS - 1;
4229 
4230 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4231 				tx_desc, ts, txrx_peer->vdev);
4232 }
4233 #else
4234 static inline
4235 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4236 				   struct dp_tx_desc_s *tx_desc,
4237 				   struct hal_tx_completion_status *ts,
4238 				   uint8_t ring_id)
4239 {
4240 }
4241 #endif
4242 
4243 #ifdef WLAN_PEER_JITTER
4244 /*
4245  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4246  * @curr_delay: Current delay
4247  * @prev_delay: Previous delay
4248  * @avg_jitter: Average Jitter
4249  * Return: Newly Computed Average Jitter
4250  */
4251 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4252 					    uint32_t prev_delay,
4253 					    uint32_t avg_jitter)
4254 {
4255 	uint32_t curr_jitter;
4256 	int32_t jitter_diff;
4257 
4258 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4259 	if (!avg_jitter)
4260 		return curr_jitter;
4261 
4262 	jitter_diff = curr_jitter - avg_jitter;
4263 	if (jitter_diff < 0)
4264 		avg_jitter = avg_jitter -
4265 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4266 	else
4267 		avg_jitter = avg_jitter +
4268 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4269 
4270 	return avg_jitter;
4271 }
4272 
4273 /*
4274  * dp_tx_jitter_get_avg_delay() - compute the average delay
4275  * @curr_delay: Current delay
4276  * @avg_delay: Average delay
4277  * Return: Newly Computed Average Delay
4278  */
4279 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4280 					   uint32_t avg_delay)
4281 {
4282 	int32_t delay_diff;
4283 
4284 	if (!avg_delay)
4285 		return curr_delay;
4286 
4287 	delay_diff = curr_delay - avg_delay;
4288 	if (delay_diff < 0)
4289 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4290 					DP_AVG_DELAY_WEIGHT_DENOM);
4291 	else
4292 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4293 					DP_AVG_DELAY_WEIGHT_DENOM);
4294 
4295 	return avg_delay;
4296 }
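
/*
 * Both helpers above implement an exponentially weighted moving
 * average using a right shift as the weight (a sketch of the math,
 * ignoring integer truncation):
 *   avg' = avg + (sample - avg) / 2^WEIGHT_DENOM
 * where sample is |curr_delay - prev_delay| for the jitter average
 * and curr_delay for the delay average; a larger denominator smooths
 * the average more aggressively.
 */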
4297 
4298 #ifdef WLAN_CONFIG_TX_DELAY
4299 /*
4300  * dp_tx_compute_cur_delay() - get the current delay
4301  * @soc: soc handle
4302  * @vdev: vdev structure for data path state
4303  * @ts: Tx completion status
4304  * @curr_delay: current delay
4305  * @tx_desc: tx descriptor
4306  * Return: void
4307  */
4308 static
4309 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4310 				   struct dp_vdev *vdev,
4311 				   struct hal_tx_completion_status *ts,
4312 				   uint32_t *curr_delay,
4313 				   struct dp_tx_desc_s *tx_desc)
4314 {
4315 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4316 
4317 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4318 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4319 							      curr_delay);
4320 	return status;
4321 }
4322 #else
4323 static
4324 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4325 				   struct dp_vdev *vdev,
4326 				   struct hal_tx_completion_status *ts,
4327 				   uint32_t *curr_delay,
4328 				   struct dp_tx_desc_s *tx_desc)
4329 {
4330 	int64_t current_timestamp, timestamp_hw_enqueue;
4331 
4332 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4333 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4334 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4335 
4336 	return QDF_STATUS_SUCCESS;
4337 }
4338 #endif
4339 
4340 /* dp_tx_compute_tid_jitter() - compute per tid per ring jitter
4341  * @jitter: per tid per ring jitter stats
4342  * @ts: Tx completion status
4343  * @vdev: vdev structure for data path state
4344  * @tx_desc: tx descriptor
4345  * Return: void
4346  */
4347 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
4348 				     struct hal_tx_completion_status *ts,
4349 				     struct dp_vdev *vdev,
4350 				     struct dp_tx_desc_s *tx_desc)
4351 {
4352 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
4353 	struct dp_soc *soc = vdev->pdev->soc;
4354 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4355 
4356 	if (ts->status !=  HAL_TX_TQM_RR_FRAME_ACKED) {
4357 		jitter->tx_drop += 1;
4358 		return;
4359 	}
4360 
4361 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
4362 					 tx_desc);
4363 
4364 	if (QDF_IS_STATUS_SUCCESS(status)) {
4365 		avg_delay = jitter->tx_avg_delay;
4366 		avg_jitter = jitter->tx_avg_jitter;
4367 		prev_delay = jitter->tx_prev_delay;
4368 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
4369 							 prev_delay,
4370 							 avg_jitter);
4371 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
4372 		jitter->tx_avg_delay = avg_delay;
4373 		jitter->tx_avg_jitter = avg_jitter;
4374 		jitter->tx_prev_delay = curr_delay;
4375 		jitter->tx_total_success += 1;
4376 	} else if (status == QDF_STATUS_E_FAILURE) {
4377 		jitter->tx_avg_err += 1;
4378 	}
4379 }
4380 
4381 /* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
4382  * @txrx_peer: DP peer context
4383  * @tx_desc: Tx software descriptor
4384  * @ts: Tx completion status
4385  * @ring_id: Rx CPU context ID/CPU_ID
4386  * Return: void
4387  */
4388 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4389 					   struct dp_tx_desc_s *tx_desc,
4390 					   struct hal_tx_completion_status *ts,
4391 					   uint8_t ring_id)
4392 {
4393 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4394 	struct dp_soc *soc = pdev->soc;
4395 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4396 	uint8_t tid;
4397 	struct cdp_peer_tid_stats *rx_tid = NULL;
4398 
4399 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
4400 		return;
4401 
4402 	tid = ts->tid;
4403 	jitter_stats = txrx_peer->jitter_stats;
4404 	qdf_assert_always(jitter_stats);
4405 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
4406 	/*
4407 	 * For non-TID packets use the TID 9
4408 	 */
4409 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4410 		tid = CDP_MAX_DATA_TIDS - 1;
4411 
4412 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
4413 	dp_tx_compute_tid_jitter(rx_tid,
4414 				 ts, txrx_peer->vdev, tx_desc);
4415 }
4416 #else
4417 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
4418 					   struct dp_tx_desc_s *tx_desc,
4419 					   struct hal_tx_completion_status *ts,
4420 					   uint8_t ring_id)
4421 {
4422 }
4423 #endif
4424 
4425 #ifdef HW_TX_DELAY_STATS_ENABLE
4426 /**
4427  * dp_update_tx_delay_stats() - update the delay stats
4428  * @vdev: vdev handle
4429  * @delay: delay in ms or us based on the flag delay_in_us
4430  * @tid: tid value
4431  * @mode: type of tx delay mode
4432  * @ring_id: ring number
4433  * @delay_in_us: flag to indicate whether the delay is in ms or us
4434  *
4435  * Return: none
4436  */
4437 static inline
4438 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4439 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4440 {
4441 	struct cdp_tid_tx_stats *tstats =
4442 		&vdev->stats.tid_tx_stats[ring_id][tid];
4443 
4444 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4445 			      delay_in_us);
4446 }
4447 #else
4448 static inline
4449 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
4450 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
4451 {
4452 	struct cdp_tid_tx_stats *tstats =
4453 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4454 
4455 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
4456 			      delay_in_us);
4457 }
4458 #endif
4459 
4460 /**
4461  * dp_tx_compute_delay() - Compute Tx path delays from the recorded
4462  *				timestamps and update the delay stats
4463  *
4464  * @vdev: vdev handle
4465  * @tx_desc: tx descriptor
4466  * @tid: tid value
4467  * @ring_id: TCL or WBM ring number for transmit path
4468  * Return: none
4469  */
4470 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
4471 			 uint8_t tid, uint8_t ring_id)
4472 {
4473 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4474 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
4475 	uint32_t fwhw_transmit_delay_us;
4476 
4477 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
4478 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
4479 		return;
4480 
4481 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
4482 		fwhw_transmit_delay_us =
4483 			qdf_ktime_to_us(qdf_ktime_real_get()) -
4484 			qdf_ktime_to_us(tx_desc->timestamp);
4485 
4486 		/*
4487 		 * Delay between packet enqueued to HW and Tx completion in us
4488 		 */
4489 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
4490 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
4491 					 ring_id, true);
4492 		/*
4493 		 * For MCL, only the enqueue-to-completion delay is required,
4494 		 * so return when the vdev flag is enabled.
4495 		 */
4496 		return;
4497 	}
4498 
4499 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4500 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4501 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4502 					 timestamp_hw_enqueue);
4503 
4504 	/*
4505 	 * Delay between packet enqueued to HW and Tx completion in ms
4506 	 */
4507 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
4508 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
4509 				 false);
4510 
4511 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4512 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4513 	interframe_delay = (uint32_t)(timestamp_ingress -
4514 				      vdev->prev_tx_enq_tstamp);
4515 
4516 	/*
4517 	 * Delay in software enqueue
4518 	 */
4519 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
4520 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
4521 				 false);
4522 
4523 	/*
4524 	 * Update interframe delay stats calculated at hardstart receive point.
4525 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
4526 	 * the interframe delay will not be calculated correctly for the 1st
4527 	 * frame. On the other hand, this avoids an extra per-packet check
4528 	 * of !vdev->prev_tx_enq_tstamp.
4529 	 */
4530 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4531 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4532 				 false);
4533 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4534 }
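
/*
 * Summary of dp_tx_compute_delay() above (a sketch of the existing
 * bookkeeping): when per-vdev delay stats are enabled, only the
 * HW-enqueue-to-completion delay is recorded, in microseconds.
 * Otherwise three millisecond-granularity delays are recorded per
 * TID/ring: the FW/HW transmit delay, the software enqueue delay and
 * the interframe delay relative to the previous ingress timestamp.
 */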
4535 
4536 #ifdef DISABLE_DP_STATS
4537 static
4538 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4539 				   struct dp_txrx_peer *txrx_peer)
4540 {
4541 }
4542 #else
4543 static inline void
4544 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4545 {
4546 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4547 
4548 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4549 	if (subtype != QDF_PROTO_INVALID)
4550 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4551 					  1);
4552 }
4553 #endif
4554 
4555 #ifndef QCA_ENHANCED_STATS_SUPPORT
4556 #ifdef DP_PEER_EXTENDED_API
4557 static inline uint8_t
4558 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4559 {
4560 	return txrx_peer->mpdu_retry_threshold;
4561 }
4562 #else
4563 static inline uint8_t
4564 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
4565 {
4566 	return 0;
4567 }
4568 #endif
4569 
4570 /**
4571  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4572  *
4573  * @ts: Tx completion status
4574  * @txrx_peer: datapath txrx_peer handle
4575  *
4576  * Return: void
4577  */
4578 static inline void
4579 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4580 			     struct dp_txrx_peer *txrx_peer)
4581 {
4582 	uint8_t mcs, pkt_type, dst_mcs_idx;
4583 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
4584 
4585 	mcs = ts->mcs;
4586 	pkt_type = ts->pkt_type;
4587 	/* do HW to SW pkt type conversion */
4588 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4589 		    hal_2_dp_pkt_type_map[pkt_type]);
4590 
4591 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4592 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4593 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4594 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4595 				       1);
4596 
4597 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4598 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4599 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4600 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4601 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4602 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4603 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4604 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4605 	if (ts->first_msdu) {
4606 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4607 					ts->transmit_cnt > 1);
4608 
4609 		if (!retry_threshold)
4610 			return;
4611 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4612 					qdf_do_div(ts->transmit_cnt,
4613 						   retry_threshold),
4614 					ts->transmit_cnt > retry_threshold);
4615 	}
4616 }
4617 #else
4618 static inline void
4619 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4620 			     struct dp_txrx_peer *txrx_peer)
4621 {
4622 }
4623 #endif
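
/*
 * Note on the retry accounting above (a sketch): for the first MSDU
 * of an MPDU, mpdu_success_with_retries is incremented by
 * transmit_cnt / retry_threshold, and only when transmit_cnt exceeds
 * the non-zero per-peer threshold, so lightly retried MPDUs do not
 * contribute.
 */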
4624 
4625 /**
4626  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4627  *				per wbm ring
4628  *
4629  * @tx_desc: software descriptor head pointer
4630  * @ts: Tx completion status
4631  * @txrx_peer: txrx peer handle
4632  * @ring_id: ring number
4633  *
4634  * Return: None
4635  */
4636 static inline void
4637 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4638 			struct hal_tx_completion_status *ts,
4639 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4640 {
4641 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4642 	uint8_t tid = ts->tid;
4643 	uint32_t length;
4644 	struct cdp_tid_tx_stats *tid_stats;
4645 
4646 	if (!pdev)
4647 		return;
4648 
4649 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4650 		tid = CDP_MAX_DATA_TIDS - 1;
4651 
4652 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4653 
4654 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4655 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4656 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4657 		return;
4658 	}
4659 
4660 	length = qdf_nbuf_len(tx_desc->nbuf);
4661 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4662 
4663 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4664 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4665 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4666 
4667 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4668 		tid_stats->tqm_status_cnt[ts->status]++;
4669 	}
4670 
4671 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4672 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4673 					   ts->transmit_cnt > 1);
4674 
4675 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4676 					   1, ts->transmit_cnt > 2);
4677 
4678 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4679 
4680 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4681 					   ts->msdu_part_of_amsdu);
4682 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4683 					   !ts->msdu_part_of_amsdu);
4684 
4685 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4686 							qdf_system_ticks();
4687 
4688 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4689 
4690 		return;
4691 	}
4692 
4693 	/*
4694 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4695 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
4696 	 * hw limitation there are no completions for failed cases.
4697 	 * Hence updating tx_failed from data path. Please note that
4698 	 * if tx_failed is fixed to be from ppdu, then this has to be
4699 	 * removed
4700 	 */
4701 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4702 
4703 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4704 				   ts->transmit_cnt > DP_RETRY_COUNT);
4705 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4706 
4707 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4708 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4709 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4710 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4711 					      length);
4712 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4713 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4714 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4715 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4716 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4717 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4718 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4719 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4720 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4721 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4722 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4723 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4724 					  tx.dropped.fw_rem_queue_disable, 1);
4725 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4726 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4727 					  tx.dropped.fw_rem_no_match, 1);
4728 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4729 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4730 					  tx.dropped.drop_threshold, 1);
4731 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4732 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4733 					  tx.dropped.drop_link_desc_na, 1);
4734 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4735 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4736 					  tx.dropped.invalid_drop, 1);
4737 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4738 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4739 					  tx.dropped.mcast_vdev_drop, 1);
4740 	} else {
4741 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4742 	}
4743 }
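
/*
 * Accounting summary for dp_tx_update_peer_stats() above (a sketch):
 * TQM-acked frames update the retry, OFDMA and (A-)MSDU counters,
 * the last-Tx timestamp and the extended rate stats; every other TQM
 * release reason is mapped to a dedicated tx.dropped counter, with
 * tx_failed also bumped from the data path as explained in the
 * comment above.
 */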
4744 
4745 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4746 /**
4747  * dp_tx_flow_pool_lock() - take flow pool lock
4748  * @soc: core txrx main context
4749  * @tx_desc: tx desc
4750  *
4751  * Return: None
4752  */
4753 static inline
4754 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4755 			  struct dp_tx_desc_s *tx_desc)
4756 {
4757 	struct dp_tx_desc_pool_s *pool;
4758 	uint8_t desc_pool_id;
4759 
4760 	desc_pool_id = tx_desc->pool_id;
4761 	pool = &soc->tx_desc[desc_pool_id];
4762 
4763 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4764 }
4765 
4766 /**
4767  * dp_tx_flow_pool_unlock() - release flow pool lock
4768  * @soc: core txrx main context
4769  * @tx_desc: tx desc
4770  *
4771  * Return: None
4772  */
4773 static inline
4774 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4775 			    struct dp_tx_desc_s *tx_desc)
4776 {
4777 	struct dp_tx_desc_pool_s *pool;
4778 	uint8_t desc_pool_id;
4779 
4780 	desc_pool_id = tx_desc->pool_id;
4781 	pool = &soc->tx_desc[desc_pool_id];
4782 
4783 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4784 }
4785 #else
4786 static inline
4787 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4788 {
4789 }
4790 
4791 static inline
4792 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4793 {
4794 }
4795 #endif
4796 
4797 /**
4798  * dp_tx_notify_completion() - Notify tx completion for this desc
4799  * @soc: core txrx main context
4800  * @vdev: datapath vdev handle
4801  * @tx_desc: tx desc
4802  * @netbuf:  buffer
4803  * @status: tx status
4804  *
4805  * Return: none
4806  */
4807 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4808 					   struct dp_vdev *vdev,
4809 					   struct dp_tx_desc_s *tx_desc,
4810 					   qdf_nbuf_t netbuf,
4811 					   uint8_t status)
4812 {
4813 	void *osif_dev;
4814 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4815 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4816 
4817 	qdf_assert(tx_desc);
4818 
4819 	if (!vdev ||
4820 	    !vdev->osif_vdev) {
4821 		return;
4822 	}
4823 
4824 	osif_dev = vdev->osif_vdev;
4825 	tx_compl_cbk = vdev->tx_comp;
4826 
4827 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4828 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4829 
4830 	if (tx_compl_cbk)
4831 		tx_compl_cbk(netbuf, osif_dev, flag);
4832 }
4833 
4834 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
4835  * @pdev: pdev handle
4836  * @tid: tid value
4837  * @txdesc_ts: timestamp from txdesc
4838  * @ppdu_id: ppdu id
4839  *
4840  * Return: none
4841  */
4842 #ifdef FEATURE_PERPKT_INFO
4843 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4844 					       struct dp_txrx_peer *txrx_peer,
4845 					       uint8_t tid,
4846 					       uint64_t txdesc_ts,
4847 					       uint32_t ppdu_id)
4848 {
4849 	uint64_t delta_ms;
4850 	struct cdp_tx_sojourn_stats *sojourn_stats;
4851 	struct dp_peer *primary_link_peer = NULL;
4852 	struct dp_soc *link_peer_soc = NULL;
4853 
4854 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4855 		return;
4856 
4857 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4858 			 tid >= CDP_DATA_TID_MAX))
4859 		return;
4860 
4861 	if (qdf_unlikely(!pdev->sojourn_buf))
4862 		return;
4863 
4864 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4865 							   txrx_peer->peer_id,
4866 							   DP_MOD_ID_TX_COMP);
4867 
4868 	if (qdf_unlikely(!primary_link_peer))
4869 		return;
4870 
4871 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4872 		qdf_nbuf_data(pdev->sojourn_buf);
4873 
4874 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4875 	sojourn_stats->cookie = (void *)
4876 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4877 							  primary_link_peer);
4878 
4879 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4880 				txdesc_ts;
4881 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4882 			    delta_ms);
4883 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4884 	sojourn_stats->num_msdus[tid] = 1;
4885 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4886 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4887 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4888 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4889 			     WDI_NO_VAL, pdev->pdev_id);
4890 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4891 	sojourn_stats->num_msdus[tid] = 0;
4892 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4893 
4894 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4895 }
4896 #else
4897 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4898 					       struct dp_txrx_peer *txrx_peer,
4899 					       uint8_t tid,
4900 					       uint64_t txdesc_ts,
4901 					       uint32_t ppdu_id)
4902 {
4903 }
4904 #endif
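
/*
 * Sketch of the sojourn accounting above: delta_ms is the host
 * residency time of the MSDU (completion time minus the timestamp
 * recorded in the Tx descriptor). It updates a per-TID EWMA via
 * qdf_ewma_tx_lag_add() and is published once per completion through
 * WDI_EVENT_TX_SOJOURN_STAT, after which the per-event fields of the
 * shared sojourn buffer are cleared.
 */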
4905 
4906 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4907 /**
4908  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4909  * @soc: dp_soc handle
4910  * @desc: Tx Descriptor
4911  * @ts: HAL Tx completion descriptor contents
4912  *
4913  * This function is used to send tx completion to packet capture
4914  */
4915 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4916 				       struct dp_tx_desc_s *desc,
4917 				       struct hal_tx_completion_status *ts)
4918 {
4919 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4920 			     desc, ts->peer_id,
4921 			     WDI_NO_VAL, desc->pdev->pdev_id);
4922 }
4923 #endif
4924 
4925 /**
4926  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4927  * @soc: DP Soc handle
4928  * @desc: software Tx descriptor
4929  * @ts: Tx completion status from HAL/HTT descriptor
4930  *
4931  * Return: none
4932  */
4933 void
4934 dp_tx_comp_process_desc(struct dp_soc *soc,
4935 			struct dp_tx_desc_s *desc,
4936 			struct hal_tx_completion_status *ts,
4937 			struct dp_txrx_peer *txrx_peer)
4938 {
4939 	uint64_t time_latency = 0;
4940 	uint16_t peer_id = DP_INVALID_PEER_ID;
4941 
4942 	/*
4943 	 * m_copy/tx_capture modes are not supported for
4944 	 * scatter gather packets
4945 	 */
4946 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4947 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4948 				qdf_ktime_to_ms(desc->timestamp));
4949 	}
4950 
4951 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4952 
4953 	if (dp_tx_pkt_tracepoints_enabled())
4954 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4955 				    desc->msdu_ext_desc ?
4956 				    desc->msdu_ext_desc->tso_desc : NULL,
4957 				    qdf_ktime_to_ms(desc->timestamp));
4958 
4959 	if (!(desc->msdu_ext_desc)) {
4960 		dp_tx_enh_unmap(soc, desc);
4961 		if (txrx_peer)
4962 			peer_id = txrx_peer->peer_id;
4963 
4964 		if (QDF_STATUS_SUCCESS ==
4965 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4966 			return;
4967 		}
4968 
4969 		if (QDF_STATUS_SUCCESS ==
4970 		    dp_get_completion_indication_for_stack(soc,
4971 							   desc->pdev,
4972 							   txrx_peer, ts,
4973 							   desc->nbuf,
4974 							   time_latency)) {
4975 			dp_send_completion_to_stack(soc,
4976 						    desc->pdev,
4977 						    ts->peer_id,
4978 						    ts->ppdu_id,
4979 						    desc->nbuf);
4980 			return;
4981 		}
4982 	}
4983 
4984 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4985 	dp_tx_comp_free_buf(soc, desc, false);
4986 }
4987 
4988 #ifdef DISABLE_DP_STATS
4989 /**
4990  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4991  * @soc: core txrx main context
4992  * @tx_desc: tx desc
4993  * @status: tx status
4994  *
4995  * Return: none
4996  */
4997 static inline
4998 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4999 				     struct dp_vdev *vdev,
5000 				     struct dp_tx_desc_s *tx_desc,
5001 				     uint8_t status)
5002 {
5003 }
5004 #else
5005 static inline
5006 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5007 				     struct dp_vdev *vdev,
5008 				     struct dp_tx_desc_s *tx_desc,
5009 				     uint8_t status)
5010 {
5011 	void *osif_dev;
5012 	ol_txrx_stats_rx_fp stats_cbk;
5013 	uint8_t pkt_type;
5014 
5015 	qdf_assert(tx_desc);
5016 
5017 	if (!vdev ||
5018 	    !vdev->osif_vdev ||
5019 	    !vdev->stats_cb)
5020 		return;
5021 
5022 	osif_dev = vdev->osif_vdev;
5023 	stats_cbk = vdev->stats_cb;
5024 
5025 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5026 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5027 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5028 			  &pkt_type);
5029 }
5030 #endif
5031 
5032 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
5033 QDF_STATUS
5034 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5035 			  uint32_t delta_tsf,
5036 			  uint32_t *delay_us)
5037 {
5038 	uint32_t buffer_ts;
5039 	uint32_t delay;
5040 
5041 	if (!delay_us)
5042 		return QDF_STATUS_E_INVAL;
5043 
5044 	/* If tx_rate_stats_info_valid is 0, the tsf is invalid; bail out */
5045 	if (!ts->valid)
5046 		return QDF_STATUS_E_INVAL;
5047 
5048 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5049 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5050 	 * valid up to 29 bits.
5051 	 */
5052 	buffer_ts = ts->buffer_timestamp << 10;
5053 
5054 	delay = ts->tsf - buffer_ts - delta_tsf;
5055 
5056 	if (qdf_unlikely(delay & 0x80000000)) {
5057 		dp_err_rl("delay = 0x%x (-ve)\n"
5058 			  "release_src = %d\n"
5059 			  "ppdu_id = 0x%x\n"
5060 			  "peer_id = 0x%x\n"
5061 			  "tid = 0x%x\n"
5062 			  "release_reason = %d\n"
5063 			  "tsf = %u (0x%x)\n"
5064 			  "buffer_timestamp = %u (0x%x)\n"
5065 			  "delta_tsf = %u (0x%x)\n",
5066 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5067 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5068 			  ts->buffer_timestamp, ts->buffer_timestamp,
5069 			  delta_tsf, delta_tsf);
5070 
5071 		delay = 0;
5072 		goto end;
5073 	}
5074 
5075 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
5076 	if (delay > 0x1000000) {
5077 		dp_info_rl("----------------------\n"
5078 			   "Tx completion status:\n"
5079 			   "----------------------\n"
5080 			   "release_src = %d\n"
5081 			   "ppdu_id = 0x%x\n"
5082 			   "release_reason = %d\n"
5083 			   "tsf = %u (0x%x)\n"
5084 			   "buffer_timestamp = %u (0x%x)\n"
5085 			   "delta_tsf = %u (0x%x)\n",
5086 			   ts->release_src, ts->ppdu_id, ts->status,
5087 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5088 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5089 		return QDF_STATUS_E_FAILURE;
5090 	}
5091 
5092 
5093 end:
5094 	*delay_us = delay;
5095 
5096 	return QDF_STATUS_SUCCESS;
5097 }
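
/*
 * Worked example for the arithmetic above (illustrative numbers, not
 * captured from hardware): with buffer_timestamp = 0x2000 (units of
 * 1024 us), buffer_ts = 0x2000 << 10 = 0x800000 us. For ts->tsf =
 * 0x801000 and delta_tsf = 0x800, delay = 0x801000 - 0x800000 - 0x800
 * = 0x800 us (~2 ms). A negative result (bit 31 set) is clamped to 0,
 * and a value above 0x1000000 us is rejected as invalid.
 */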
5098 
5099 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5100 		      uint32_t delta_tsf)
5101 {
5102 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5103 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5104 						     DP_MOD_ID_CDP);
5105 
5106 	if (!vdev) {
5107 		dp_err_rl("vdev %d does not exist", vdev_id);
5108 		return;
5109 	}
5110 
5111 	vdev->delta_tsf = delta_tsf;
5112 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5113 
5114 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5115 }
5116 #endif
5117 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5118 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5119 				      uint8_t vdev_id, bool enable)
5120 {
5121 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5122 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5123 						     DP_MOD_ID_CDP);
5124 
5125 	if (!vdev) {
5126 		dp_err_rl("vdev %d does not exist", vdev_id);
5127 		return QDF_STATUS_E_FAILURE;
5128 	}
5129 
5130 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5131 
5132 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5133 
5134 	return QDF_STATUS_SUCCESS;
5135 }
5136 
5137 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5138 			       uint32_t *val)
5139 {
5140 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5141 	struct dp_vdev *vdev;
5142 	uint32_t delay_accum;
5143 	uint32_t pkts_accum;
5144 
5145 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5146 	if (!vdev) {
5147 		dp_err_rl("vdev %d does not exist", vdev_id);
5148 		return QDF_STATUS_E_FAILURE;
5149 	}
5150 
5151 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5152 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5153 		return QDF_STATUS_E_FAILURE;
5154 	}
5155 
5156 	/* Average uplink delay based on current accumulated values */
5157 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5158 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5159 
5160 	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
5161 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5162 		 delay_accum, pkts_accum);
5163 
5164 	/* Reset accumulated values to 0 */
5165 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5166 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5167 
5168 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5169 
5170 	return QDF_STATUS_SUCCESS;
5171 }
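
/*
 * The uplink delay reported above is a plain mean over the current
 * accumulation window (a sketch of the behaviour, with the division
 * guarded against a zero packet count):
 *   uplink_delay = ul_delay_accum / ul_pkts_accum
 * Both accumulators are reset to zero after each read, so successive
 * reads report independent windows.
 */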
5172 
5173 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5174 				      struct hal_tx_completion_status *ts)
5175 {
5176 	uint32_t ul_delay;
5177 
5178 	if (qdf_unlikely(!vdev)) {
5179 		dp_info_rl("vdev is null or delete in progress");
5180 		return;
5181 	}
5182 
5183 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5184 		return;
5185 
5186 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5187 							  vdev->delta_tsf,
5188 							  &ul_delay)))
5189 		return;
5190 
5191 	ul_delay /= 1000; /* in unit of ms */
5192 
5193 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5194 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5195 }
5196 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5197 static inline
5198 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5199 			       struct hal_tx_completion_status *ts)
5200 {
5201 }
5202 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5203 
5204 /**
5205  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
5206  * @soc: DP soc handle
5207  * @tx_desc: software descriptor head pointer
5208  * @ts: Tx completion status
5209  * @txrx_peer: txrx peer handle
5210  * @ring_id: ring number
5211  *
5212  * Return: none
5213  */
5214 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
5215 				  struct dp_tx_desc_s *tx_desc,
5216 				  struct hal_tx_completion_status *ts,
5217 				  struct dp_txrx_peer *txrx_peer,
5218 				  uint8_t ring_id)
5219 {
5220 	uint32_t length;
5221 	qdf_ether_header_t *eh;
5222 	struct dp_vdev *vdev = NULL;
5223 	qdf_nbuf_t nbuf = tx_desc->nbuf;
5224 	enum qdf_dp_tx_rx_status dp_status;
5225 
5226 	if (!nbuf) {
5227 		dp_info_rl("invalid tx descriptor. nbuf NULL");
5228 		goto out;
5229 	}
5230 
5231 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
5232 	length = qdf_nbuf_len(nbuf);
5233 
5234 	dp_status = dp_tx_hw_to_qdf(ts->status);
5235 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
5236 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
5237 				 QDF_TRACE_DEFAULT_PDEV_ID,
5238 				 qdf_nbuf_data_addr(nbuf),
5239 				 sizeof(qdf_nbuf_data(nbuf)),
5240 				 tx_desc->id, ts->status, dp_status));
5241 
5242 	dp_tx_comp_debug("-------------------- \n"
5243 			 "Tx Completion Stats: \n"
5244 			 "-------------------- \n"
5245 			 "ack_frame_rssi = %d \n"
5246 			 "first_msdu = %d \n"
5247 			 "last_msdu = %d \n"
5248 			 "msdu_part_of_amsdu = %d \n"
5249 			 "rate_stats valid = %d \n"
5250 			 "bw = %d \n"
5251 			 "pkt_type = %d \n"
5252 			 "stbc = %d \n"
5253 			 "ldpc = %d \n"
5254 			 "sgi = %d \n"
5255 			 "mcs = %d \n"
5256 			 "ofdma = %d \n"
5257 			 "tones_in_ru = %d \n"
5258 			 "tsf = %d \n"
5259 			 "ppdu_id = %d \n"
5260 			 "transmit_cnt = %d \n"
5261 			 "tid = %d \n"
5262 			 "peer_id = %d\n"
5263 			 "tx_status = %d\n",
5264 			 ts->ack_frame_rssi, ts->first_msdu,
5265 			 ts->last_msdu, ts->msdu_part_of_amsdu,
5266 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
5267 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
5268 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
5269 			 ts->transmit_cnt, ts->tid, ts->peer_id,
5270 			 ts->status);
5271 
5272 	/* Update SoC level stats */
5273 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
5274 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
5275 
5276 	if (!txrx_peer) {
5277 		dp_info_rl("peer is null or deletion in progress");
5278 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
5279 		goto out;
5280 	}
5281 	vdev = txrx_peer->vdev;
5282 
5283 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
5284 	dp_tx_update_uplink_delay(soc, vdev, ts);
5285 
5286 	/* check tx complete notification */
5287 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
5288 		dp_tx_notify_completion(soc, vdev, tx_desc,
5289 					nbuf, ts->status);
5290 
5291 	/* Update per-packet stats for mesh mode */
5292 	if (qdf_unlikely(vdev->mesh_vdev) &&
5293 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
5294 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
5295 
5296 	/* Update peer level stats */
5297 	if (qdf_unlikely(txrx_peer->bss_peer &&
5298 			 vdev->opmode == wlan_op_mode_ap)) {
5299 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
5300 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5301 						      length);
5302 
5303 			if (txrx_peer->vdev->tx_encap_type ==
5304 				htt_cmn_pkt_type_ethernet &&
5305 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
5306 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5307 							      tx.bcast, 1,
5308 							      length);
5309 			}
5310 		}
5311 	} else {
5312 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
5313 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
5314 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
5315 						      1, length);
5316 			if (qdf_unlikely(txrx_peer->in_twt)) {
5317 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
5318 							      tx.tx_success_twt,
5319 							      1, length);
5320 			}
5321 		}
5322 	}
5323 
5324 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
5325 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
5326 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
5327 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
5328 				     ts, ts->tid);
5329 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
5330 
5331 #ifdef QCA_SUPPORT_RDK_STATS
5332 	if (soc->peerstats_enabled)
5333 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
5334 					    qdf_ktime_to_ms(tx_desc->timestamp),
5335 					    ts->ppdu_id);
5336 #endif
5337 
5338 out:
5339 	return;
5340 }
5341 
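/*
 * dp_tx_update_peer_basic_stats() below has three build variants: with HW
 * vdev-stats offload plus enhanced stats the host counts only when the
 * enhanced flag is set or HW stats are disabled for the peer; with offload
 * alone it counts only when HW stats are disabled for the peer; otherwise
 * completions and failures are always counted on the host.
 */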
5342 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
5343 	defined(QCA_ENHANCED_STATS_SUPPORT)
5344 /**
5345  * dp_tx_update_peer_basic_stats() - Update peer basic stats
5346  * @txrx_peer: Datapath txrx_peer handle
5347  * @length: Length of the packet
5348  * @tx_status: Tx status from TQM/FW
5349  * @update: enhanced flag value present in dp_pdev
5350  *
5351  * Return: none
5352  */
5353 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5354 				   uint32_t length, uint8_t tx_status,
5355 				   bool update)
5356 {
5357 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
5358 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5359 
5360 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5361 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5362 	}
5363 }
5364 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
5365 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5366 				   uint32_t length, uint8_t tx_status,
5367 				   bool update)
5368 {
5369 	if (!txrx_peer->hw_txrx_stats_en) {
5370 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5371 
5372 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5373 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5374 	}
5375 }
5376 
5377 #else
5378 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
5379 				   uint32_t length, uint8_t tx_status,
5380 				   bool update)
5381 {
5382 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5383 
5384 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
5385 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5386 }
5387 #endif
5388 
5389 /**
5390  * dp_tx_prefetch_next_nbuf_data() - Prefetch the next descriptor's nbuf and nbuf data
5391  * @next: pointer to the next Tx software descriptor
5392  *
5393  * Return: none
5394  */
5395 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
5396 static inline
5397 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5398 {
5399 	qdf_nbuf_t nbuf = NULL;
5400 
5401 	if (next)
5402 		nbuf = next->nbuf;
5403 	if (nbuf) {
5404 		/* prefetch skb->next and first few bytes of skb->cb */
5405 		qdf_prefetch(next->shinfo_addr);
5406 		qdf_prefetch(nbuf);
5407 		/* prefetch skb fields present in different cachelines */
5408 		qdf_prefetch(&nbuf->len);
5409 		qdf_prefetch(&nbuf->users);
5410 	}
5411 }
5412 #else
5413 static inline
5414 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
5415 {
5416 }
5417 #endif
5418 
5419 /**
5420  * dp_tx_mcast_reinject_handler() - handler for reinjecting multicast Tx packets
5421  * @soc: core txrx main context
5422  * @desc: software descriptor
5423  *
5424  * Return: true when packet is reinjected
5425  */
5426 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
5427 	defined(WLAN_MCAST_MLO)
5428 static inline bool
5429 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5430 {
5431 	struct dp_vdev *vdev = NULL;
5432 
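	/*
	 * A TQM multicast-drop completion means the frame is handed to the
	 * arch-specific MLO mcast handler for re-injection instead of being
	 * completed here.
	 */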
5433 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5434 		if (!soc->arch_ops.dp_tx_mcast_handler)
5435 			return false;
5436 
5437 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
5438 					     DP_MOD_ID_REINJECT);
5439 
5440 		if (qdf_unlikely(!vdev)) {
5441 			dp_tx_comp_info_rl("Unable to get vdev ref %d",
5442 					   desc->id);
5443 			return false;
5444 		}
5445 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
5446 				 qdf_nbuf_len(desc->nbuf));
5447 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
5448 		dp_tx_desc_release(desc, desc->pool_id);
5449 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
5450 		return true;
5451 	}
5452 
5453 	return false;
5454 }
5455 #else
5456 static inline bool
5457 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
5458 {
5459 	return false;
5460 }
5461 #endif
5462 
5463 /**
5464  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
5465  * @soc: core txrx main context
5466  * @comp_head: software descriptor head pointer
5467  * @ring_id: ring number
5468  *
5469  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
5470  * and release the software descriptors after processing is complete
5471  *
5472  * Return: none
5473  */
5474 void
5475 dp_tx_comp_process_desc_list(struct dp_soc *soc,
5476 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
5477 {
5478 	struct dp_tx_desc_s *desc;
5479 	struct dp_tx_desc_s *next;
5480 	struct hal_tx_completion_status ts;
5481 	struct dp_txrx_peer *txrx_peer = NULL;
5482 	uint16_t peer_id = DP_INVALID_PEER;
5483 	dp_txrx_ref_handle txrx_ref_handle = NULL;
5484 
5485 	desc = comp_head;
5486 
5487 	while (desc) {
5488 		next = desc->next;
5489 		dp_tx_prefetch_next_nbuf_data(next);
5490 
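		/*
		 * Cache the txrx_peer reference across consecutive descriptors
		 * with the same peer_id so the ref get/put is done only when
		 * the peer changes.
		 */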
5491 		if (peer_id != desc->peer_id) {
5492 			if (txrx_peer)
5493 				dp_txrx_peer_unref_delete(txrx_ref_handle,
5494 							  DP_MOD_ID_TX_COMP);
5495 			peer_id = desc->peer_id;
5496 			txrx_peer =
5497 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
5498 							   &txrx_ref_handle,
5499 							   DP_MOD_ID_TX_COMP);
5500 		}
5501 
5502 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
5503 			desc = next;
5504 			continue;
5505 		}
5506 
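		/*
		 * PPE-DS descriptors take a short-cut: update basic peer
		 * stats, free the nbuf and return the descriptor to the
		 * PPE-DS pool without the full completion processing below.
		 */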
5507 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
5508 			if (qdf_likely(txrx_peer))
5509 				dp_tx_update_peer_basic_stats(txrx_peer,
5510 							      desc->length,
5511 							      desc->tx_status,
5512 							      false);
5513 			qdf_nbuf_free(desc->nbuf);
5514 			dp_ppeds_tx_desc_free(soc, desc);
5515 			desc = next;
5516 			continue;
5517 		}
5518 
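		/*
		 * Fast path for "simple" descriptors: only basic peer stats
		 * are updated, the nbuf is unmapped and freed directly, and
		 * the descriptor goes straight back to its pool.
		 */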
5519 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
5520 			struct dp_pdev *pdev = desc->pdev;
5521 
5522 			if (qdf_likely(txrx_peer))
5523 				dp_tx_update_peer_basic_stats(txrx_peer,
5524 							      desc->length,
5525 							      desc->tx_status,
5526 							      false);
5527 			qdf_assert(pdev);
5528 			dp_tx_outstanding_dec(pdev);
5529 
5530 			/*
5531 			 * Calling a QDF wrapper here causes a significant
5532 			 * performance impact, so the wrapper call is avoided here
5533 			 */
5534 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
5535 					       desc->id, DP_TX_COMP_UNMAP);
5536 			dp_tx_nbuf_unmap(soc, desc);
5537 			qdf_nbuf_free_simple(desc->nbuf);
5538 			dp_tx_desc_free(soc, desc, desc->pool_id);
5539 			desc = next;
5540 			continue;
5541 		}
5542 
5543 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
5544 
5545 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
5546 					     ring_id);
5547 
5548 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
5549 
5550 		dp_tx_desc_release(desc, desc->pool_id);
5551 		desc = next;
5552 	}
5553 	if (txrx_peer)
5554 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
5555 }
5556 
5557 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
5558 static inline
5559 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5560 				   int max_reap_limit)
5561 {
5562 	bool limit_hit = false;
5563 
5564 	limit_hit =
5565 		(num_reaped >= max_reap_limit) ? true : false;
5566 
5567 	if (limit_hit)
5568 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
5569 
5570 	return limit_hit;
5571 }
5572 
5573 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5574 {
5575 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
5576 }
5577 
5578 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5579 {
5580 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5581 
5582 	return cfg->tx_comp_loop_pkt_limit;
5583 }
5584 #else
5585 static inline
5586 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5587 				   int max_reap_limit)
5588 {
5589 	return false;
5590 }
5591 
5592 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5593 {
5594 	return false;
5595 }
5596 
5597 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5598 {
5599 	return 0;
5600 }
5601 #endif
5602 
5603 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5604 static inline int
5605 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5606 				  int *max_reap_limit)
5607 {
5608 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5609 							       max_reap_limit);
5610 }
5611 #else
5612 static inline int
5613 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5614 				  int *max_reap_limit)
5615 {
5616 	return 0;
5617 }
5618 #endif
5619 
5620 #ifdef DP_TX_TRACKING
5621 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5622 {
5623 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5624 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5625 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5626 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5627 	}
5628 }
5629 #endif
5630 
5631 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5632 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5633 			    uint32_t quota)
5634 {
5635 	void *tx_comp_hal_desc;
5636 	void *last_prefetched_hw_desc = NULL;
5637 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5638 	hal_soc_handle_t hal_soc;
5639 	uint8_t buffer_src;
5640 	struct dp_tx_desc_s *tx_desc = NULL;
5641 	struct dp_tx_desc_s *head_desc = NULL;
5642 	struct dp_tx_desc_s *tail_desc = NULL;
5643 	uint32_t num_processed = 0;
5644 	uint32_t count;
5645 	uint32_t num_avail_for_reap = 0;
5646 	bool force_break = false;
5647 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5648 	int max_reap_limit, ring_near_full;
5649 	uint32_t num_entries;
5650 
5651 	DP_HIST_INIT();
5652 
5653 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
5654 
5655 more_data:
5656 
5657 	hal_soc = soc->hal_soc;
5658 	/* Re-initialize local variables to be re-used */
5659 	head_desc = NULL;
5660 	tail_desc = NULL;
5661 	count = 0;
5662 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5663 
5664 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5665 							   &max_reap_limit);
5666 
5667 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5668 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5669 		return 0;
5670 	}
5671 
5672 	if (!num_avail_for_reap)
5673 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
5674 							    hal_ring_hdl, 0);
5675 
5676 	if (num_avail_for_reap >= quota)
5677 		num_avail_for_reap = quota;
5678 
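	/*
	 * Invalidate cached copies of the descriptors about to be reaped and
	 * prefetch the 32-byte completion descriptors to hide memory latency
	 * in the reap loop below.
	 */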
5679 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5680 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
5681 							    hal_ring_hdl,
5682 							    num_avail_for_reap);
5683 
5684 	/* Find head descriptor from completion ring */
5685 	while (qdf_likely(num_avail_for_reap--)) {
5686 
5687 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5688 		if (qdf_unlikely(!tx_comp_hal_desc))
5689 			break;
5690 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5691 							   tx_comp_hal_desc);
5692 
5693 		/* If this buffer was not released by TQM or FW, then it is
5694 		 * not a Tx completion indication; assert */
5695 		if (qdf_unlikely(buffer_src !=
5696 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5697 				 (qdf_unlikely(buffer_src !=
5698 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5699 			uint8_t wbm_internal_error;
5700 
5701 			dp_err_rl(
5702 				"Tx comp release_src != TQM | FW but from %d",
5703 				buffer_src);
5704 			hal_dump_comp_desc(tx_comp_hal_desc);
5705 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5706 
5707 			/* When WBM sees NULL buffer_addr_info in any of
5708 			 * ingress rings it sends an error indication,
5709 			 * with wbm_internal_error=1, to a specific ring.
5710 			 * The WBM2SW ring used to indicate these errors is
5711 			 * fixed in HW, and that ring is being used as Tx
5712 			 * completion ring. These errors are not related to
5713 			 * Tx completions, and should just be ignored
5714 			 */
5715 			wbm_internal_error = hal_get_wbm_internal_error(
5716 							hal_soc,
5717 							tx_comp_hal_desc);
5718 
5719 			if (wbm_internal_error) {
5720 				dp_err_rl("Tx comp wbm_internal_error!!");
5721 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5722 
5723 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5724 								buffer_src)
5725 					dp_handle_wbm_internal_error(
5726 						soc,
5727 						tx_comp_hal_desc,
5728 						hal_tx_comp_get_buffer_type(
5729 							tx_comp_hal_desc));
5730 
5731 			} else {
5732 				dp_err_rl("Tx comp wbm_internal_error false");
5733 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5734 			}
5735 			continue;
5736 		}
5737 
5738 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5739 							       tx_comp_hal_desc,
5740 							       &tx_desc);
5741 		if (!tx_desc) {
5742 			dp_err("unable to retrieve tx_desc!");
5743 			QDF_BUG(0);
5744 			continue;
5745 		}
5746 		tx_desc->buffer_src = buffer_src;
5747 
5748 		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
5749 			goto add_to_pool2;
5750 
5751 		/*
5752 		 * If the release source is FW, process the HTT status
5753 		 */
5754 		if (qdf_unlikely(buffer_src ==
5755 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5756 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5757 
5758 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5759 					htt_tx_status);
5760 			/* Collect hw completion contents */
5761 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5762 					      &tx_desc->comp, 1);
5763 			soc->arch_ops.dp_tx_process_htt_completion(
5764 							soc,
5765 							tx_desc,
5766 							htt_tx_status,
5767 							ring_id);
5768 		} else {
5769 			tx_desc->tx_status =
5770 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5771 			tx_desc->buffer_src = buffer_src;
5772 			/*
5773 			 * If fast completion mode is enabled, the extended
5774 			 * metadata from the descriptor is not copied
5775 			 */
5776 			if (qdf_likely(tx_desc->flags &
5777 						DP_TX_DESC_FLAG_SIMPLE))
5778 				goto add_to_pool;
5779 
5780 			/*
5781 			 * If the descriptor is already freed in vdev_detach,
5782 			 * continue to next descriptor
5783 			 */
5784 			if (qdf_unlikely
5785 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5786 				 !tx_desc->flags)) {
5787 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5788 						   tx_desc->id);
5789 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5790 				dp_tx_desc_check_corruption(tx_desc);
5791 				continue;
5792 			}
5793 
5794 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5795 				dp_tx_comp_info_rl("pdev in down state %d",
5796 						   tx_desc->id);
5797 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5798 				dp_tx_comp_free_buf(soc, tx_desc, false);
5799 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5800 				goto next_desc;
5801 			}
5802 
5803 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5804 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5805 				dp_tx_comp_alert("Txdesc invalid, flgs = %x, id = %d",
5806 						 tx_desc->flags, tx_desc->id);
5807 				qdf_assert_always(0);
5808 			}
5809 
5810 			/* Collect hw completion contents */
5811 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5812 					      &tx_desc->comp, 1);
5813 add_to_pool:
5814 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5815 
5816 add_to_pool2:
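			/*
			 * Chain the descriptor onto the local list; the whole
			 * batch is handed to dp_tx_comp_process_desc_list()
			 * once ring access ends.
			 */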
5817 			/* First ring descriptor on the cycle */
5818 			if (!head_desc) {
5819 				head_desc = tx_desc;
5820 				tail_desc = tx_desc;
5821 			}
5822 
5823 			tail_desc->next = tx_desc;
5824 			tx_desc->next = NULL;
5825 			tail_desc = tx_desc;
5826 		}
5827 next_desc:
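		/*
		 * num_processed advances only once every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) descriptors, so the quota
		 * comparison later operates on a scaled-down count.
		 */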
5828 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5829 
5830 		/*
5831 		 * Stop processing once the processed packet count exceeds
5832 		 * the given quota
5833 		 */
5834 
5835 		count++;
5836 
5837 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5838 					       num_avail_for_reap,
5839 					       hal_ring_hdl,
5840 					       &last_prefetched_hw_desc,
5841 					       &last_prefetched_sw_desc);
5842 
5843 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5844 			break;
5845 	}
5846 
5847 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5848 
5849 	/* Process the reaped descriptors */
5850 	if (head_desc)
5851 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5852 
5853 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5854 
5855 	/*
5856 	 * If we are processing in the near-full condition, there are 3 scenarios
5857 	 * 1) Ring entries have reached a critical state
5858 	 * 2) Ring entries are still near the high threshold
5859 	 * 3) Ring entries are below the safe level
5860 	 *
5861 	 * One more loop will move the state back to normal processing and yield
5862 	 */
5863 	if (ring_near_full)
5864 		goto more_data;
5865 
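	/*
	 * With end-of-loop data checking enabled, peek the ring again: if
	 * entries are still pending and the quota is not exhausted, loop back
	 * when the context need not yield, or when the ring has filled to at
	 * least half its depth despite the yield request.
	 */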
5866 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5867 
5868 		if (num_processed >= quota)
5869 			force_break = true;
5870 
5871 		if (!force_break &&
5872 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5873 						  hal_ring_hdl)) {
5874 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5875 			if (!hif_exec_should_yield(soc->hif_handle,
5876 						   int_ctx->dp_intr_id))
5877 				goto more_data;
5878 
5879 			num_avail_for_reap =
5880 				hal_srng_dst_num_valid_locked(soc->hal_soc,
5881 							      hal_ring_hdl,
5882 							      true);
5883 			if (qdf_unlikely(num_entries &&
5884 					 (num_avail_for_reap >=
5885 					  num_entries >> 1))) {
5886 				DP_STATS_INC(soc, tx.near_full, 1);
5887 				goto more_data;
5888 			}
5889 		}
5890 	}
5891 	DP_TX_HIST_STATS_PER_PDEV();
5892 
5893 	return num_processed;
5894 }
5895 
5896 #ifdef FEATURE_WLAN_TDLS
5897 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5898 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5899 {
5900 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5901 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5902 						     DP_MOD_ID_TDLS);
5903 
5904 	if (!vdev) {
5905 		dp_err("vdev handle for id %d is NULL", vdev_id);
5906 		return NULL;
5907 	}
5908 
5909 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5910 		vdev->is_tdls_frame = true;
5911 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5912 
5913 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5914 }
5915 #endif
5916 
5917 /**
5918  * dp_tx_vdev_attach() - attach vdev to dp tx
5919  * @vdev: virtual device instance
5920  *
5921  * Return: QDF_STATUS_SUCCESS: success
5922  *         QDF_STATUS_E_RESOURCES: Error return
5923  */
5924 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5925 {
5926 	int pdev_id;
5927 	/*
5928 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5929 	 */
5930 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5931 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5932 
5933 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5934 				       vdev->vdev_id);
5935 
5936 	pdev_id =
5937 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5938 						       vdev->pdev->pdev_id);
5939 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5940 
5941 	/*
5942 	 * Set HTT Extension Valid bit to 0 by default
5943 	 */
5944 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5945 
5946 	dp_tx_vdev_update_search_flags(vdev);
5947 
5948 	return QDF_STATUS_SUCCESS;
5949 }
5950 
5951 #ifndef FEATURE_WDS
5952 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5953 {
5954 	return false;
5955 }
5956 #endif
5957 
5958 /**
5959  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5960  * @vdev: virtual device instance
5961  *
5962  * Return: void
5963  *
5964  */
5965 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5966 {
5967 	struct dp_soc *soc = vdev->pdev->soc;
5968 
5969 	/*
5970 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
5971 	 * for TDLS link
5972 	 *
5973 	 * Enable AddrY (SA based search) only for non-WDS STA and
5974 	 * ProxySTA VAP (in HKv1) modes.
5975 	 *
5976 	 * In all other VAP modes, only DA based search should be
5977 	 * enabled
5978 	 */
5979 	if (vdev->opmode == wlan_op_mode_sta &&
5980 	    vdev->tdls_link_connected)
5981 		vdev->hal_desc_addr_search_flags =
5982 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5983 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5984 		 !dp_tx_da_search_override(vdev))
5985 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5986 	else
5987 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5988 
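	/*
	 * Non-TDLS STA vdevs use the per-SoC station search policy; all other
	 * cases fall back to the default address search type.
	 */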
5989 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
5990 		vdev->search_type = soc->sta_mode_search_policy;
5991 	else
5992 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5993 }
5994 
5995 static inline bool
5996 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5997 			  struct dp_vdev *vdev,
5998 			  struct dp_tx_desc_s *tx_desc)
5999 {
6000 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
6001 		return false;
6002 
6003 	/*
6004 	 * If vdev is given, only check whether the descriptor's
6005 	 * vdev matches. If vdev is NULL, check whether the
6006 	 * descriptor's pdev matches.
6007 	 */
6008 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
6009 		(tx_desc->pdev == pdev);
6010 }
6011 
6012 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6013 /**
6014  * dp_tx_desc_flush() - release resources associated
6015  *                      with TX descriptors
6016  *
6017  * @pdev: Handle to DP pdev structure
6018  * @vdev: virtual device instance
6019  * NULL: no specific Vdev is required and check all allocated TX desc
6020  * on this pdev.
6021  * Non-NULL: only check the allocated TX Desc associated with this Vdev.
6022  *
6023  * @force_free:
6024  * true: flush the TX desc.
6025  * false: only reset the Vdev in each allocated TX desc
6026  * that is associated with the current Vdev.
6027  *
6028  * This function walks the TX desc pool to flush
6029  * the outstanding TX data or reset the Vdev to NULL in the associated
6030  * TX Desc.
6031  */
6032 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6033 		      bool force_free)
6034 {
6035 	uint8_t i;
6036 	uint32_t j;
6037 	uint32_t num_desc, page_id, offset;
6038 	uint16_t num_desc_per_page;
6039 	struct dp_soc *soc = pdev->soc;
6040 	struct dp_tx_desc_s *tx_desc = NULL;
6041 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6042 
6043 	if (!vdev && !force_free) {
6044 		dp_err("Reset TX desc vdev, Vdev param is required!");
6045 		return;
6046 	}
6047 
6048 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
6049 		tx_desc_pool = &soc->tx_desc[i];
6050 		if (!(tx_desc_pool->pool_size) ||
6051 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
6052 		    !(tx_desc_pool->desc_pages.cacheable_pages))
6053 			continue;
6054 
6055 		/*
6056 		 * Add flow pool lock protection in case the pool is freed
6057 		 * when all tx_desc are recycled during TX completion handling.
6058 		 * This is not necessary during a force flush because:
6059 		 * a. a double lock would occur if dp_tx_desc_release is
6060 		 *    also trying to acquire it.
6061 		 * b. the dp interrupt has been disabled before the force TX
6062 		 *    desc flush in dp_pdev_deinit().
6063 		 */
6064 		if (!force_free)
6065 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
6066 		num_desc = tx_desc_pool->pool_size;
6067 		num_desc_per_page =
6068 			tx_desc_pool->desc_pages.num_element_per_page;
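		/*
		 * Walk every descriptor in the pool, translating the flat
		 * index j into a (page, offset) pair within the multi-page
		 * descriptor array.
		 */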
6069 		for (j = 0; j < num_desc; j++) {
6070 			page_id = j / num_desc_per_page;
6071 			offset = j % num_desc_per_page;
6072 
6073 			if (qdf_unlikely(!(tx_desc_pool->
6074 					 desc_pages.cacheable_pages)))
6075 				break;
6076 
6077 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6078 
6079 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6080 				/*
6081 				 * Free TX desc if force free is
6082 				 * required, otherwise only reset vdev
6083 				 * in this TX desc.
6084 				 */
6085 				if (force_free) {
6086 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6087 					dp_tx_comp_free_buf(soc, tx_desc,
6088 							    false);
6089 					dp_tx_desc_release(tx_desc, i);
6090 				} else {
6091 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6092 				}
6093 			}
6094 		}
6095 		if (!force_free)
6096 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
6097 	}
6098 }
6099 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6100 /**
6101  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
6102  *
6103  * @soc: Handle to DP soc structure
6104  * @tx_desc: pointer of one TX desc
6105  * @desc_pool_id: TX Desc pool id
6106  */
6107 static inline void
6108 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
6109 		      uint8_t desc_pool_id)
6110 {
6111 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
6112 
6113 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
6114 
6115 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
6116 }
6117 
6118 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
6119 		      bool force_free)
6120 {
6121 	uint8_t i, num_pool;
6122 	uint32_t j;
6123 	uint32_t num_desc, page_id, offset;
6124 	uint16_t num_desc_per_page;
6125 	struct dp_soc *soc = pdev->soc;
6126 	struct dp_tx_desc_s *tx_desc = NULL;
6127 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
6128 
6129 	if (!vdev && !force_free) {
6130 		dp_err("Reset TX desc vdev, Vdev param is required!");
6131 		return;
6132 	}
6133 
6134 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6135 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6136 
6137 	for (i = 0; i < num_pool; i++) {
6138 		tx_desc_pool = &soc->tx_desc[i];
6139 		if (!tx_desc_pool->desc_pages.cacheable_pages)
6140 			continue;
6141 
6142 		num_desc_per_page =
6143 			tx_desc_pool->desc_pages.num_element_per_page;
6144 		for (j = 0; j < num_desc; j++) {
6145 			page_id = j / num_desc_per_page;
6146 			offset = j % num_desc_per_page;
6147 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
6148 
6149 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
6150 				if (force_free) {
6151 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
6152 					dp_tx_comp_free_buf(soc, tx_desc,
6153 							    false);
6154 					dp_tx_desc_release(tx_desc, i);
6155 				} else {
6156 					dp_tx_desc_reset_vdev(soc, tx_desc,
6157 							      i);
6158 				}
6159 			}
6160 		}
6161 	}
6162 }
6163 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6164 
6165 /**
6166  * dp_tx_vdev_detach() - detach vdev from dp tx
6167  * @vdev: virtual device instance
6168  *
6169  * Return: QDF_STATUS_SUCCESS: success
6170  *         QDF_STATUS_E_RESOURCES: Error return
6171  */
6172 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
6173 {
6174 	struct dp_pdev *pdev = vdev->pdev;
6175 
6176 	/* Reset the Vdev to NULL in TX descriptors associated with this Vdev */
6177 	dp_tx_desc_flush(pdev, vdev, false);
6178 
6179 	return QDF_STATUS_SUCCESS;
6180 }
6181 
6182 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6183 /* Pools will be allocated dynamically */
6184 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6185 					   int num_desc)
6186 {
6187 	uint8_t i;
6188 
6189 	for (i = 0; i < num_pool; i++) {
6190 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
6191 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
6192 	}
6193 
6194 	return QDF_STATUS_SUCCESS;
6195 }
6196 
6197 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6198 					  uint32_t num_desc)
6199 {
6200 	return QDF_STATUS_SUCCESS;
6201 }
6202 
6203 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6204 {
6205 }
6206 
6207 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6208 {
6209 	uint8_t i;
6210 
6211 	for (i = 0; i < num_pool; i++)
6212 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
6213 }
6214 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
6215 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
6216 					   uint32_t num_desc)
6217 {
6218 	uint8_t i, count;
6219 
6220 	/* Allocate software Tx descriptor pools */
6221 	for (i = 0; i < num_pool; i++) {
6222 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
6223 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6224 				  FL("Tx Desc Pool alloc %d failed %pK"),
6225 				  i, soc);
6226 			goto fail;
6227 		}
6228 	}
6229 	return QDF_STATUS_SUCCESS;
6230 
6231 fail:
6232 	for (count = 0; count < i; count++)
6233 		dp_tx_desc_pool_free(soc, count);
6234 
6235 	return QDF_STATUS_E_NOMEM;
6236 }
6237 
6238 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
6239 					  uint32_t num_desc)
6240 {
6241 	uint8_t i;
6242 	for (i = 0; i < num_pool; i++) {
6243 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
6244 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6245 				  FL("Tx Desc Pool init %d failed %pK"),
6246 				  i, soc);
6247 			return QDF_STATUS_E_NOMEM;
6248 		}
6249 	}
6250 	return QDF_STATUS_SUCCESS;
6251 }
6252 
6253 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
6254 {
6255 	uint8_t i;
6256 
6257 	for (i = 0; i < num_pool; i++)
6258 		dp_tx_desc_pool_deinit(soc, i);
6259 }
6260 
6261 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
6262 {
6263 	uint8_t i;
6264 
6265 	for (i = 0; i < num_pool; i++)
6266 		dp_tx_desc_pool_free(soc, i);
6267 }
6268 
6269 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
6270 
6271 /**
6272  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
6273  * @soc: core txrx main context
6274  * @num_pool: number of pools
6275  *
6276  */
6277 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
6278 {
6279 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
6280 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
6281 }
6282 
6283 /**
6284  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
6285  * @soc: core txrx main context
6286  * @num_pool: number of pools
6287  *
6288  */
6289 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
6290 {
6291 	dp_tx_tso_desc_pool_free(soc, num_pool);
6292 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
6293 }
6294 
6295 /**
6296  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
6297  * @soc: core txrx main context
6298  *
6299  * This function frees all tx related descriptors as below
6300  * 1. Regular TX descriptors (static pools)
6301  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6302  * 3. TSO descriptors
6303  *
6304  */
6305 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
6306 {
6307 	uint8_t num_pool;
6308 
6309 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6310 
6311 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6312 	dp_tx_ext_desc_pool_free(soc, num_pool);
6313 	dp_tx_delete_static_pools(soc, num_pool);
6314 }
6315 
6316 /**
6317  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
6318  * @soc: core txrx main context
6319  *
6320  * This function de-initializes all tx related descriptors as below
6321  * 1. Regular TX descriptors (static pools)
6322  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
6323  * 3. TSO descriptors
6324  *
6325  */
6326 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
6327 {
6328 	uint8_t num_pool;
6329 
6330 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6331 
6332 	dp_tx_flow_control_deinit(soc);
6333 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6334 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6335 	dp_tx_deinit_static_pools(soc, num_pool);
6336 }
6337 
6338 /**
6339  * dp_tx_tso_cmn_desc_pool_alloc() - reserve TSO descriptor buffers
6340  * @soc: DP soc handle
6341  * @num_pool: Number of pools
6342  * @num_desc: Number of descriptors per pool
6343  *
6344  * Return: QDF_STATUS_E_FAILURE on failure or
6345  * QDF_STATUS_SUCCESS on success
6346  */
6347 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
6348 					 uint8_t num_pool,
6349 					 uint32_t num_desc)
6350 {
6351 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
6352 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
6353 		return QDF_STATUS_E_FAILURE;
6354 	}
6355 
6356 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
6357 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
6358 		       num_pool, soc);
6359 		return QDF_STATUS_E_FAILURE;
6360 	}
6361 	return QDF_STATUS_SUCCESS;
6362 }
6363 
6364 /**
6365  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
6366  * @soc: DP soc handle
6367  * @num_pool: Number of pools
6368  * @num_desc: Number of descriptors
6369  *
6370  * Initialize TSO descriptor pools
6371  *
6372  * Return: QDF_STATUS_E_FAILURE on failure or
6373  * QDF_STATUS_SUCCESS on success
6374  */
6375 
6376 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
6377 					uint8_t num_pool,
6378 					uint32_t num_desc)
6379 {
6380 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
6381 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
6382 		return QDF_STATUS_E_FAILURE;
6383 	}
6384 
6385 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
6386 		dp_err("TSO Num of seg Pool init %d failed %pK",
6387 		       num_pool, soc);
6388 		return QDF_STATUS_E_FAILURE;
6389 	}
6390 	return QDF_STATUS_SUCCESS;
6391 }
6392 
6393 /**
6394  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
6395  * @soc: core txrx main context
6396  *
6397  * This function allocates memory for following descriptor pools
6398  * 1. regular sw tx descriptor pools (static pools)
6399  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6400  * 3. TSO descriptor pools
6401  *
6402  * Return: QDF_STATUS_SUCCESS: success
6403  *         QDF_STATUS_E_RESOURCES: Error return
6404  */
6405 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
6406 {
6407 	uint8_t num_pool;
6408 	uint32_t num_desc;
6409 	uint32_t num_ext_desc;
6410 
6411 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6412 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6413 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6414 
6415 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6416 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
6417 		  __func__, num_pool, num_desc);
6418 
6419 	if ((num_pool > MAX_TXDESC_POOLS) ||
6420 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
6421 		goto fail1;
6422 
6423 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
6424 		goto fail1;
6425 
6426 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
6427 		goto fail2;
6428 
6429 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6430 		return QDF_STATUS_SUCCESS;
6431 
6432 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6433 		goto fail3;
6434 
6435 	return QDF_STATUS_SUCCESS;
6436 
6437 fail3:
6438 	dp_tx_ext_desc_pool_free(soc, num_pool);
6439 fail2:
6440 	dp_tx_delete_static_pools(soc, num_pool);
6441 fail1:
6442 	return QDF_STATUS_E_RESOURCES;
6443 }
6444 
6445 /**
6446  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
6447  * @soc: core txrx main context
6448  *
6449  * This function initializes the following TX descriptor pools
6450  * 1. regular sw tx descriptor pools (static pools)
6451  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
6452  * 3. TSO descriptor pools
6453  *
6454  * Return: QDF_STATUS_SUCCESS: success
6455  *	   QDF_STATUS_E_RESOURCES: Error return
6456  */
6457 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
6458 {
6459 	uint8_t num_pool;
6460 	uint32_t num_desc;
6461 	uint32_t num_ext_desc;
6462 
6463 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6464 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6465 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6466 
6467 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
6468 		goto fail1;
6469 
6470 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
6471 		goto fail2;
6472 
6473 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
6474 		return QDF_STATUS_SUCCESS;
6475 
6476 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6477 		goto fail3;
6478 
6479 	dp_tx_flow_control_init(soc);
6480 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
6481 	return QDF_STATUS_SUCCESS;
6482 
6483 fail3:
6484 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
6485 fail2:
6486 	dp_tx_deinit_static_pools(soc, num_pool);
6487 fail1:
6488 	return QDF_STATUS_E_RESOURCES;
6489 }
6490 
6491 /**
6492  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
6493  * @txrx_soc: dp soc handle
6494  *
6495  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6496  *			QDF_STATUS_E_FAILURE
6497  */
6498 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
6499 {
6500 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6501 	uint8_t num_pool;
6502 	uint32_t num_desc;
6503 	uint32_t num_ext_desc;
6504 
6505 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6506 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
6507 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
6508 
6509 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
6510 		return QDF_STATUS_E_FAILURE;
6511 
6512 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
6513 		return QDF_STATUS_E_FAILURE;
6514 
6515 	return QDF_STATUS_SUCCESS;
6516 }
6517 
6518 /**
6519  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
6520  * @txrx_soc: dp soc handle
6521  *
6522  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
6523  */
6524 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
6525 {
6526 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6527 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6528 
6529 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
6530 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
6531 
6532 	return QDF_STATUS_SUCCESS;
6533 }
6534 
6535 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
6536 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
6537 			  enum qdf_pkt_timestamp_index index, uint64_t time,
6538 			  qdf_nbuf_t nbuf)
6539 {
6540 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
6541 		uint64_t tsf_time;
6542 
6543 		if (vdev->get_tsf_time) {
6544 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
6545 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
6546 		}
6547 	}
6548 }
6549 
6550 void dp_pkt_get_timestamp(uint64_t *time)
6551 {
6552 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
6553 		*time = qdf_get_log_timestamp();
6554 }
6555 #endif
6556 
6557