xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 /* invalid peer id for reinject */
68 #define DP_INVALID_PEER 0XFFFE
69 
70 #define DP_RETRY_COUNT 7
71 
72 #ifdef QCA_DP_TX_FW_METADATA_V2
73 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
74 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
75 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
76 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
77 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
78 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
79 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
80 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
81 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
82 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
83 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
84 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
85 #define DP_TCL_METADATA_TYPE_PEER_BASED \
86 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
87 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
88 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
89 #else
90 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
91 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
92 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
93 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
94 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
95 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
96 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
97 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
98 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
99 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
100 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
101 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
102 #define DP_TCL_METADATA_TYPE_PEER_BASED \
103 	HTT_TCL_METADATA_TYPE_PEER_BASED
104 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
105 	HTT_TCL_METADATA_TYPE_VDEV_BASED
106 #endif
107 
108 /* mapping between hal encrypt type and cdp_sec_type */
109 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
110 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
111 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
112 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
113 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
114 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
115 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
116 					  HAL_TX_ENCRYPT_TYPE_WAPI,
117 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
118 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
119 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
121 qdf_export_symbol(sec_type_map);
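/*
 * Illustrative sketch (not part of the driver): sec_type_map is indexed by
 * the CDP security type, so a lookup is a plain array access. The enum
 * ordering assumed below follows the table above and is an assumption made
 * only for illustration.
 *
 *	uint8_t hal_sec;
 *
 *	hal_sec = sec_type_map[cdp_sec_type_tkip];
 *	// hal_sec == HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC, per the table ordering
 */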
122 
123 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
124 /**
125  * dp_update_tx_desc_stats - Update the current and peak outstanding
126  * tx desc count on the pdev and report them
127  * through the sysfs mem stats interface
128  * @pdev: DP pdev handle
129  *
130  * Return: void
131  */
132 static inline void
133 dp_update_tx_desc_stats(struct dp_pdev *pdev)
134 {
135 	int32_t tx_descs_cnt =
136 		qdf_atomic_read(&pdev->num_tx_outstanding);
137 	if (pdev->tx_descs_max < tx_descs_cnt)
138 		pdev->tx_descs_max = tx_descs_cnt;
139 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
140 				   pdev->tx_descs_max);
141 }
142 
143 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
144 
145 static inline void
146 dp_update_tx_desc_stats(struct dp_pdev *pdev)
147 {
148 }
149 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
150 
151 #ifdef QCA_TX_LIMIT_CHECK
152 /**
153  * dp_tx_limit_check - Check if allocated tx descriptors reached
154  * soc max limit and pdev max limit
155  * @vdev: DP vdev handle
156  *
157  * Return: true if allocated tx descriptors reached max configured value, else
158  * false
159  */
160 static inline bool
161 dp_tx_limit_check(struct dp_vdev *vdev)
162 {
163 	struct dp_pdev *pdev = vdev->pdev;
164 	struct dp_soc *soc = pdev->soc;
165 
166 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
167 			soc->num_tx_allowed) {
168 		dp_tx_info("queued packets are more than max tx, drop the frame");
169 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
170 		return true;
171 	}
172 
173 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
174 			pdev->num_tx_allowed) {
175 		dp_tx_info("queued packets are more than max tx, drop the frame");
176 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
177 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
178 		return true;
179 	}
180 	return false;
181 }
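/*
 * Usage sketch: callers in the Tx prepare path (for example
 * dp_tx_prepare_desc_single() below) invoke dp_tx_limit_check() before
 * allocating a SW Tx descriptor and abort the transmit when it returns true:
 *
 *	if (dp_tx_limit_check(vdev))
 *		return NULL;
 *
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 */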
182 
183 /**
184  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
185  * reached soc max limit
186  * @vdev: DP vdev handle
187  *
188  * Return: true if allocated tx descriptors reached max configured value, else
189  * false
190  */
191 static inline bool
192 dp_tx_exception_limit_check(struct dp_vdev *vdev)
193 {
194 	struct dp_pdev *pdev = vdev->pdev;
195 	struct dp_soc *soc = pdev->soc;
196 
197 	if (qdf_atomic_read(&soc->num_tx_exception) >=
198 			soc->num_msdu_exception_desc) {
199 		dp_info("exc packets are more than max drop the exc pkt");
200 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
201 		return true;
202 	}
203 
204 	return false;
205 }
206 
207 /**
208  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
209  * @pdev: DP pdev handle
210  *
211  * Return: void
212  */
213 static inline void
214 dp_tx_outstanding_inc(struct dp_pdev *pdev)
215 {
216 	struct dp_soc *soc = pdev->soc;
217 
218 	qdf_atomic_inc(&pdev->num_tx_outstanding);
219 	qdf_atomic_inc(&soc->num_tx_outstanding);
220 	dp_update_tx_desc_stats(pdev);
221 }
222 
223 /**
224  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
225  * @pdev: DP pdev handle
226  *
227  * Return: void
228  */
229 static inline void
230 dp_tx_outstanding_dec(struct dp_pdev *pdev)
231 {
232 	struct dp_soc *soc = pdev->soc;
233 
234 	qdf_atomic_dec(&pdev->num_tx_outstanding);
235 	qdf_atomic_dec(&soc->num_tx_outstanding);
236 	dp_update_tx_desc_stats(pdev);
237 }
238 
239 #else //QCA_TX_LIMIT_CHECK
240 static inline bool
241 dp_tx_limit_check(struct dp_vdev *vdev)
242 {
243 	return false;
244 }
245 
246 static inline bool
247 dp_tx_exception_limit_check(struct dp_vdev *vdev)
248 {
249 	return false;
250 }
251 
252 static inline void
253 dp_tx_outstanding_inc(struct dp_pdev *pdev)
254 {
255 	qdf_atomic_inc(&pdev->num_tx_outstanding);
256 	dp_update_tx_desc_stats(pdev);
257 }
258 
259 static inline void
260 dp_tx_outstanding_dec(struct dp_pdev *pdev)
261 {
262 	qdf_atomic_dec(&pdev->num_tx_outstanding);
263 	dp_update_tx_desc_stats(pdev);
264 }
265 #endif //QCA_TX_LIMIT_CHECK
266 
267 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
268 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
269 {
270 	enum dp_tx_event_type type;
271 
272 	if (flags & DP_TX_DESC_FLAG_FLUSH)
273 		type = DP_TX_DESC_FLUSH;
274 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
275 		type = DP_TX_COMP_UNMAP_ERR;
276 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
277 		type = DP_TX_COMP_UNMAP;
278 	else
279 		type = DP_TX_DESC_UNMAP;
280 
281 	return type;
282 }
283 
284 static inline void
285 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
286 		       qdf_nbuf_t skb, uint32_t sw_cookie,
287 		       enum dp_tx_event_type type)
288 {
289 	struct dp_tx_desc_event *entry;
290 	uint32_t idx;
291 
292 	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
293 		return;
294 
295 	switch (type) {
296 	case DP_TX_COMP_UNMAP:
297 	case DP_TX_COMP_UNMAP_ERR:
298 	case DP_TX_COMP_MSDU_EXT:
299 		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
300 						DP_TX_COMP_HISTORY_SIZE);
301 		entry = &soc->tx_comp_history->entry[idx];
302 		break;
303 	case DP_TX_DESC_MAP:
304 	case DP_TX_DESC_UNMAP:
305 	case DP_TX_DESC_COOKIE:
306 	case DP_TX_DESC_FLUSH:
307 		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
308 						DP_TX_TCL_HISTORY_SIZE);
309 		entry = &soc->tx_tcl_history->entry[idx];
310 		break;
311 	default:
312 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
313 		return;
314 	}
315 
316 	entry->skb = skb;
317 	entry->paddr = paddr;
318 	entry->sw_cookie = sw_cookie;
319 	entry->type = type;
320 	entry->ts = qdf_get_log_timestamp();
321 }
322 
323 static inline void
324 dp_tx_tso_seg_history_add(struct dp_soc *soc,
325 			  struct qdf_tso_seg_elem_t *tso_seg,
326 			  qdf_nbuf_t skb, uint32_t sw_cookie,
327 			  enum dp_tx_event_type type)
328 {
329 	int i;
330 
331 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
332 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
333 				       skb, sw_cookie, type);
334 	}
335 
336 	if (!tso_seg->next)
337 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
338 				       skb, 0xFFFFFFFF, type);
339 }
340 
341 static inline void
342 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
343 		      qdf_nbuf_t skb, uint32_t sw_cookie,
344 		      enum dp_tx_event_type type)
345 {
346 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
347 	uint32_t num_segs = tso_info.num_segs;
348 
349 	while (num_segs) {
350 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
351 		curr_seg = curr_seg->next;
352 		num_segs--;
353 	}
354 }
355 
356 #else
357 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
358 {
359 	return DP_TX_DESC_INVAL_EVT;
360 }
361 
362 static inline void
363 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
364 		       qdf_nbuf_t skb, uint32_t sw_cookie,
365 		       enum dp_tx_event_type type)
366 {
367 }
368 
369 static inline void
370 dp_tx_tso_seg_history_add(struct dp_soc *soc,
371 			  struct qdf_tso_seg_elem_t *tso_seg,
372 			  qdf_nbuf_t skb, uint32_t sw_cookie,
373 			  enum dp_tx_event_type type)
374 {
375 }
376 
377 static inline void
378 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
379 		      qdf_nbuf_t skb, uint32_t sw_cookie,
380 		      enum dp_tx_event_type type)
381 {
382 }
383 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
384 
385 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
386 
387 /**
388  * dp_is_tput_high() - Check if throughput is high
389  *
390  * @soc - core txrx main context
391  *
392  * This function is based on the RTPM tput policy variable, where RTPM is
393  * avoided when throughput is high.
394  */
395 static inline int dp_is_tput_high(struct dp_soc *soc)
396 {
397 	return dp_get_rtpm_tput_policy_requirement(soc);
398 }
399 
400 #if defined(FEATURE_TSO)
401 /**
402  * dp_tx_tso_unmap_segment() - Unmap TSO segment
403  *
404  * @soc - core txrx main context
405  * @seg_desc - tso segment descriptor
406  * @num_seg_desc - tso number segment descriptor
407  */
408 static void dp_tx_tso_unmap_segment(
409 		struct dp_soc *soc,
410 		struct qdf_tso_seg_elem_t *seg_desc,
411 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
412 {
413 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
414 	if (qdf_unlikely(!seg_desc)) {
415 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
416 			 __func__, __LINE__);
417 		qdf_assert(0);
418 	} else if (qdf_unlikely(!num_seg_desc)) {
419 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
420 			 __func__, __LINE__);
421 		qdf_assert(0);
422 	} else {
423 		bool is_last_seg;
424 		/* no tso segment left to do dma unmap */
425 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
426 			return;
427 
428 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
429 					true : false;
430 		qdf_nbuf_unmap_tso_segment(soc->osdev,
431 					   seg_desc, is_last_seg);
432 		num_seg_desc->num_seg.tso_cmn_num_seg--;
433 	}
434 }
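/*
 * Illustrative walk-through (segment names seg0..seg2 are placeholders): for
 * a TSO nbuf split into three segments, num_seg_desc->num_seg.tso_cmn_num_seg
 * starts at 3 and only the final call sees is_last_seg = true:
 *
 *	dp_tx_tso_unmap_segment(soc, seg0, num_seg); // cnt 3 -> 2, is_last_seg = false
 *	dp_tx_tso_unmap_segment(soc, seg1, num_seg); // cnt 2 -> 1, is_last_seg = false
 *	dp_tx_tso_unmap_segment(soc, seg2, num_seg); // cnt 1 -> 0, is_last_seg = true
 */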
435 
436 /**
437  * dp_tx_tso_desc_release() - Release the tso segment descriptor and the
438  *                            tso num segment descriptor back to the freelist
439  *
440  * @soc - soc device handle
441  * @tx_desc - Tx software descriptor
442  */
443 static void dp_tx_tso_desc_release(struct dp_soc *soc,
444 				   struct dp_tx_desc_s *tx_desc)
445 {
446 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
447 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
448 		dp_tx_err("TSO desc is NULL!");
449 		qdf_assert(0);
450 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
451 		dp_tx_err("TSO num desc is NULL!");
452 		qdf_assert(0);
453 	} else {
454 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
455 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
456 				msdu_ext_desc->tso_num_desc;
457 
458 		/* Add the tso num segment into the free list */
459 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
460 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
461 					    tx_desc->msdu_ext_desc->
462 					    tso_num_desc);
463 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
464 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
465 		}
466 
467 		/* Add the tso segment into the free list */
468 		dp_tx_tso_desc_free(soc,
469 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
470 				    tso_desc);
471 		tx_desc->msdu_ext_desc->tso_desc = NULL;
472 	}
473 }
474 #else
475 static void dp_tx_tso_unmap_segment(
476 		struct dp_soc *soc,
477 		struct qdf_tso_seg_elem_t *seg_desc,
478 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
479 
480 {
481 }
482 
483 static void dp_tx_tso_desc_release(struct dp_soc *soc,
484 				   struct dp_tx_desc_s *tx_desc)
485 {
486 }
487 #endif
488 
489 /**
490  * dp_tx_desc_release() - Release Tx Descriptor
491  * @tx_desc : Tx Descriptor
492  * @desc_pool_id: Descriptor Pool ID
493  *
494  * Deallocate all resources attached to Tx descriptor and free the Tx
495  * descriptor.
496  *
497  * Return: none
498  */
499 void
500 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
501 {
502 	struct dp_pdev *pdev = tx_desc->pdev;
503 	struct dp_soc *soc;
504 	uint8_t comp_status = 0;
505 
506 	qdf_assert(pdev);
507 
508 	soc = pdev->soc;
509 
510 	dp_tx_outstanding_dec(pdev);
511 
512 	if (tx_desc->msdu_ext_desc) {
513 		if (tx_desc->frm_type == dp_tx_frm_tso)
514 			dp_tx_tso_desc_release(soc, tx_desc);
515 
516 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
517 			dp_tx_me_free_buf(tx_desc->pdev,
518 					  tx_desc->msdu_ext_desc->me_buffer);
519 
520 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
521 	}
522 
523 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
524 		qdf_atomic_dec(&soc->num_tx_exception);
525 
526 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
527 				tx_desc->buffer_src)
528 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
529 							     soc->hal_soc);
530 	else
531 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
532 
533 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
534 		    tx_desc->id, comp_status,
535 		    qdf_atomic_read(&pdev->num_tx_outstanding));
536 
537 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
538 	return;
539 }
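/*
 * Lifecycle sketch (simplified): dp_tx_desc_release() is the counterpart of
 * the prepare functions further below; any error or unwind path after a
 * successful prepare releases the descriptor through it.
 *
 *	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, pool_id,
 *					    msdu_info, NULL);
 *	if (!tx_desc)
 *		return nbuf;
 *	...
 *	// hypothetical enqueue failure path
 *	dp_tx_desc_release(tx_desc, pool_id);
 */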
540 
541 /**
542  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
543  * @vdev: DP vdev Handle
544  * @nbuf: skb
545  * @msdu_info: msdu_info required to create HTT metadata
546  *
547  * Prepares and fills HTT metadata in the frame pre-header for special frames
548  * that should be transmitted using varying transmit parameters.
549  * There are 2 VDEV modes that currently need this special metadata -
550  *  1) Mesh Mode
551  *  2) DSRC Mode
552  *
553  * Return: HTT metadata size
554  *
555  */
556 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
557 					  struct dp_tx_msdu_info_s *msdu_info)
558 {
559 	uint32_t *meta_data = msdu_info->meta_data;
560 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
561 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
562 
563 	uint8_t htt_desc_size;
564 
565 	/* Size rounded up to a multiple of 8 bytes */
566 	uint8_t htt_desc_size_aligned;
567 
568 	uint8_t *hdr = NULL;
569 
570 	/*
571 	 * Metadata - HTT MSDU Extension header
572 	 */
573 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
574 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
575 
576 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
577 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
578 							   meta_data[0])) {
579 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
580 				 htt_desc_size_aligned)) {
581 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
582 							 htt_desc_size_aligned);
583 			if (!nbuf) {
584 				/*
585 				 * qdf_nbuf_realloc_headroom won't do skb_clone
586 				 * as skb_realloc_headroom does. so, no free is
587 				 * needed here.
588 				 */
589 				DP_STATS_INC(vdev,
590 					     tx_i.dropped.headroom_insufficient,
591 					     1);
592 				qdf_print(" %s[%d] skb_realloc_headroom failed",
593 					  __func__, __LINE__);
594 				return 0;
595 			}
596 		}
597 		/* Fill and add HTT metaheader */
598 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
599 		if (!hdr) {
600 			dp_tx_err("Error in filling HTT metadata");
601 
602 			return 0;
603 		}
604 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
605 
606 	} else if (vdev->opmode == wlan_op_mode_ocb) {
607 		/* Todo - Add support for DSRC */
608 	}
609 
610 	return htt_desc_size_aligned;
611 }
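/*
 * Worked example (the 36-byte size is illustrative, not the real struct
 * size): the metadata header is rounded up to an 8-byte multiple before it
 * is pushed into the headroom.
 *
 *	htt_desc_size         = sizeof(struct htt_tx_msdu_desc_ext2_t); // e.g. 36
 *	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;             // -> 40
 *
 * The caller accounts for the returned aligned size in tx_desc->pkt_offset.
 */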
612 
613 /**
614  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
615  * @tso_seg: TSO segment to process
616  * @ext_desc: Pointer to MSDU extension descriptor
617  *
618  * Return: void
619  */
620 #if defined(FEATURE_TSO)
621 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
622 		void *ext_desc)
623 {
624 	uint8_t num_frag;
625 	uint32_t tso_flags;
626 
627 	/*
628 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
629 	 * tcp_flag_mask
630 	 *
631 	 * Checksum enable flags are set in TCL descriptor and not in Extension
632 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
633 	 */
634 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
635 
636 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
637 
638 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
639 		tso_seg->tso_flags.ip_len);
640 
641 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
642 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
643 
644 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
645 		uint32_t lo = 0;
646 		uint32_t hi = 0;
647 
648 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
649 				  (tso_seg->tso_frags[num_frag].length));
650 
651 		qdf_dmaaddr_to_32s(
652 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
653 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
654 			tso_seg->tso_frags[num_frag].length);
655 	}
656 
657 	return;
658 }
659 #else
660 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
661 		void *ext_desc)
662 {
663 	return;
664 }
665 #endif
666 
667 #if defined(FEATURE_TSO)
668 /**
669  * dp_tx_free_tso_seg_list() - Loop through the tso segments
670  *                             allocated and free them
671  *
672  * @soc: soc handle
673  * @free_seg: list of tso segments
674  * @msdu_info: msdu descriptor
675  *
676  * Return - void
677  */
678 static void dp_tx_free_tso_seg_list(
679 		struct dp_soc *soc,
680 		struct qdf_tso_seg_elem_t *free_seg,
681 		struct dp_tx_msdu_info_s *msdu_info)
682 {
683 	struct qdf_tso_seg_elem_t *next_seg;
684 
685 	while (free_seg) {
686 		next_seg = free_seg->next;
687 		dp_tx_tso_desc_free(soc,
688 				    msdu_info->tx_queue.desc_pool_id,
689 				    free_seg);
690 		free_seg = next_seg;
691 	}
692 }
693 
694 /**
695  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
696  *                                 allocated and free them
697  *
698  * @soc:  soc handle
699  * @free_num_seg: list of tso number segments
700  * @msdu_info: msdu descriptor
701  * Return - void
702  */
703 static void dp_tx_free_tso_num_seg_list(
704 		struct dp_soc *soc,
705 		struct qdf_tso_num_seg_elem_t *free_num_seg,
706 		struct dp_tx_msdu_info_s *msdu_info)
707 {
708 	struct qdf_tso_num_seg_elem_t *next_num_seg;
709 
710 	while (free_num_seg) {
711 		next_num_seg = free_num_seg->next;
712 		dp_tso_num_seg_free(soc,
713 				    msdu_info->tx_queue.desc_pool_id,
714 				    free_num_seg);
715 		free_num_seg = next_num_seg;
716 	}
717 }
718 
719 /**
720  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
721  *                              do dma unmap for each segment
722  *
723  * @soc: soc handle
724  * @free_seg: list of tso segments
725  * @num_seg_desc: tso number segment descriptor
726  *
727  * Return - void
728  */
729 static void dp_tx_unmap_tso_seg_list(
730 		struct dp_soc *soc,
731 		struct qdf_tso_seg_elem_t *free_seg,
732 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
733 {
734 	struct qdf_tso_seg_elem_t *next_seg;
735 
736 	if (qdf_unlikely(!num_seg_desc)) {
737 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
738 		return;
739 	}
740 
741 	while (free_seg) {
742 		next_seg = free_seg->next;
743 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
744 		free_seg = next_seg;
745 	}
746 }
747 
748 #ifdef FEATURE_TSO_STATS
749 /**
750  * dp_tso_get_stats_idx() - Retrieve the tso packet id
751  * @pdev - pdev handle
752  *
753  * Return: id
754  */
755 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
756 {
757 	uint32_t stats_idx;
758 
759 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
760 						% CDP_MAX_TSO_PACKETS);
761 	return stats_idx;
762 }
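/*
 * Illustrative note: the stats index simply wraps around the fixed-size TSO
 * stats array. With CDP_MAX_TSO_PACKETS assumed to be 16 here (value used
 * for illustration only), successive calls starting from a zeroed counter
 * return 1, 2, ..., 15, 0, 1, ...
 */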
763 #else
764 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
765 {
766 	return 0;
767 }
768 #endif /* FEATURE_TSO_STATS */
769 
770 /**
771  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
772  *				     free the tso segments descriptor and
773  *				     tso num segments descriptor
774  *
775  * @soc:  soc handle
776  * @msdu_info: msdu descriptor
777  * @tso_seg_unmap: flag to show if dma unmap is necessary
778  *
779  * Return - void
780  */
781 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
782 					  struct dp_tx_msdu_info_s *msdu_info,
783 					  bool tso_seg_unmap)
784 {
785 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
786 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
787 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
788 					tso_info->tso_num_seg_list;
789 
790 	/* do dma unmap for each segment */
791 	if (tso_seg_unmap)
792 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
793 
794 	/* free all tso num segment descriptors (there is typically only one) */
795 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
796 
797 	/* free all tso segment descriptor */
798 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
799 }
800 
801 /**
802  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
803  * @vdev: virtual device handle
804  * @msdu: network buffer
805  * @msdu_info: meta data associated with the msdu
806  *
807  * Return: QDF_STATUS_SUCCESS success
808  */
809 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
810 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
811 {
812 	struct qdf_tso_seg_elem_t *tso_seg;
813 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
814 	struct dp_soc *soc = vdev->pdev->soc;
815 	struct dp_pdev *pdev = vdev->pdev;
816 	struct qdf_tso_info_t *tso_info;
817 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
818 	tso_info = &msdu_info->u.tso_info;
819 	tso_info->curr_seg = NULL;
820 	tso_info->tso_seg_list = NULL;
821 	tso_info->num_segs = num_seg;
822 	msdu_info->frm_type = dp_tx_frm_tso;
823 	tso_info->tso_num_seg_list = NULL;
824 
825 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
826 
827 	while (num_seg) {
828 		tso_seg = dp_tx_tso_desc_alloc(
829 				soc, msdu_info->tx_queue.desc_pool_id);
830 		if (tso_seg) {
831 			tso_seg->next = tso_info->tso_seg_list;
832 			tso_info->tso_seg_list = tso_seg;
833 			num_seg--;
834 		} else {
835 			dp_err_rl("Failed to alloc tso seg desc");
836 			DP_STATS_INC_PKT(vdev->pdev,
837 					 tso_stats.tso_no_mem_dropped, 1,
838 					 qdf_nbuf_len(msdu));
839 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
840 
841 			return QDF_STATUS_E_NOMEM;
842 		}
843 	}
844 
845 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
846 
847 	tso_num_seg = dp_tso_num_seg_alloc(soc,
848 			msdu_info->tx_queue.desc_pool_id);
849 
850 	if (tso_num_seg) {
851 		tso_num_seg->next = tso_info->tso_num_seg_list;
852 		tso_info->tso_num_seg_list = tso_num_seg;
853 	} else {
854 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
855 			 __func__);
856 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
857 
858 		return QDF_STATUS_E_NOMEM;
859 	}
860 
861 	msdu_info->num_seg =
862 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
863 
864 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
865 			msdu_info->num_seg);
866 
867 	if (!(msdu_info->num_seg)) {
868 		/*
869 		 * Free allocated TSO seg desc and number seg desc,
870 		 * do unmap for segments if dma map has done.
871 		 */
872 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
873 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
874 
875 		return QDF_STATUS_E_INVAL;
876 	}
877 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
878 			      msdu, 0, DP_TX_DESC_MAP);
879 
880 	tso_info->curr_seg = tso_info->tso_seg_list;
881 
882 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
883 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
884 			     msdu, msdu_info->num_seg);
885 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
886 				    tso_info->msdu_stats_idx);
887 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
888 	return QDF_STATUS_SUCCESS;
889 }
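/*
 * Usage sketch (simplified; the exact caller flow is not shown in this
 * section): the multi-segment send path fills msdu_info and calls
 * dp_tx_prepare_tso() before walking tso_info.curr_seg to build one Tx
 * descriptor per segment.
 *
 *	if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info) != QDF_STATUS_SUCCESS) {
 *		// give the nbuf back to the caller / drop it
 *		return nbuf;
 *	}
 *	curr_seg = msdu_info.u.tso_info.curr_seg;
 */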
890 #else
891 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
892 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
893 {
894 	return QDF_STATUS_E_NOMEM;
895 }
896 #endif
897 
898 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
899 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
900 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
901 
902 /**
903  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
904  * @vdev: DP Vdev handle
905  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
906  * @desc_pool_id: Descriptor Pool ID
907  *
908  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
909  */
910 static
911 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
912 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
913 {
914 	uint8_t i;
915 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
916 	struct dp_tx_seg_info_s *seg_info;
917 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
918 	struct dp_soc *soc = vdev->pdev->soc;
919 
920 	/* Allocate an extension descriptor */
921 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
922 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
923 
924 	if (!msdu_ext_desc) {
925 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
926 		return NULL;
927 	}
928 
929 	if (msdu_info->exception_fw &&
930 			qdf_unlikely(vdev->mesh_vdev)) {
931 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
932 				&msdu_info->meta_data[0],
933 				sizeof(struct htt_tx_msdu_desc_ext2_t));
934 		qdf_atomic_inc(&soc->num_tx_exception);
935 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
936 	}
937 
938 	switch (msdu_info->frm_type) {
939 	case dp_tx_frm_sg:
940 	case dp_tx_frm_me:
941 	case dp_tx_frm_raw:
942 		seg_info = msdu_info->u.sg_info.curr_seg;
943 		/* Update the buffer pointers in MSDU Extension Descriptor */
944 		for (i = 0; i < seg_info->frag_cnt; i++) {
945 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
946 				seg_info->frags[i].paddr_lo,
947 				seg_info->frags[i].paddr_hi,
948 				seg_info->frags[i].len);
949 		}
950 
951 		break;
952 
953 	case dp_tx_frm_tso:
954 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
955 				&cached_ext_desc[0]);
956 		break;
957 
958 
959 	default:
960 		break;
961 	}
962 
963 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
964 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
965 
966 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
967 			msdu_ext_desc->vaddr);
968 
969 	return msdu_ext_desc;
970 }
971 
972 /**
973  * dp_tx_trace_pkt() - Trace TX packet at DP layer
974  * @soc: DP soc handle
975  * @skb: skb to be traced
976  * @msdu_id: msdu_id of the packet
977  * @vdev_id: vdev_id of the packet
978  *
979  * Return: None
980  */
981 #ifdef DP_DISABLE_TX_PKT_TRACE
982 static void dp_tx_trace_pkt(struct dp_soc *soc,
983 			    qdf_nbuf_t skb, uint16_t msdu_id,
984 			    uint8_t vdev_id)
985 {
986 }
987 #else
988 static void dp_tx_trace_pkt(struct dp_soc *soc,
989 			    qdf_nbuf_t skb, uint16_t msdu_id,
990 			    uint8_t vdev_id)
991 {
992 	if (dp_is_tput_high(soc))
993 		return;
994 
995 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
996 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
997 	DPTRACE(qdf_dp_trace_ptr(skb,
998 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
999 				 QDF_TRACE_DEFAULT_PDEV_ID,
1000 				 qdf_nbuf_data_addr(skb),
1001 				 sizeof(qdf_nbuf_data(skb)),
1002 				 msdu_id, vdev_id, 0));
1003 
1004 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
1005 
1006 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
1007 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
1008 				      msdu_id, QDF_TX));
1009 }
1010 #endif
1011 
1012 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
1013 /**
1014  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
1015  *				      exception by the upper layer (OS_IF)
1016  * @soc: DP soc handle
1017  * @nbuf: packet to be transmitted
1018  *
1019  * Returns: 1 if the packet is marked as exception,
1020  *	    0 if the packet is not marked as exception.
1021  */
1022 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
1023 						 qdf_nbuf_t nbuf)
1024 {
1025 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
1026 }
1027 #else
1028 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
1029 						 qdf_nbuf_t nbuf)
1030 {
1031 	return 0;
1032 }
1033 #endif
1034 
1035 /**
1036  * dp_tx_prepare_desc_single - Allocate and prepare Tx descriptor
1037  * @vdev: DP vdev handle
1038  * @nbuf: skb
1039  * @desc_pool_id: Descriptor pool ID
1040  * @msdu_info: MSDU info, including the metadata sent to the fw
1041  * @tx_exc_metadata: Handle that holds exception path metadata
1042  * Allocate and prepare Tx descriptor with msdu information.
1043  *
1044  * Return: Pointer to Tx Descriptor on success,
1045  *         NULL on failure
1046  */
1047 static
1048 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1049 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1050 		struct dp_tx_msdu_info_s *msdu_info,
1051 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1052 {
1053 	uint8_t align_pad;
1054 	uint8_t is_exception = 0;
1055 	uint8_t htt_hdr_size;
1056 	struct dp_tx_desc_s *tx_desc;
1057 	struct dp_pdev *pdev = vdev->pdev;
1058 	struct dp_soc *soc = pdev->soc;
1059 
1060 	if (dp_tx_limit_check(vdev))
1061 		return NULL;
1062 
1063 	/* Allocate software Tx descriptor */
1064 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1065 
1066 	if (qdf_unlikely(!tx_desc)) {
1067 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1068 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1069 		return NULL;
1070 	}
1071 
1072 	dp_tx_outstanding_inc(pdev);
1073 
1074 	/* Initialize the SW tx descriptor */
1075 	tx_desc->nbuf = nbuf;
1076 	tx_desc->frm_type = dp_tx_frm_std;
1077 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1078 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1079 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1080 	tx_desc->vdev_id = vdev->vdev_id;
1081 	tx_desc->pdev = pdev;
1082 	tx_desc->msdu_ext_desc = NULL;
1083 	tx_desc->pkt_offset = 0;
1084 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1085 
1086 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1087 
1088 	if (qdf_unlikely(vdev->multipass_en)) {
1089 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1090 			goto failure;
1091 	}
1092 
1093 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1094 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1095 		is_exception = 1;
1096 	/*
1097 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1098 	 * transmitted using varying transmit parameters (tx spec) which include
1099 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
1100 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1101 	 * These frames are sent as exception packets to firmware.
1102 	 *
1103 	 * HW requirement is that metadata should always point to a
1104 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1105 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1106 	 *  to get 8-byte aligned start address along with align_pad added
1107 	 *
1108 	 *  |-----------------------------|
1109 	 *  |                             |
1110 	 *  |-----------------------------| <-----Buffer Pointer Address given
1111 	 *  |                             |  ^    in HW descriptor (aligned)
1112 	 *  |       HTT Metadata          |  |
1113 	 *  |                             |  |
1114 	 *  |                             |  | Packet Offset given in descriptor
1115 	 *  |                             |  |
1116 	 *  |-----------------------------|  |
1117 	 *  |       Alignment Pad         |  v
1118 	 *  |-----------------------------| <----- Actual buffer start address
1119 	 *  |        SKB Data             |           (Unaligned)
1120 	 *  |                             |
1121 	 *  |                             |
1122 	 *  |                             |
1123 	 *  |                             |
1124 	 *  |                             |
1125 	 *  |-----------------------------|
1126 	 */
1127 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1128 				(vdev->opmode == wlan_op_mode_ocb) ||
1129 				(tx_exc_metadata &&
1130 				tx_exc_metadata->is_tx_sniffer)) {
1131 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1132 
1133 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1134 			DP_STATS_INC(vdev,
1135 				     tx_i.dropped.headroom_insufficient, 1);
1136 			goto failure;
1137 		}
1138 
1139 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1140 			dp_tx_err("qdf_nbuf_push_head failed");
1141 			goto failure;
1142 		}
1143 
1144 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1145 				msdu_info);
1146 		if (htt_hdr_size == 0)
1147 			goto failure;
1148 
1149 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1150 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1151 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1152 		is_exception = 1;
1153 		tx_desc->length -= tx_desc->pkt_offset;
1154 	}
1155 
1156 #if !TQM_BYPASS_WAR
1157 	if (is_exception || tx_exc_metadata)
1158 #endif
1159 	{
1160 		/* Temporary WAR due to TQM VP issues */
1161 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1162 		qdf_atomic_inc(&soc->num_tx_exception);
1163 	}
1164 
1165 	return tx_desc;
1166 
1167 failure:
1168 	dp_tx_desc_release(tx_desc, desc_pool_id);
1169 	return NULL;
1170 }
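/*
 * Worked example for the alignment logic above (addresses and metadata size
 * assumed for illustration): if qdf_nbuf_data(nbuf) is 0x...1003 and the
 * aligned HTT metadata size is 40 bytes:
 *
 *	align_pad           = 0x1003 & 0x7;             // 3
 *	// push 3 bytes, then dp_tx_prepare_htt_metadata() pushes 40 more
 *	tx_desc->pkt_offset = align_pad + htt_hdr_size;  // 3 + 40 = 43
 *
 * The HW buffer pointer then lands on the 8-byte aligned metadata start,
 * matching the diagram in the comment above.
 */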
1171 
1172 /**
1173  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1174  * @vdev: DP vdev handle
1175  * @nbuf: skb
1176  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1177  * @desc_pool_id : Descriptor Pool ID
1178  *
1179  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1180  * information. For frames with fragments, allocate and prepare
1181  * an MSDU extension descriptor
1182  *
1183  * Return: Pointer to Tx Descriptor on success,
1184  *         NULL on failure
1185  */
1186 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1187 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1188 		uint8_t desc_pool_id)
1189 {
1190 	struct dp_tx_desc_s *tx_desc;
1191 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1192 	struct dp_pdev *pdev = vdev->pdev;
1193 	struct dp_soc *soc = pdev->soc;
1194 
1195 	if (dp_tx_limit_check(vdev))
1196 		return NULL;
1197 
1198 	/* Allocate software Tx descriptor */
1199 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1200 	if (!tx_desc) {
1201 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1202 		return NULL;
1203 	}
1204 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1205 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1206 
1207 	dp_tx_outstanding_inc(pdev);
1208 
1209 	/* Initialize the SW tx descriptor */
1210 	tx_desc->nbuf = nbuf;
1211 	tx_desc->frm_type = msdu_info->frm_type;
1212 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1213 	tx_desc->vdev_id = vdev->vdev_id;
1214 	tx_desc->pdev = pdev;
1215 	tx_desc->pkt_offset = 0;
1216 
1217 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1218 
1219 	/* Handle scattered frames - TSO/SG/ME */
1220 	/* Allocate and prepare an extension descriptor for scattered frames */
1221 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1222 	if (!msdu_ext_desc) {
1223 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1224 		goto failure;
1225 	}
1226 
1227 #if TQM_BYPASS_WAR
1228 	/* Temporary WAR due to TQM VP issues */
1229 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1230 	qdf_atomic_inc(&soc->num_tx_exception);
1231 #endif
1232 	if (qdf_unlikely(msdu_info->exception_fw))
1233 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1234 
1235 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1236 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1237 
1238 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1239 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1240 
1241 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1242 
1243 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1244 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1245 	else
1246 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1247 
1248 	return tx_desc;
1249 failure:
1250 	dp_tx_desc_release(tx_desc, desc_pool_id);
1251 	return NULL;
1252 }
1253 
1254 /**
1255  * dp_tx_prepare_raw() - Prepare RAW packet TX
1256  * @vdev: DP vdev handle
1257  * @nbuf: buffer pointer
1258  * @seg_info: Pointer to Segment info Descriptor to be prepared
1259  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1260  *     descriptor
1261  *
1262  * Return: nbuf on success, NULL on failure
1263  */
1264 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1265 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1266 {
1267 	qdf_nbuf_t curr_nbuf = NULL;
1268 	uint16_t total_len = 0;
1269 	qdf_dma_addr_t paddr;
1270 	int32_t i;
1271 	int32_t mapped_buf_num = 0;
1272 
1273 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1274 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1275 
1276 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1277 
1278 	/* Continue only if frames are of DATA type */
1279 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1280 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1281 		dp_tx_debug("Pkt. recd is not of data type");
1282 		goto error;
1283 	}
1284 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1285 	if (vdev->raw_mode_war &&
1286 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1287 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1288 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1289 
1290 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1291 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1292 		/*
1293 		 * Number of nbufs must not exceed the size of the frags
1294 		 * array in seg_info.
1295 		 */
1296 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1297 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1298 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1299 			goto error;
1300 		}
1301 		if (QDF_STATUS_SUCCESS !=
1302 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1303 						   curr_nbuf,
1304 						   QDF_DMA_TO_DEVICE,
1305 						   curr_nbuf->len)) {
1306 			dp_tx_err("%s dma map error ", __func__);
1307 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1308 			goto error;
1309 		}
1310 		/* Update the count of mapped nbuf's */
1311 		mapped_buf_num++;
1312 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1313 		seg_info->frags[i].paddr_lo = paddr;
1314 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1315 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1316 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1317 		total_len += qdf_nbuf_len(curr_nbuf);
1318 	}
1319 
1320 	seg_info->frag_cnt = i;
1321 	seg_info->total_len = total_len;
1322 	seg_info->next = NULL;
1323 
1324 	sg_info->curr_seg = seg_info;
1325 
1326 	msdu_info->frm_type = dp_tx_frm_raw;
1327 	msdu_info->num_seg = 1;
1328 
1329 	return nbuf;
1330 
1331 error:
1332 	i = 0;
1333 	while (nbuf) {
1334 		curr_nbuf = nbuf;
1335 		if (i < mapped_buf_num) {
1336 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1337 						     QDF_DMA_TO_DEVICE,
1338 						     curr_nbuf->len);
1339 			i++;
1340 		}
1341 		nbuf = qdf_nbuf_next(nbuf);
1342 		qdf_nbuf_free(curr_nbuf);
1343 	}
1344 	return NULL;
1345 
1346 }
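/*
 * Illustrative note on the frag address split above (address value assumed):
 * a 64-bit DMA address is stored as two 32-bit halves in the seg_info frags.
 *
 *	qdf_dma_addr_t paddr = 0x0000000487654320ULL;
 *
 *	seg_info->frags[i].paddr_lo = paddr;                    // 0x87654320
 *	seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);  // 0x00000004
 */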
1347 
1348 /**
1349  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1350  * @soc: DP soc handle
1351  * @nbuf: Buffer pointer
1352  *
1353  * unmap the chain of nbufs that belong to this RAW frame.
1354  *
1355  * Return: None
1356  */
1357 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1358 				    qdf_nbuf_t nbuf)
1359 {
1360 	qdf_nbuf_t cur_nbuf = nbuf;
1361 
1362 	do {
1363 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1364 					     QDF_DMA_TO_DEVICE,
1365 					     cur_nbuf->len);
1366 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1367 	} while (cur_nbuf);
1368 }
1369 
1370 #ifdef VDEV_PEER_PROTOCOL_COUNT
1371 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1372 					       qdf_nbuf_t nbuf)
1373 {
1374 	qdf_nbuf_t nbuf_local;
1375 	struct dp_vdev *vdev_local = vdev_hdl;
1376 
1377 	do {
1378 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1379 			break;
1380 		nbuf_local = nbuf;
1381 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1382 			 htt_cmn_pkt_type_raw))
1383 			break;
1384 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1385 			break;
1386 		else if (qdf_nbuf_is_tso((nbuf_local)))
1387 			break;
1388 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1389 						       (nbuf_local),
1390 						       NULL, 1, 0);
1391 	} while (0);
1392 }
1393 #endif
1394 
1395 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1396 /**
1397  * dp_tx_update_stats() - Update soc level tx stats
1398  * @soc: DP soc handle
1399  * @tx_desc: TX descriptor reference
1400  * @ring_id: TCL ring id
1401  *
1402  * Returns: none
1403  */
1404 void dp_tx_update_stats(struct dp_soc *soc,
1405 			struct dp_tx_desc_s *tx_desc,
1406 			uint8_t ring_id)
1407 {
1408 	uint32_t stats_len = 0;
1409 
1410 	if (tx_desc->frm_type == dp_tx_frm_tso)
1411 		stats_len  = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1412 	else
1413 		stats_len = qdf_nbuf_len(tx_desc->nbuf);
1414 
1415 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1416 }
1417 
1418 int
1419 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1420 			 struct dp_tx_desc_s *tx_desc,
1421 			 uint8_t tid,
1422 			 struct dp_tx_msdu_info_s *msdu_info,
1423 			 uint8_t ring_id)
1424 {
1425 	struct dp_swlm *swlm = &soc->swlm;
1426 	union swlm_data swlm_query_data;
1427 	struct dp_swlm_tcl_data tcl_data;
1428 	QDF_STATUS status;
1429 	int ret;
1430 
1431 	if (!swlm->is_enabled)
1432 		return msdu_info->skip_hp_update;
1433 
1434 	tcl_data.nbuf = tx_desc->nbuf;
1435 	tcl_data.tid = tid;
1436 	tcl_data.ring_id = ring_id;
1437 	if (tx_desc->frm_type == dp_tx_frm_tso) {
1438 		tcl_data.pkt_len  =
1439 			tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
1440 	} else {
1441 		tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
1442 	}
1443 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1444 	swlm_query_data.tcl_data = &tcl_data;
1445 
1446 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1447 	if (QDF_IS_STATUS_ERROR(status)) {
1448 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1449 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1450 		return 0;
1451 	}
1452 
1453 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1454 	if (ret) {
1455 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1456 	} else {
1457 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1458 	}
1459 
1460 	return ret;
1461 }
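/*
 * Usage sketch (simplified; variable names follow the rest of this file):
 * the hot Tx enqueue path feeds the SWLM verdict into the ring access end
 * helper, which either reaps (coalesces) the ring or rings the TCL head
 * pointer doorbell.
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *					    msdu_info, ring_id);
 *	...
 *	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 */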
1462 
1463 void
1464 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1465 		      int coalesce)
1466 {
1467 	if (coalesce)
1468 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1469 	else
1470 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1471 }
1472 
1473 static inline void
1474 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1475 {
1476 	if (((i + 1) < msdu_info->num_seg))
1477 		msdu_info->skip_hp_update = 1;
1478 	else
1479 		msdu_info->skip_hp_update = 0;
1480 }
1481 
1482 static inline void
1483 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1484 {
1485 	hal_ring_handle_t hal_ring_hdl =
1486 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1487 
1488 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1489 		dp_err("Fillmore: SRNG access start failed");
1490 		return;
1491 	}
1492 
1493 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1494 }
1495 
1496 static inline void
1497 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1498 			 QDF_STATUS status,
1499 			 struct dp_tx_msdu_info_s *msdu_info)
1500 {
1501 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1502 		dp_flush_tcp_hp(soc,
1503 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1504 	}
1505 }
1506 #else
1507 static inline void
1508 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1509 {
1510 }
1511 
1512 static inline void
1513 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1514 			 QDF_STATUS status,
1515 			 struct dp_tx_msdu_info_s *msdu_info)
1516 {
1517 }
1518 #endif
1519 
1520 #ifdef FEATURE_RUNTIME_PM
1521 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1522 {
1523 	return qdf_atomic_read(&soc->rtpm_high_tput_flag);
1524 }
1525 /**
1526  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1527  * @soc: Datapath soc handle
1528  * @hal_ring_hdl: HAL ring handle
1529  * @coalesce: Coalesce the current write or not
1530  *
1531  * Wrapper for HAL ring access end for data transmission for
1532  * FEATURE_RUNTIME_PM
1533  *
1534  * Returns: none
1535  */
1536 void
1537 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1538 			      hal_ring_handle_t hal_ring_hdl,
1539 			      int coalesce)
1540 {
1541 	int ret;
1542 
1543 	/*
1544 	 * Avoid runtime get and put APIs under high throughput scenarios.
1545 	 */
1546 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1547 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1548 		return;
1549 	}
1550 
1551 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1552 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1553 		if (hif_system_pm_state_check(soc->hif_handle)) {
1554 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1555 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1556 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1557 		} else {
1558 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1559 		}
1560 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1561 	} else {
1562 		dp_runtime_get(soc);
1563 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1564 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1565 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1566 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1567 		dp_runtime_put(soc);
1568 	}
1569 }
1570 #else
1571 
1572 #ifdef DP_POWER_SAVE
1573 void
1574 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1575 			      hal_ring_handle_t hal_ring_hdl,
1576 			      int coalesce)
1577 {
1578 	if (hif_system_pm_state_check(soc->hif_handle)) {
1579 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1580 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1581 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1582 	} else {
1583 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1584 	}
1585 }
1586 #endif
1587 
1588 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1589 {
1590 	return 0;
1591 }
1592 #endif
1593 
1594 /**
1595  * dp_tx_get_tid() - Obtain TID to be used for this frame
1596  * @vdev: DP vdev handle
1597  * @nbuf: skb
1598  * @msdu_info: MSDU info to be updated with the extracted TID
1599  * Extract the DSCP or PCP information from the frame and map it to a TID value.
1600  *
1601  * Return: void
1602  */
1603 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1604 			  struct dp_tx_msdu_info_s *msdu_info)
1605 {
1606 	uint8_t tos = 0, dscp_tid_override = 0;
1607 	uint8_t *hdr_ptr, *L3datap;
1608 	uint8_t is_mcast = 0;
1609 	qdf_ether_header_t *eh = NULL;
1610 	qdf_ethervlan_header_t *evh = NULL;
1611 	uint16_t   ether_type;
1612 	qdf_llc_t *llcHdr;
1613 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1614 
1615 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1616 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1617 		eh = (qdf_ether_header_t *)nbuf->data;
1618 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1619 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1620 	} else {
1621 		qdf_dot3_qosframe_t *qos_wh =
1622 			(qdf_dot3_qosframe_t *) nbuf->data;
1623 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1624 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1625 		return;
1626 	}
1627 
1628 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1629 	ether_type = eh->ether_type;
1630 
1631 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1632 	/*
1633 	 * Check if packet is dot3 or eth2 type.
1634 	 */
1635 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1636 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1637 				sizeof(*llcHdr));
1638 
1639 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1640 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1641 				sizeof(*llcHdr);
1642 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1643 					+ sizeof(*llcHdr) +
1644 					sizeof(qdf_net_vlanhdr_t));
1645 		} else {
1646 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1647 				sizeof(*llcHdr);
1648 		}
1649 	} else {
1650 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1651 			evh = (qdf_ethervlan_header_t *) eh;
1652 			ether_type = evh->ether_type;
1653 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1654 		}
1655 	}
1656 
1657 	/*
1658 	 * Find priority from IP TOS DSCP field
1659 	 */
1660 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1661 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1662 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1663 			/* Only for unicast frames */
1664 			if (!is_mcast) {
1665 				/* send it on VO queue */
1666 				msdu_info->tid = DP_VO_TID;
1667 			}
1668 		} else {
1669 			/*
1670 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1671 			 * from TOS byte.
1672 			 */
1673 			tos = ip->ip_tos;
1674 			dscp_tid_override = 1;
1675 
1676 		}
1677 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1678 		/* TODO
1679 		 * use flowlabel
1680 		 *igmpmld cases to be handled in phase 2
1681 		 */
1682 		unsigned long ver_pri_flowlabel;
1683 		unsigned long pri;
1684 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1685 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1686 			DP_IPV6_PRIORITY_SHIFT;
1687 		tos = pri;
1688 		dscp_tid_override = 1;
1689 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1690 		msdu_info->tid = DP_VO_TID;
1691 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1692 		/* Only for unicast frames */
1693 		if (!is_mcast) {
1694 			/* send ucast arp on VO queue */
1695 			msdu_info->tid = DP_VO_TID;
1696 		}
1697 	}
1698 
1699 	/*
1700 	 * Assign all MCAST packets to BE
1701 	 */
1702 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1703 		if (is_mcast) {
1704 			tos = 0;
1705 			dscp_tid_override = 1;
1706 		}
1707 	}
1708 
1709 	if (dscp_tid_override == 1) {
1710 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1711 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1712 	}
1713 
1714 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1715 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1716 
1717 	return;
1718 }
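/*
 * Worked example (illustrative; assumes DP_IP_DSCP_SHIFT strips the two ECN
 * bits and a typical DSCP-TID map): an IPv4 packet with TOS 0xB8
 * (DSCP 46 / EF) is classified as
 *
 *	tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;	// 46
 *	msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46];
 *	// commonly a voice TID, depending on the configured map
 */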
1719 
1720 /**
1721  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1722  * @vdev: DP vdev handle
1723  * @nbuf: skb
1724  * @msdu_info: MSDU info to be updated with the classified TID
1725  * Software based TID classification is required when more than 2 DSCP-TID
1726  * mapping tables are needed.
1727  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1728  *
1729  * Return: void
1730  */
1731 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1732 				      struct dp_tx_msdu_info_s *msdu_info)
1733 {
1734 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1735 
1736 	/*
1737 	 * skip_sw_tid_classification flag will be set in the below cases:
1738 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1739 	 * 2. hlos_tid_override enabled for vdev
1740 	 * 3. mesh mode enabled for vdev
1741 	 */
1742 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1743 		/* Update tid in msdu_info from skb priority */
1744 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1745 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1746 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1747 
1748 			if (tid == DP_TX_INVALID_QOS_TAG)
1749 				return;
1750 
1751 			msdu_info->tid = tid;
1752 			return;
1753 		}
1754 		return;
1755 	}
1756 
1757 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1758 }
1759 
1760 #ifdef FEATURE_WLAN_TDLS
1761 /**
1762  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1763  * @soc: datapath SOC
1764  * @vdev: datapath vdev
1765  * @tx_desc: TX descriptor
1766  *
1767  * Return: None
1768  */
1769 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1770 				    struct dp_vdev *vdev,
1771 				    struct dp_tx_desc_s *tx_desc)
1772 {
1773 	if (vdev) {
1774 		if (vdev->is_tdls_frame) {
1775 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1776 			vdev->is_tdls_frame = false;
1777 		}
1778 	}
1779 }
1780 
1781 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1782 {
1783 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1784 
1785 	switch (soc->arch_id) {
1786 	case CDP_ARCH_TYPE_LI:
1787 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1788 		break;
1789 
1790 	case CDP_ARCH_TYPE_BE:
1791 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1792 		break;
1793 
1794 	default:
1795 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1796 		QDF_BUG(0);
1797 	}
1798 
1799 	return tx_status;
1800 }
1801 
1802 /**
1803  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1804  * @soc: dp_soc handle
1805  * @tx_desc: TX descriptor
1806  * @vdev: datapath vdev handle
1808  * Return: None
1809  */
1810 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1811 					 struct dp_tx_desc_s *tx_desc)
1812 {
1813 	uint8_t tx_status = 0;
1814 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1815 
1816 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1817 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1818 						     DP_MOD_ID_TDLS);
1819 
1820 	if (qdf_unlikely(!vdev)) {
1821 		dp_err_rl("vdev is null!");
1822 		goto error;
1823 	}
1824 
1825 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1826 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1827 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1828 
1829 	if (vdev->tx_non_std_data_callback.func) {
1830 		qdf_nbuf_set_next(nbuf, NULL);
1831 		vdev->tx_non_std_data_callback.func(
1832 				vdev->tx_non_std_data_callback.ctxt,
1833 				nbuf, tx_status);
1834 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1835 		return;
1836 	} else {
1837 		dp_err_rl("callback func is null");
1838 	}
1839 
1840 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1841 error:
1842 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1843 	qdf_nbuf_free(nbuf);
1844 }
1845 
1846 /**
1847  * dp_tx_msdu_single_map() - do nbuf map
1848  * @vdev: DP vdev handle
1849  * @tx_desc: DP TX descriptor pointer
1850  * @nbuf: skb pointer
1851  *
1852  * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
1853  * operation done by other components.
1854  *
1855  * Return: QDF_STATUS
1856  */
1857 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1858 					       struct dp_tx_desc_s *tx_desc,
1859 					       qdf_nbuf_t nbuf)
1860 {
1861 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1862 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1863 						  nbuf,
1864 						  QDF_DMA_TO_DEVICE,
1865 						  nbuf->len);
1866 	else
1867 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1868 					   QDF_DMA_TO_DEVICE);
1869 }
1870 #else
1871 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1872 					   struct dp_vdev *vdev,
1873 					   struct dp_tx_desc_s *tx_desc)
1874 {
1875 }
1876 
1877 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1878 						struct dp_tx_desc_s *tx_desc)
1879 {
1880 }
1881 
1882 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1883 					       struct dp_tx_desc_s *tx_desc,
1884 					       qdf_nbuf_t nbuf)
1885 {
1886 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1887 					  nbuf,
1888 					  QDF_DMA_TO_DEVICE,
1889 					  nbuf->len);
1890 }
1891 #endif
1892 
1893 static inline
1894 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
1895 				      struct dp_tx_desc_s *tx_desc,
1896 				      qdf_nbuf_t nbuf)
1897 {
1898 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1899 
1900 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1901 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1902 		return 0;
1903 
1904 	return qdf_nbuf_mapped_paddr_get(nbuf);
1905 }
1906 
1907 static inline
1908 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1909 {
1910 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1911 					   desc->nbuf,
1912 					   desc->dma_addr,
1913 					   QDF_DMA_TO_DEVICE,
1914 					   desc->length);
1915 }
1916 
1917 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
1918 static inline
1919 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
1920 			      struct dp_tx_desc_s *tx_desc,
1921 			      qdf_nbuf_t nbuf)
1922 {
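	/*
	 * Fast path for simple (linear) frames: skip the DMA map API,
	 * clean the CPU cache over the frame data and use the physical
	 * address of the data directly as the DMA address.
	 */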
1923 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
1924 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
1925 					 (void *)(nbuf->data + nbuf->len));
1926 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1927 	} else {
1928 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
1929 	}
1930 }
1931 
1932 static inline
1933 void dp_tx_nbuf_unmap(struct dp_soc *soc,
1934 		      struct dp_tx_desc_s *desc)
1935 {
1936 	if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)))
1937 		return dp_tx_nbuf_unmap_regular(soc, desc);
1938 }
1939 #else
1940 static inline
1941 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
1942 			      struct dp_tx_desc_s *tx_desc,
1943 			      qdf_nbuf_t nbuf)
1944 {
1945 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
1946 }
1947 
1948 static inline
1949 void dp_tx_nbuf_unmap(struct dp_soc *soc,
1950 		      struct dp_tx_desc_s *desc)
1951 {
1952 	return dp_tx_nbuf_unmap_regular(soc, desc);
1953 }
1954 #endif
1955 
1956 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
1957 static inline
1958 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1959 {
1960 	dp_tx_nbuf_unmap(soc, desc);
1961 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
1962 }
1963 
1964 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1965 {
1966 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
1967 		dp_tx_nbuf_unmap(soc, desc);
1968 }
1969 #else
1970 static inline
1971 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1972 {
1973 }
1974 
1975 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
1976 {
1977 	dp_tx_nbuf_unmap(soc, desc);
1978 }
1979 #endif
1980 
1981 #ifdef MESH_MODE_SUPPORT
1982 /**
1983  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1984  * @soc: datapath SOC
1985  * @vdev: datapath vdev
1986  * @tx_desc: TX descriptor
1987  *
1988  * Return: None
1989  */
1990 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1991 					   struct dp_vdev *vdev,
1992 					   struct dp_tx_desc_s *tx_desc)
1993 {
1994 	if (qdf_unlikely(vdev->mesh_vdev))
1995 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1996 }
1997 
1998 /**
1999  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2000  * @soc: dp_soc handle
2001  * @tx_desc: TX descriptor
2002  * @vdev: datapath vdev handle
2004  * Return: None
2005  */
2006 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2007 					     struct dp_tx_desc_s *tx_desc)
2008 {
2009 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2010 	struct dp_vdev *vdev = NULL;
2011 
2012 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2013 		qdf_nbuf_free(nbuf);
2014 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2015 	} else {
2016 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
2017 					     DP_MOD_ID_MESH);
2018 		if (vdev && vdev->osif_tx_free_ext)
2019 			vdev->osif_tx_free_ext((nbuf));
2020 		else
2021 			qdf_nbuf_free(nbuf);
2022 
2023 		if (vdev)
2024 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2025 	}
2026 }
2027 #else
2028 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2029 					   struct dp_vdev *vdev,
2030 					   struct dp_tx_desc_s *tx_desc)
2031 {
2032 }
2033 
2034 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2035 					     struct dp_tx_desc_s *tx_desc)
2036 {
2037 }
2038 #endif
2039 
2040 /**
2041  * dp_tx_frame_is_drop() - checks if the packet is loopback
2042  * @vdev: DP vdev handle
2043  * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
2044  *
2045  * Return: 1 if frame needs to be dropped else 0
2046  */
2047 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2048 {
2049 	struct dp_pdev *pdev = NULL;
2050 	struct dp_ast_entry *src_ast_entry = NULL;
2051 	struct dp_ast_entry *dst_ast_entry = NULL;
2052 	struct dp_soc *soc = NULL;
2053 
2054 	qdf_assert(vdev);
2055 	pdev = vdev->pdev;
2056 	qdf_assert(pdev);
2057 	soc = pdev->soc;
2058 
2059 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2060 				(soc, dstmac, vdev->pdev->pdev_id);
2061 
2062 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2063 				(soc, srcmac, vdev->pdev->pdev_id);
2064 	if (dst_ast_entry && src_ast_entry) {
2065 		if (dst_ast_entry->peer_id ==
2066 				src_ast_entry->peer_id)
2067 			return 1;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
2073 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2074 	defined(WLAN_MCAST_MLO)
2075 /* MLO peer id for reinject*/
2076 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2077 /* MLO vdev id inc offset */
2078 #define DP_MLO_VDEV_ID_OFFSET 0x80
2079 
2080 static inline void
2081 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2082 {
2083 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2084 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2085 		qdf_atomic_inc(&soc->num_tx_exception);
2086 	}
2087 }
2088 
2089 static inline void
2090 dp_tx_update_mcast_param(uint16_t peer_id,
2091 			 uint16_t *htt_tcl_metadata,
2092 			 struct dp_vdev *vdev,
2093 			 struct dp_tx_msdu_info_s *msdu_info)
2094 {
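	/*
	 * For MLO multicast reinjection, switch the TCL metadata to the
	 * global-sequence-number based type, populate the GSN from
	 * msdu_info and apply the MLO vdev_id offset.
	 */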
2095 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2096 		*htt_tcl_metadata = 0;
2097 		DP_TX_TCL_METADATA_TYPE_SET(
2098 				*htt_tcl_metadata,
2099 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2100 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2101 						    msdu_info->gsn);
2102 
2103 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2104 		if (qdf_unlikely(vdev->nawds_enabled))
2105 			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2106 							*htt_tcl_metadata, 1);
2107 	} else {
2108 		msdu_info->vdev_id = vdev->vdev_id;
2109 	}
2110 }
2111 #else
2112 static inline void
2113 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2114 {
2115 }
2116 
2117 static inline void
2118 dp_tx_update_mcast_param(uint16_t peer_id,
2119 			 uint16_t *htt_tcl_metadata,
2120 			 struct dp_vdev *vdev,
2121 			 struct dp_tx_msdu_info_s *msdu_info)
2122 {
2123 }
2124 #endif
2125 /**
2126  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2127  * @vdev: DP vdev handle
2128  * @nbuf: skb
2129  * @msdu_info: MSDU information (TID, FW metadata, Tx queue) for this frame
2132  * @peer_id: peer_id of the peer in case of NAWDS frames
2133  * @tx_exc_metadata: Handle that holds exception path metadata
2134  *
2135  * Return: NULL on success,
2136  *         nbuf when it fails to send
2137  */
2138 qdf_nbuf_t
2139 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2140 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2141 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2142 {
2143 	struct dp_pdev *pdev = vdev->pdev;
2144 	struct dp_soc *soc = pdev->soc;
2145 	struct dp_tx_desc_s *tx_desc;
2146 	QDF_STATUS status;
2147 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2148 	uint16_t htt_tcl_metadata = 0;
2149 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2150 	uint8_t tid = msdu_info->tid;
2151 	struct cdp_tid_tx_stats *tid_stats = NULL;
2152 	qdf_dma_addr_t paddr;
2153 
2154 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2155 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2156 			msdu_info, tx_exc_metadata);
2157 	if (!tx_desc) {
2158 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
2159 			  vdev, tx_q->desc_pool_id);
2160 		drop_code = TX_DESC_ERR;
2161 		goto fail_return;
2162 	}
2163 
2164 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2165 
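	/*
	 * Select TCL metadata: host-inspected vdev metadata for
	 * DP_INVALID_PEER, peer-based metadata when an explicit peer_id
	 * is passed (e.g. NAWDS), and the plain vdev metadata otherwise.
	 */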
2166 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2167 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2168 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2169 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2170 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2171 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2172 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2173 					       peer_id);
2174 		dp_tx_bypass_reinjection(soc, tx_desc);
2175 	} else
2176 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2177 
2178 	if (msdu_info->exception_fw)
2179 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2180 
2181 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2182 					 !pdev->enhanced_stats_en);
2183 
2184 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2185 
2186 	paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2187 	if (!paddr) {
2188 		/* Handle failure */
2189 		dp_err("qdf_nbuf_map failed");
2190 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2191 		drop_code = TX_DMA_MAP_ERR;
2192 		goto release_desc;
2193 	}
2194 
2195 	tx_desc->dma_addr = paddr;
2196 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2197 			       tx_desc->id, DP_TX_DESC_MAP);
2198 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2199 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2200 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2201 					     htt_tcl_metadata,
2202 					     tx_exc_metadata, msdu_info);
2203 
2204 	if (status != QDF_STATUS_SUCCESS) {
2205 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2206 			     tx_desc, tx_q->ring_id);
2207 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2208 				       tx_desc->id, DP_TX_DESC_UNMAP);
2209 		dp_tx_nbuf_unmap(soc, tx_desc);
2210 		drop_code = TX_HW_ENQUEUE;
2211 		goto release_desc;
2212 	}
2213 
2214 	return NULL;
2215 
2216 release_desc:
2217 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2218 
2219 fail_return:
2220 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2221 	tid_stats = &pdev->stats.tid_stats.
2222 		    tid_tx_stats[tx_q->ring_id][tid];
2223 	tid_stats->swdrop_cnt[drop_code]++;
2224 	return nbuf;
2225 }
2226 
2227 /**
2228  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2229  * @soc: Soc handle
2230  * @desc: software Tx descriptor to be processed
2231  *
2232  * Return: none
2233  */
2234 void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2235 {
2236 	qdf_nbuf_t nbuf = desc->nbuf;
2237 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2238 
2239 	/* nbuf already freed in vdev detach path */
2240 	if (!nbuf)
2241 		return;
2242 
2243 	/* If it is TDLS mgmt, don't unmap or free the frame */
2244 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2245 		return dp_non_std_htt_tx_comp_free_buff(soc, desc);
2246 
2247 	/* 0 : MSDU buffer, 1 : MLE */
2248 	if (desc->msdu_ext_desc) {
2249 		/* TSO free */
2250 		if (hal_tx_ext_desc_get_tso_enable(
2251 					desc->msdu_ext_desc->vaddr)) {
2252 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2253 					       desc->id, DP_TX_COMP_MSDU_EXT);
2254 			dp_tx_tso_seg_history_add(soc,
2255 						  desc->msdu_ext_desc->tso_desc,
2256 						  desc->nbuf, desc->id, type);
2257 			/* unmap each TSO seg before freeing the nbuf */
2258 			dp_tx_tso_unmap_segment(soc,
2259 						desc->msdu_ext_desc->tso_desc,
2260 						desc->msdu_ext_desc->
2261 						tso_num_desc);
2262 			qdf_nbuf_free(nbuf);
2263 			return;
2264 		}
2265 
2266 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2267 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2268 			qdf_dma_addr_t iova;
2269 			uint32_t frag_len;
2270 			uint32_t i;
2271 
2272 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2273 						     QDF_DMA_TO_DEVICE,
2274 						     qdf_nbuf_headlen(nbuf));
2275 
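			/*
			 * The linear head was unmapped above; walk the
			 * fragments recorded in the extension descriptor and
			 * unmap each one until an empty entry is found.
			 */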
2276 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2277 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2278 							      &iova,
2279 							      &frag_len);
2280 				if (!iova || !frag_len)
2281 					break;
2282 
2283 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2284 						   QDF_DMA_TO_DEVICE);
2285 			}
2286 
2287 			qdf_nbuf_free(nbuf);
2288 			return;
2289 		}
2290 	}
2291 	/* If it's an ME frame, don't unmap the cloned nbufs */
2292 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2293 		goto nbuf_free;
2294 
2295 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2296 	dp_tx_unmap(soc, desc);
2297 
2298 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2299 		return dp_mesh_tx_comp_free_buff(soc, desc);
2300 nbuf_free:
2301 	qdf_nbuf_free(nbuf);
2302 }
2303 
2304 /**
2305  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2306  * @soc: DP soc handle
2307  * @nbuf: skb
2308  * @msdu_info: MSDU info
2309  *
2310  * Return: None
2311  */
2312 static inline void
2313 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2314 		   struct dp_tx_msdu_info_s *msdu_info)
2315 {
2316 	uint32_t cur_idx;
2317 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2318 
2319 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2320 				     qdf_nbuf_headlen(nbuf));
2321 
2322 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2323 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2324 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2325 				    seg->frags[cur_idx].paddr_hi) << 32),
2326 				   seg->frags[cur_idx].len,
2327 				   QDF_DMA_TO_DEVICE);
2328 }
2329 
2330 /**
2331  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2332  * @vdev: DP vdev handle
2333  * @nbuf: skb
2334  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2335  *
2336  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2337  *
2338  * Return: NULL on success,
2339  *         nbuf when it fails to send
2340  */
2341 #if QDF_LOCK_STATS
2342 noinline
2343 #else
2344 #endif
2345 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2346 				    struct dp_tx_msdu_info_s *msdu_info)
2347 {
2348 	uint32_t i;
2349 	struct dp_pdev *pdev = vdev->pdev;
2350 	struct dp_soc *soc = pdev->soc;
2351 	struct dp_tx_desc_s *tx_desc;
2352 	bool is_cce_classified = false;
2353 	QDF_STATUS status;
2354 	uint16_t htt_tcl_metadata = 0;
2355 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2356 	struct cdp_tid_tx_stats *tid_stats = NULL;
2357 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2358 
2359 	if (msdu_info->frm_type == dp_tx_frm_me)
2360 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2361 
2362 	i = 0;
2364 	/*
2365 	 * For each segment (maps to 1 MSDU), prepare software and hardware
2366 	 * descriptors using information in msdu_info
2367 	 */
2368 	while (i < msdu_info->num_seg) {
2369 		/*
2370 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2371 		 * descriptor
2372 		 */
2373 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2374 				tx_q->desc_pool_id);
2375 
2376 		if (!tx_desc) {
2377 			if (msdu_info->frm_type == dp_tx_frm_me) {
2378 				prep_desc_fail++;
2379 				dp_tx_me_free_buf(pdev,
2380 					(void *)(msdu_info->u.sg_info
2381 						.curr_seg->frags[0].vaddr));
2382 				if (prep_desc_fail == msdu_info->num_seg) {
2383 					/*
2384 					 * Unmap is needed only if descriptor
2385 					 * preparation failed for all segments.
2386 					 */
2387 					qdf_nbuf_unmap(soc->osdev,
2388 						       msdu_info->u.sg_info.
2389 						       curr_seg->nbuf,
2390 						       QDF_DMA_TO_DEVICE);
2391 				}
2392 				/*
2393 				 * Free the nbuf for the current segment
2394 				 * and make it point to the next in the list.
2395 				 * For ME, there are as many segments as there
2396 				 * are clients.
2397 				 */
2398 				qdf_nbuf_free(msdu_info->u.sg_info
2399 					      .curr_seg->nbuf);
2400 				if (msdu_info->u.sg_info.curr_seg->next) {
2401 					msdu_info->u.sg_info.curr_seg =
2402 						msdu_info->u.sg_info
2403 						.curr_seg->next;
2404 					nbuf = msdu_info->u.sg_info
2405 					       .curr_seg->nbuf;
2406 				}
2407 				i++;
2408 				continue;
2409 			}
2410 
2411 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2412 				dp_tx_tso_seg_history_add(
2413 						soc,
2414 						msdu_info->u.tso_info.curr_seg,
2415 						nbuf, 0, DP_TX_DESC_UNMAP);
2416 				dp_tx_tso_unmap_segment(soc,
2417 							msdu_info->u.tso_info.
2418 							curr_seg,
2419 							msdu_info->u.tso_info.
2420 							tso_num_seg_list);
2421 
2422 				if (msdu_info->u.tso_info.curr_seg->next) {
2423 					msdu_info->u.tso_info.curr_seg =
2424 					msdu_info->u.tso_info.curr_seg->next;
2425 					i++;
2426 					continue;
2427 				}
2428 			}
2429 
2430 			if (msdu_info->frm_type == dp_tx_frm_sg)
2431 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2432 
2433 			goto done;
2434 		}
2435 
2436 		if (msdu_info->frm_type == dp_tx_frm_me) {
2437 			tx_desc->msdu_ext_desc->me_buffer =
2438 				(struct dp_tx_me_buf_t *)msdu_info->
2439 				u.sg_info.curr_seg->frags[0].vaddr;
2440 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2441 		}
2442 
2443 		if (is_cce_classified)
2444 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2445 
2446 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2447 		if (msdu_info->exception_fw) {
2448 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2449 		}
2450 
2451 		dp_tx_is_hp_update_required(i, msdu_info);
2452 
2453 		/*
2454 		 * For frames with multiple segments (TSO, ME), jump to next
2455 		 * segment.
2456 		 */
2457 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2458 			if (msdu_info->u.tso_info.curr_seg->next) {
2459 				msdu_info->u.tso_info.curr_seg =
2460 					msdu_info->u.tso_info.curr_seg->next;
2461 
2462 				/*
2463 				 * If this is a jumbo nbuf, then increment the
2464 				 * number of nbuf users for each additional
2465 				 * segment of the msdu. This will ensure that
2466 				 * the skb is freed only after receiving tx
2467 				 * completion for all segments of an nbuf
2468 				 */
2469 				qdf_nbuf_inc_users(nbuf);
2470 
2471 				/* Check with MCL if this is needed */
2472 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2473 				 */
2474 			}
2475 		}
2476 
2477 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2478 					 &htt_tcl_metadata,
2479 					 vdev,
2480 					 msdu_info);
2481 		/*
2482 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2483 		 */
2484 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2485 						     htt_tcl_metadata,
2486 						     NULL, msdu_info);
2487 
2488 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
2489 
2490 		if (status != QDF_STATUS_SUCCESS) {
2491 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2492 				   tx_desc, tx_q->ring_id);
2493 
2494 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2495 			tid_stats = &pdev->stats.tid_stats.
2496 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2497 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2498 
2499 			if (msdu_info->frm_type == dp_tx_frm_me) {
2500 				hw_enq_fail++;
2501 				if (hw_enq_fail == msdu_info->num_seg) {
2502 					/*
2503 					 * Unmap is needed only if enqueue
2504 					 * failed for all segments.
2505 					 */
2506 					qdf_nbuf_unmap(soc->osdev,
2507 						       msdu_info->u.sg_info.
2508 						       curr_seg->nbuf,
2509 						       QDF_DMA_TO_DEVICE);
2510 				}
2511 				/*
2512 				 * Free the nbuf for the current segment
2513 				 * and make it point to the next in the list.
2514 				 * For ME, there are as many segments as there
2515 				 * are clients.
2516 				 */
2517 				qdf_nbuf_free(msdu_info->u.sg_info
2518 					      .curr_seg->nbuf);
2519 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2520 				if (msdu_info->u.sg_info.curr_seg->next) {
2521 					msdu_info->u.sg_info.curr_seg =
2522 						msdu_info->u.sg_info
2523 						.curr_seg->next;
2524 					nbuf = msdu_info->u.sg_info
2525 					       .curr_seg->nbuf;
2526 				} else
2527 					break;
2528 				i++;
2529 				continue;
2530 			}
2531 
2532 			/*
2533 			 * For TSO frames, the nbuf users increment done for
2534 			 * the current segment has to be reverted, since the
2535 			 * hw enqueue for this segment failed
2536 			 */
2537 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2538 			    msdu_info->u.tso_info.curr_seg) {
2539 				/*
2540 				 * unmap and free current,
2541 				 * retransmit remaining segments
2542 				 */
2543 				dp_tx_comp_free_buf(soc, tx_desc);
2544 				i++;
2545 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2546 				continue;
2547 			}
2548 
2549 			if (msdu_info->frm_type == dp_tx_frm_sg)
2550 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2551 
2552 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2553 			goto done;
2554 		}
2555 
2556 		/*
2557 		 * TODO
2558 		 * if tso_info structure can be modified to have curr_seg
2559 		 * as first element, following 2 blocks of code (for TSO and SG)
2560 		 * can be combined into 1
2561 		 */
2562 
2563 		/*
2564 		 * For Multicast-Unicast converted packets,
2565 		 * each converted frame (for a client) is represented as
2566 		 * 1 segment
2567 		 */
2568 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2569 				(msdu_info->frm_type == dp_tx_frm_me)) {
2570 			if (msdu_info->u.sg_info.curr_seg->next) {
2571 				msdu_info->u.sg_info.curr_seg =
2572 					msdu_info->u.sg_info.curr_seg->next;
2573 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2574 			} else
2575 				break;
2576 		}
2577 		i++;
2578 	}
2579 
2580 	nbuf = NULL;
2581 
2582 done:
2583 	return nbuf;
2584 }
2585 
2586 /**
2587  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2588  *                     for SG frames
2589  * @vdev: DP vdev handle
2590  * @nbuf: skb
2591  * @seg_info: Pointer to Segment info Descriptor to be prepared
2592  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2593  *
2594  * Return: NULL on success,
2595  *         nbuf when it fails to send
2596  */
2597 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2598 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2599 {
2600 	uint32_t cur_frag, nr_frags, i;
2601 	qdf_dma_addr_t paddr;
2602 	struct dp_tx_sg_info_s *sg_info;
2603 
2604 	sg_info = &msdu_info->u.sg_info;
2605 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2606 
2607 	if (QDF_STATUS_SUCCESS !=
2608 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2609 					   QDF_DMA_TO_DEVICE,
2610 					   qdf_nbuf_headlen(nbuf))) {
2611 		dp_tx_err("dma map error");
2612 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2613 		qdf_nbuf_free(nbuf);
2614 		return NULL;
2615 	}
2616 
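	/* frags[0] describes the linear (head) portion of the nbuf */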
2617 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2618 	seg_info->frags[0].paddr_lo = paddr;
2619 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2620 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2621 	seg_info->frags[0].vaddr = (void *) nbuf;
2622 
2623 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2624 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2625 							    nbuf, 0,
2626 							    QDF_DMA_TO_DEVICE,
2627 							    cur_frag)) {
2628 			dp_tx_err("frag dma map error");
2629 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2630 			goto map_err;
2631 		}
2632 
2633 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2634 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2635 		seg_info->frags[cur_frag + 1].paddr_hi =
2636 			((uint64_t) paddr) >> 32;
2637 		seg_info->frags[cur_frag + 1].len =
2638 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2639 	}
2640 
2641 	seg_info->frag_cnt = (cur_frag + 1);
2642 	seg_info->total_len = qdf_nbuf_len(nbuf);
2643 	seg_info->next = NULL;
2644 
2645 	sg_info->curr_seg = seg_info;
2646 
2647 	msdu_info->frm_type = dp_tx_frm_sg;
2648 	msdu_info->num_seg = 1;
2649 
2650 	return nbuf;
2651 map_err:
2652 	/* restore paddr into nbuf before calling unmap */
2653 	qdf_nbuf_mapped_paddr_set(nbuf,
2654 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2655 				  ((uint64_t)
2656 				  seg_info->frags[0].paddr_hi) << 32));
2657 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2658 				     QDF_DMA_TO_DEVICE,
2659 				     seg_info->frags[0].len);
2660 	for (i = 1; i <= cur_frag; i++) {
2661 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2662 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2663 				   seg_info->frags[i].paddr_hi) << 32),
2664 				   seg_info->frags[i].len,
2665 				   QDF_DMA_TO_DEVICE);
2666 	}
2667 	qdf_nbuf_free(nbuf);
2668 	return NULL;
2669 }
2670 
2671 /**
2672  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2673  * @vdev: DP vdev handle
2674  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2675  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2676  *
2677  * Return: None
2679  */
2680 static
2681 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2682 				    struct dp_tx_msdu_info_s *msdu_info,
2683 				    uint16_t ppdu_cookie)
2684 {
2685 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2686 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2687 
2688 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2689 
2690 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2691 				(msdu_info->meta_data[5], 1);
2692 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2693 				(msdu_info->meta_data[5], 1);
2694 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2695 				(msdu_info->meta_data[6], ppdu_cookie);
2696 
2697 	msdu_info->exception_fw = 1;
2698 	msdu_info->is_tx_sniffer = 1;
2699 }
2700 
2701 #ifdef MESH_MODE_SUPPORT
2702 
2703 /**
2704  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2705  *	and prepare msdu_info for mesh frames.
2706  * @vdev: DP vdev handle
2707  * @nbuf: skb
2708  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2709  *
2710  * Return: NULL on failure,
2711  *         nbuf when extracted successfully
2712  */
2713 static
2714 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2715 				struct dp_tx_msdu_info_s *msdu_info)
2716 {
2717 	struct meta_hdr_s *mhdr;
2718 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2719 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2720 
2721 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2722 
2723 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2724 		msdu_info->exception_fw = 0;
2725 		goto remove_meta_hdr;
2726 	}
2727 
2728 	msdu_info->exception_fw = 1;
2729 
2730 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2731 
2732 	meta_data->host_tx_desc_pool = 1;
2733 	meta_data->update_peer_cache = 1;
2734 	meta_data->learning_frame = 1;
2735 
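	/*
	 * When auto-rate is not requested in the mesh meta header, program
	 * the fixed power/MCS/NSS/preamble/retry/BW parameters and mark
	 * each of them as valid.
	 */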
2736 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2737 		meta_data->power = mhdr->power;
2738 
2739 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2740 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2741 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2742 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2743 
2744 		meta_data->dyn_bw = 1;
2745 
2746 		meta_data->valid_pwr = 1;
2747 		meta_data->valid_mcs_mask = 1;
2748 		meta_data->valid_nss_mask = 1;
2749 		meta_data->valid_preamble_type  = 1;
2750 		meta_data->valid_retries = 1;
2751 		meta_data->valid_bw_info = 1;
2752 	}
2753 
2754 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2755 		meta_data->encrypt_type = 0;
2756 		meta_data->valid_encrypt_type = 1;
2757 		meta_data->learning_frame = 0;
2758 	}
2759 
2760 	meta_data->valid_key_flags = 1;
2761 	meta_data->key_flags = (mhdr->keyix & 0x3);
2762 
2763 remove_meta_hdr:
2764 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2765 		dp_tx_err("qdf_nbuf_pull_head failed");
2766 		qdf_nbuf_free(nbuf);
2767 		return NULL;
2768 	}
2769 
2770 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2771 
2772 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2773 		   " tid %d to_fw %d",
2774 		   msdu_info->meta_data[0],
2775 		   msdu_info->meta_data[1],
2776 		   msdu_info->meta_data[2],
2777 		   msdu_info->meta_data[3],
2778 		   msdu_info->meta_data[4],
2779 		   msdu_info->meta_data[5],
2780 		   msdu_info->tid, msdu_info->exception_fw);
2781 
2782 	return nbuf;
2783 }
2784 #else
2785 static
2786 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2787 				struct dp_tx_msdu_info_s *msdu_info)
2788 {
2789 	return nbuf;
2790 }
2791 
2792 #endif
2793 
2794 /**
2795  * dp_check_exc_metadata() - Checks if parameters are valid
2796  * @tx_exc: holds all exception path parameters
2797  *
2798  * Return: true when all the parameters are valid, else false
2799  *
2800  */
2801 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2802 {
2803 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2804 			    HTT_INVALID_TID);
2805 	bool invalid_encap_type =
2806 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2807 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2808 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2809 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2810 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2811 			       tx_exc->ppdu_cookie == 0);
2812 
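	/* Intra-BSS forwarded frames are accepted without further checks */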
2813 	if (tx_exc->is_intrabss_fwd)
2814 		return true;
2815 
2816 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2817 	    invalid_cookie) {
2818 		return false;
2819 	}
2820 
2821 	return true;
2822 }
2823 
2824 #ifdef ATH_SUPPORT_IQUE
2825 /**
2826  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2827  * @vdev: vdev handle
2828  * @nbuf: skb
2829  *
2830  * Return: true on success,
2831  *         false on failure
2832  */
2833 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2834 {
2835 	qdf_ether_header_t *eh;
2836 
2837 	/* Mcast to Ucast Conversion*/
2838 	if (qdf_likely(!vdev->mcast_enhancement_en))
2839 		return true;
2840 
2841 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2842 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2843 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2844 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2845 		qdf_nbuf_set_next(nbuf, NULL);
2846 
2847 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2848 				 qdf_nbuf_len(nbuf));
2849 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2850 				QDF_STATUS_SUCCESS) {
2851 			return false;
2852 		}
2853 
2854 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2855 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2856 					QDF_STATUS_SUCCESS) {
2857 				return false;
2858 			}
2859 		}
2860 	}
2861 
2862 	return true;
2863 }
2864 #else
2865 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2866 {
2867 	return true;
2868 }
2869 #endif
2870 
2871 /**
2872  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2873  * @nbuf: qdf_nbuf_t
2874  * @vdev: struct dp_vdev *
2875  *
2876  * Allow the packet to be processed only if it is destined to a peer
2877  * client connected to the same vap. Drop the packet if the client is
2878  * connected to a different vap.
2879  *
2880  * Return: QDF_STATUS
2881  */
2882 static inline QDF_STATUS
2883 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2884 {
2885 	struct dp_ast_entry *dst_ast_entry = NULL;
2886 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2887 
2888 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2889 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2890 		return QDF_STATUS_SUCCESS;
2891 
2892 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2893 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2894 							eh->ether_dhost,
2895 							vdev->vdev_id);
2896 
2897 	/* If there is no ast entry, return failure */
2898 	if (qdf_unlikely(!dst_ast_entry)) {
2899 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2900 		return QDF_STATUS_E_FAILURE;
2901 	}
2902 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2903 
2904 	return QDF_STATUS_SUCCESS;
2905 }
2906 
2907 /**
2908  * dp_tx_nawds_handler() - NAWDS handler
2909  *
2910  * @soc: DP soc handle
2911  * @vdev_id: id of DP vdev handle
2912  * @vdev: DP vdev handle
2913  * @msdu_info: msdu_info required to create HTT metadata
2914  * @nbuf: skb
 * @sa_peer_id: peer id of the frame's source address
2915  *
2916  * This API transmits the multicast frame to each NAWDS enabled peer,
2917  * using that peer's peer id.
2918  *
2919  */
2920 
2922 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2923 			 struct dp_tx_msdu_info_s *msdu_info,
2924 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
2925 {
2926 	struct dp_peer *peer = NULL;
2927 	qdf_nbuf_t nbuf_clone = NULL;
2928 	uint16_t peer_id = DP_INVALID_PEER;
2929 	struct dp_txrx_peer *txrx_peer;
2930 
2931 	/* This check avoids forwarding a packet to a peer that is
2932 	 * present in the AST table but does not yet have a valid peer id.
2933 	 */
2934 	if (sa_peer_id == HTT_INVALID_PEER)
2935 		return;
2936 
2937 	qdf_spin_lock_bh(&vdev->peer_list_lock);
2938 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2939 		txrx_peer = dp_get_txrx_peer(peer);
2940 		if (!txrx_peer)
2941 			continue;
2942 
2943 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
2944 			peer_id = peer->peer_id;
2945 
2946 			if (!dp_peer_is_primary_link_peer(peer))
2947 				continue;
2948 
2949 			/* Multicast packets need to be
2950 			 * dropped in case of intra-bss forwarding
2951 			 */
2952 			if (sa_peer_id == txrx_peer->peer_id) {
2953 				dp_tx_debug("multicast packet");
2954 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2955 							  tx.nawds_mcast_drop,
2956 							  1);
2957 				continue;
2958 			}
2959 
2960 			nbuf_clone = qdf_nbuf_clone(nbuf);
2961 
2962 			if (!nbuf_clone) {
2963 				QDF_TRACE(QDF_MODULE_ID_DP,
2964 					  QDF_TRACE_LEVEL_ERROR,
2965 					  FL("nbuf clone failed"));
2966 				break;
2967 			}
2968 
2969 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2970 							    msdu_info, peer_id,
2971 							    NULL);
2972 
2973 			if (nbuf_clone) {
2974 				dp_tx_debug("pkt send failed");
2975 				qdf_nbuf_free(nbuf_clone);
2976 			} else {
2977 				if (peer_id != DP_INVALID_PEER)
2978 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2979 								      tx.nawds_mcast,
2980 								      1, qdf_nbuf_len(nbuf));
2981 			}
2982 		}
2983 	}
2984 
2985 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
2986 }
2987 
2988 /**
2989  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2990  * @soc: DP soc handle
2991  * @vdev_id: id of DP vdev handle
2992  * @nbuf: skb
2993  * @tx_exc_metadata: Handle that holds exception path meta data
2994  *
2995  * Entry point for Core Tx layer (DP_TX) invoked from
2996  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2997  *
2998  * Return: NULL on success,
2999  *         nbuf when it fails to send
3000  */
3001 qdf_nbuf_t
3002 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3003 		     qdf_nbuf_t nbuf,
3004 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3005 {
3006 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3007 	qdf_ether_header_t *eh = NULL;
3008 	struct dp_tx_msdu_info_s msdu_info;
3009 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3010 						     DP_MOD_ID_TX_EXCEPTION);
3011 
3012 	if (qdf_unlikely(!vdev))
3013 		goto fail;
3014 
3015 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3016 
3017 	if (!tx_exc_metadata)
3018 		goto fail;
3019 
3020 	msdu_info.tid = tx_exc_metadata->tid;
3021 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3022 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3023 			 QDF_MAC_ADDR_REF(nbuf->data));
3024 
3025 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3026 
3027 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3028 		dp_tx_err("Invalid parameters in exception path");
3029 		goto fail;
3030 	}
3031 
3032 	/* Basic sanity checks for unsupported packets */
3033 
3034 	/* MESH mode */
3035 	if (qdf_unlikely(vdev->mesh_vdev)) {
3036 		dp_tx_err("Mesh mode is not supported in exception path");
3037 		goto fail;
3038 	}
3039 
3040 	/*
3041 	 * Classify the frame and call corresponding
3042 	 * "prepare" function which extracts the segment (TSO)
3043 	 * and fragmentation information (for TSO, SG, ME, or Raw)
3044 	 * into MSDU_INFO structure which is later used to fill
3045 	 * SW and HW descriptors.
3046 	 */
3047 	if (qdf_nbuf_is_tso(nbuf)) {
3048 		dp_verbose_debug("TSO frame %pK", vdev);
3049 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3050 				 qdf_nbuf_len(nbuf));
3051 
3052 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3053 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3054 					 qdf_nbuf_len(nbuf));
3055 			goto fail;
3056 		}
3057 
3058 		goto send_multiple;
3059 	}
3060 
3061 	/* SG */
3062 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3063 		struct dp_tx_seg_info_s seg_info = {0};
3064 
3065 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3066 		if (!nbuf)
3067 			goto fail;
3068 
3069 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3070 
3071 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3072 				 qdf_nbuf_len(nbuf));
3073 
3074 		goto send_multiple;
3075 	}
3076 
3077 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
3078 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
3079 				 qdf_nbuf_len(nbuf));
3080 
3081 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
3082 					       tx_exc_metadata->ppdu_cookie);
3083 	}
3084 
3085 	/*
3086 	 * Get HW Queue to use for this frame.
3087 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3088 	 * dedicated for data and 1 for command.
3089 	 * "queue_id" maps to one hardware ring.
3090 	 *  With each ring, we also associate a unique Tx descriptor pool
3091 	 *  to minimize lock contention for these resources.
3092 	 */
3093 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3094 
3095 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
3096 		if (qdf_unlikely(vdev->nawds_enabled)) {
3097 			/*
3098 			 * This is a multicast packet
3099 			 */
3100 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3101 					    tx_exc_metadata->peer_id);
3102 			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3103 					 1, qdf_nbuf_len(nbuf));
3104 		}
3105 
3106 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3107 					      DP_INVALID_PEER, NULL);
3108 	} else {
3109 		/*
3110 		 * Check exception descriptors
3111 		 */
3112 		if (dp_tx_exception_limit_check(vdev))
3113 			goto fail;
3114 
3115 		/*  Single linear frame */
3116 		/*
3117 		 * If nbuf is a simple linear frame, use send_single function to
3118 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3119 		 * SRNG. There is no need to setup a MSDU extension descriptor.
3120 		 */
3121 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
3122 					      tx_exc_metadata->peer_id,
3123 					      tx_exc_metadata);
3124 	}
3125 
3126 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3127 	return nbuf;
3128 
3129 send_multiple:
3130 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3131 
3132 fail:
3133 	if (vdev)
3134 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3135 	dp_verbose_debug("pkt send failed");
3136 	return nbuf;
3137 }
3138 
3139 /**
3140  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
3141  *      in exception path in special case to avoid regular exception path check.
3142  * @soc: DP soc handle
3143  * @vdev_id: id of DP vdev handle
3144  * @nbuf: skb
3145  * @tx_exc_metadata: Handle that holds exception path meta data
3146  *
3147  * Entry point for Core Tx layer (DP_TX) invoked from
3148  * hard_start_xmit in OSIF/HDD to transmit frames through fw
3149  *
3150  * Return: NULL on success,
3151  *         nbuf when it fails to send
3152  */
3153 qdf_nbuf_t
3154 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
3155 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
3156 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3157 {
3158 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3159 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3160 						     DP_MOD_ID_TX_EXCEPTION);
3161 
3162 	if (qdf_unlikely(!vdev))
3163 		goto fail;
3164 
3165 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3166 			== QDF_STATUS_E_FAILURE)) {
3167 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3168 		goto fail;
3169 	}
3170 
3171 	/* Release the vdev ref as it is taken again in dp_tx_send_exception() */
3172 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3173 
3174 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
3175 
3176 fail:
3177 	if (vdev)
3178 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3179 	dp_verbose_debug("pkt send failed");
3180 	return nbuf;
3181 }
3182 
3183 /**
3184  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3185  * @soc: DP soc handle
3186  * @vdev_id: DP vdev handle
3187  * @nbuf: skb
3188  *
3189  * Entry point for Core Tx layer (DP_TX) invoked from
3190  * hard_start_xmit in OSIF/HDD
3191  *
3192  * Return: NULL on success,
3193  *         nbuf when it fails to send
3194  */
3195 #ifdef MESH_MODE_SUPPORT
3196 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3197 			   qdf_nbuf_t nbuf)
3198 {
3199 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3200 	struct meta_hdr_s *mhdr;
3201 	qdf_nbuf_t nbuf_mesh = NULL;
3202 	qdf_nbuf_t nbuf_clone = NULL;
3203 	struct dp_vdev *vdev;
3204 	uint8_t no_enc_frame = 0;
3205 
3206 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3207 	if (!nbuf_mesh) {
3208 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3209 				"qdf_nbuf_unshare failed");
3210 		return nbuf;
3211 	}
3212 
3213 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3214 	if (!vdev) {
3215 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3216 				"vdev is NULL for vdev_id %d", vdev_id);
3217 		return nbuf;
3218 	}
3219 
3220 	nbuf = nbuf_mesh;
3221 
3222 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3223 
3224 	if ((vdev->sec_type != cdp_sec_type_none) &&
3225 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3226 		no_enc_frame = 1;
3227 
3228 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3229 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3230 
3231 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3232 		       !no_enc_frame) {
3233 		nbuf_clone = qdf_nbuf_clone(nbuf);
3234 		if (!nbuf_clone) {
3235 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3236 				"qdf_nbuf_clone failed");
3237 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3238 			return nbuf;
3239 		}
3240 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3241 	}
3242 
3243 	if (nbuf_clone) {
3244 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3245 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3246 		} else {
3247 			qdf_nbuf_free(nbuf_clone);
3248 		}
3249 	}
3250 
3251 	if (no_enc_frame)
3252 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3253 	else
3254 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3255 
3256 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3257 	if ((!nbuf) && no_enc_frame) {
3258 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3259 	}
3260 
3261 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3262 	return nbuf;
3263 }
3264 
3265 #else
3266 
3267 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3268 			   qdf_nbuf_t nbuf)
3269 {
3270 	return dp_tx_send(soc, vdev_id, nbuf);
3271 }
3272 
3273 #endif
3274 
3275 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3276 static inline
3277 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3278 {
3279 	if (nbuf) {
3280 		qdf_prefetch(&nbuf->len);
3281 		qdf_prefetch(&nbuf->data);
3282 	}
3283 }
3284 #else
3285 static inline
3286 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3287 {
3288 }
3289 #endif
3290 
3291 /**
3292  * dp_tx_send() - Transmit a frame on a given VAP
3293  * @soc: DP soc handle
3294  * @vdev_id: id of DP vdev handle
3295  * @nbuf: skb
3296  *
3297  * Entry point for Core Tx layer (DP_TX) invoked from
3298  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3299  * cases
3300  *
3301  * Return: NULL on success,
3302  *         nbuf when it fails to send
3303  */
3304 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3305 		      qdf_nbuf_t nbuf)
3306 {
3307 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3308 	uint16_t peer_id = HTT_INVALID_PEER;
3309 	/*
3310 	 * doing a memzero is causing additional function call overhead
3311 	 * so doing static stack clearing
3312 	 */
3313 	struct dp_tx_msdu_info_s msdu_info = {0};
3314 	struct dp_vdev *vdev = NULL;
3315 
3316 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3317 		return nbuf;
3318 
3319 	/*
3320 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3321 	 * it in the per-packet path.
3322 	 *
3323 	 * In this path the vdev memory is already protected by the netdev
3324 	 * tx lock.
3325 	 */
3326 	vdev = soc->vdev_id_map[vdev_id];
3327 	if (qdf_unlikely(!vdev))
3328 		return nbuf;
3329 
3330 	/*
3331 	 * Set Default Host TID value to invalid TID
3332 	 * (TID override disabled)
3333 	 */
3334 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3335 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
3336 
3337 	if (qdf_unlikely(vdev->mesh_vdev)) {
3338 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3339 								&msdu_info);
3340 		if (!nbuf_mesh) {
3341 			dp_verbose_debug("Extracting mesh metadata failed");
3342 			return nbuf;
3343 		}
3344 		nbuf = nbuf_mesh;
3345 	}
3346 
3347 	/*
3348 	 * Get HW Queue to use for this frame.
3349 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3350 	 * dedicated for data and 1 for command.
3351 	 * "queue_id" maps to one hardware ring.
3352 	 *  With each ring, we also associate a unique Tx descriptor pool
3353 	 *  to minimize lock contention for these resources.
3354 	 */
3355 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3356 
3357 	/*
3358 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3359 	 *  Table 1 - Default DSCP-TID mapping table
3360 	 *  Table 2 - 1 DSCP-TID override table
3361 	 *
3362 	 * If we need a different DSCP-TID mapping for this vap,
3363 	 * call tid_classify to extract DSCP/ToS from frame and
3364 	 * map to a TID and store in msdu_info. This is later used
3365 	 * to fill in TCL Input descriptor (per-packet TID override).
3366 	 */
3367 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3368 
3369 	/*
3370 	 * Classify the frame and call corresponding
3371 	 * "prepare" function which extracts the segment (TSO)
3372 	 * and fragmentation information (for TSO, SG, ME, or Raw)
3373 	 * into MSDU_INFO structure which is later used to fill
3374 	 * SW and HW descriptors.
3375 	 */
3376 	if (qdf_nbuf_is_tso(nbuf)) {
3377 		dp_verbose_debug("TSO frame %pK", vdev);
3378 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3379 				 qdf_nbuf_len(nbuf));
3380 
3381 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3382 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3383 					 qdf_nbuf_len(nbuf));
3384 			return nbuf;
3385 		}
3386 
3387 		goto send_multiple;
3388 	}
3389 
3390 	/* SG */
3391 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
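		/*
		 * If the nbuf has more fragments than the MSDU extension
		 * descriptor can describe, linearize it; otherwise build
		 * the SG segment info from the existing fragments.
		 */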
3392 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3393 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3394 				return nbuf;
3395 		} else {
3396 			struct dp_tx_seg_info_s seg_info = {0};
3397 
3398 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3399 						&msdu_info);
3400 			if (!nbuf)
3401 				return NULL;
3402 
3403 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3404 
3405 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3406 					 qdf_nbuf_len(nbuf));
3407 
3408 			goto send_multiple;
3409 		}
3410 	}
3411 
3412 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3413 		return NULL;
3414 
3415 	/* RAW */
3416 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3417 		struct dp_tx_seg_info_s seg_info = {0};
3418 
3419 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3420 		if (!nbuf)
3421 			return NULL;
3422 
3423 		dp_verbose_debug("Raw frame %pK", vdev);
3424 
3425 		goto send_multiple;
3426 
3427 	}
3428 
3429 	if (qdf_unlikely(vdev->nawds_enabled)) {
3430 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3431 					  qdf_nbuf_data(nbuf);
3432 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3433 			uint16_t sa_peer_id = DP_INVALID_PEER;
3434 
3435 			if (!soc->ast_offload_support) {
3436 				struct dp_ast_entry *ast_entry = NULL;
3437 
3438 				qdf_spin_lock_bh(&soc->ast_lock);
3439 				ast_entry = dp_peer_ast_hash_find_by_pdevid
3440 					(soc,
3441 					 (uint8_t *)(eh->ether_shost),
3442 					 vdev->pdev->pdev_id);
3443 				if (ast_entry)
3444 					sa_peer_id = ast_entry->peer_id;
3445 				qdf_spin_unlock_bh(&soc->ast_lock);
3446 			}
3447 
3448 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
3449 					    sa_peer_id);
3450 		}
3451 		peer_id = DP_INVALID_PEER;
3452 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3453 				 1, qdf_nbuf_len(nbuf));
3454 	}
3455 
3456 	/*  Single linear frame */
3457 	/*
3458 	 * If nbuf is a simple linear frame, use send_single function to
3459 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3460 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3461 	 */
3462 	dp_tx_prefetch_nbuf_data(nbuf);
3463 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3464 
3465 	return nbuf;
3466 
3467 send_multiple:
3468 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3469 
3470 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3471 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3472 
3473 	return nbuf;
3474 }
3475 
3476 /**
3477  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3478  *      case to avoid the per-packet vdev_id check.
3479  * @soc: DP soc handle
3480  * @vdev_id: id of DP vdev handle
3481  * @nbuf: skb
3482  *
3483  * Entry point for Core Tx layer (DP_TX) invoked from
3484  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3485  * with special condition to avoid per pkt check in dp_tx_send
3486  *
3487  * Return: NULL on success,
3488  *         nbuf when it fails to send
3489  */
3490 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3491 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3492 {
3493 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3494 	struct dp_vdev *vdev = NULL;
3495 
3496 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3497 		return nbuf;
3498 
3499 	/*
3500 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3501 	 * it in the per-packet path.
3502 	 *
3503 	 * In this path the vdev memory is already protected by the netdev
3504 	 * tx lock.
3505 	 */
3506 	vdev = soc->vdev_id_map[vdev_id];
3507 	if (qdf_unlikely(!vdev))
3508 		return nbuf;
3509 
3510 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3511 			== QDF_STATUS_E_FAILURE)) {
3512 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3513 		return nbuf;
3514 	}
3515 
3516 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3517 }
3518 
3519 #ifdef UMAC_SUPPORT_PROXY_ARP
3520 /**
3521  * dp_tx_proxy_arp() - Tx proxy arp handler
3522  * @vdev: datapath vdev handle
3523  * @nbuf: sk buffer
3524  *
3525  * Return: status
3526  */
3527 static inline
3528 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3529 {
3530 	if (vdev->osif_proxy_arp)
3531 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3532 
3533 	/*
3534 	 * When UMAC_SUPPORT_PROXY_ARP is defined, osif_proxy_arp is
3535 	 * expected to have a valid function pointer assigned to it.
3537 	 */
3538 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3539 
3540 	return QDF_STATUS_NOT_INITIALIZED;
3541 }
3542 #else
3543 /**
3544  * dp_tx_proxy_arp() - Tx proxy arp handler
3545  * @vdev: datapath vdev handle
3546  * @nbuf: sk buffer
3547  *
3548  * This function always returns QDF_STATUS_SUCCESS when UMAC_SUPPORT_PROXY_ARP
3549  * is not defined.
3550  *
3551  * Return: status
3552  */
3553 static inline
3554 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3555 {
3556 	return QDF_STATUS_SUCCESS;
3557 }
3558 #endif
3559 
3560 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3561 #ifdef WLAN_MCAST_MLO
3562 static bool
3563 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3564 		       struct dp_tx_desc_s *tx_desc,
3565 		       qdf_nbuf_t nbuf,
3566 		       uint8_t reinject_reason)
3567 {
3568 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3569 		if (soc->arch_ops.dp_tx_mcast_handler)
3570 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3571 
3572 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3573 		return true;
3574 	}
3575 
3576 	return false;
3577 }
3578 #else /* WLAN_MCAST_MLO */
3579 static inline bool
3580 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3581 		       struct dp_tx_desc_s *tx_desc,
3582 		       qdf_nbuf_t nbuf,
3583 		       uint8_t reinject_reason)
3584 {
3585 	return false;
3586 }
3587 #endif /* WLAN_MCAST_MLO */
3588 #else
3589 static inline bool
3590 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
3591 		       struct dp_tx_desc_s *tx_desc,
3592 		       qdf_nbuf_t nbuf,
3593 		       uint8_t reinject_reason)
3594 {
3595 	return false;
3596 }
3597 #endif
3598 
3599 /**
3600  * dp_tx_reinject_handler() - Tx Reinject Handler
3601  * @soc: datapath soc handle
3602  * @vdev: datapath vdev handle
3603  * @tx_desc: software descriptor head pointer
3604  * @status: Tx completion status from HTT descriptor
3605  * @reinject_reason: reinject reason from HTT descriptor
3606  *
3607  * This function reinjects frames back to Target.
3608  * Todo - Host queue needs to be added
3609  *
3610  * Return: none
3611  */
3612 void dp_tx_reinject_handler(struct dp_soc *soc,
3613 			    struct dp_vdev *vdev,
3614 			    struct dp_tx_desc_s *tx_desc,
3615 			    uint8_t *status,
3616 			    uint8_t reinject_reason)
3617 {
3618 	struct dp_peer *peer = NULL;
3619 	uint32_t peer_id = HTT_INVALID_PEER;
3620 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3621 	qdf_nbuf_t nbuf_copy = NULL;
3622 	struct dp_tx_msdu_info_s msdu_info;
3623 #ifdef WDS_VENDOR_EXTENSION
3624 	int is_mcast = 0, is_ucast = 0;
3625 	int num_peers_3addr = 0;
3626 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3627 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3628 #endif
3629 	struct dp_txrx_peer *txrx_peer;
3630 
3631 	qdf_assert(vdev);
3632 
3633 	dp_tx_debug("Tx reinject path");
3634 
3635 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3636 			qdf_nbuf_len(tx_desc->nbuf));
3637 
3638 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
3639 		return;
3640 
3641 #ifdef WDS_VENDOR_EXTENSION
3642 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3643 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3644 	} else {
3645 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3646 	}
3647 	is_ucast = !is_mcast;
3648 
3649 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3650 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3651 		txrx_peer = dp_get_txrx_peer(peer);
3652 
3653 		if (!txrx_peer || txrx_peer->bss_peer)
3654 			continue;
3655 
3656 		/* Detect wds peers that use 3-addr framing for mcast.
3657 		 * If there are any, the bss_peer is used to send the
3658 		 * mcast frame using the 3-addr format. All wds-enabled
3659 		 * peers that use 4-addr framing for mcast frames will
3660 		 * be duplicated and sent as 4-addr frames below.
3661 		 */
3662 		if (!txrx_peer->wds_enabled ||
3663 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3664 			num_peers_3addr = 1;
3665 			break;
3666 		}
3667 	}
3668 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3669 #endif
3670 
3671 	if (qdf_unlikely(vdev->mesh_vdev)) {
3672 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3673 	} else {
3674 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3675 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3676 			txrx_peer = dp_get_txrx_peer(peer);
3677 			if (!txrx_peer)
3678 				continue;
3679 
3680 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3681 #ifdef WDS_VENDOR_EXTENSION
3682 			/*
3683 			 * . if 3-addr STA, then send on BSS Peer
3684 			 * . if Peer WDS enabled and accept 4-addr mcast,
3685 			 * send mcast on that peer only
3686 			 * . if Peer WDS enabled and accept 4-addr ucast,
3687 			 * send ucast on that peer only
3688 			 */
3689 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3690 			 (txrx_peer->wds_enabled &&
3691 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3692 			 (is_ucast &&
3693 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3694 #else
3695 			(txrx_peer->bss_peer &&
3696 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3697 #endif
3698 				peer_id = DP_INVALID_PEER;
3699 
3700 				nbuf_copy = qdf_nbuf_copy(nbuf);
3701 
3702 				if (!nbuf_copy) {
3703 					dp_tx_debug("nbuf copy failed");
3704 					break;
3705 				}
3706 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3707 				dp_tx_get_queue(vdev, nbuf,
3708 						&msdu_info.tx_queue);
3709 
3710 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3711 						nbuf_copy,
3712 						&msdu_info,
3713 						peer_id,
3714 						NULL);
3715 
3716 				if (nbuf_copy) {
3717 					dp_tx_debug("pkt send failed");
3718 					qdf_nbuf_free(nbuf_copy);
3719 				}
3720 			}
3721 		}
3722 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3723 	}
3724 
3725 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE,
3726 				     nbuf->len);
3727 	qdf_nbuf_free(nbuf);
3728 
3729 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3730 }
3731 
3732 /**
3733  * dp_tx_inspect_handler() - Tx Inspect Handler
3734  * @soc: datapath soc handle
3735  * @vdev: datapath vdev handle
3736  * @tx_desc: software descriptor head pointer
3737  * @status: Tx completion status from HTT descriptor
3738  *
3739  * Handles Tx frames sent back to Host for inspection
3740  * (ProxyARP)
3741  *
3742  * Return: none
3743  */
3744 void dp_tx_inspect_handler(struct dp_soc *soc,
3745 			   struct dp_vdev *vdev,
3746 			   struct dp_tx_desc_s *tx_desc,
3747 			   uint8_t *status)
3748 {
3749 
3750 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3751 			"%s Tx inspect path",
3752 			__func__);
3753 
3754 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3755 			 qdf_nbuf_len(tx_desc->nbuf));
3756 
3757 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3758 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3759 }
3760 
3761 #ifdef MESH_MODE_SUPPORT
3762 /**
3763  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3764  *                                         in mesh meta header
3765  * @tx_desc: software descriptor head pointer
3766  * @ts: pointer to tx completion stats
3767  * Return: none
3768  */
3769 static
3770 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3771 		struct hal_tx_completion_status *ts)
3772 {
3773 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3774 
3775 	if (!tx_desc->msdu_ext_desc) {
3776 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3777 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3778 				"netbuf %pK offset %d",
3779 				netbuf, tx_desc->pkt_offset);
3780 			return;
3781 		}
3782 	}
3783 }
3784 
3785 #else
3786 static
3787 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3788 		struct hal_tx_completion_status *ts)
3789 {
3790 }
3791 
3792 #endif
3793 
3794 #ifdef CONFIG_SAWF
3795 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3796 					 struct dp_vdev *vdev,
3797 					 struct dp_txrx_peer *txrx_peer,
3798 					 struct dp_tx_desc_s *tx_desc,
3799 					 struct hal_tx_completion_status *ts,
3800 					 uint8_t tid)
3801 {
3802 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
3803 					   ts, tid);
3804 }
3805 
3806 #else
3807 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
3808 					 struct dp_vdev *vdev,
3809 					 struct dp_txrx_peer *txrx_peer,
3810 					 struct dp_tx_desc_s *tx_desc,
3811 					 struct hal_tx_completion_status *ts,
3812 					 uint8_t tid)
3813 {
3814 }
3815 
3816 #endif
3817 
3818 #ifdef QCA_PEER_EXT_STATS
3819 /**
3820  * dp_tx_compute_tid_delay() - Compute per TID delay
3821  * @stats: Per TID delay stats
3822  * @tx_desc: Software Tx descriptor
3823  *
3824  * Compute the software enqueue and hw enqueue delays and
3825  * update the respective histograms
3826  *
3827  * Return: void
3828  */
3829 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3830 				    struct dp_tx_desc_s *tx_desc)
3831 {
3832 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3833 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3834 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3835 
3836 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3837 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3838 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
3839 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3840 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3841 					 timestamp_hw_enqueue);
3842 
3843 	/*
3844 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
3845 	 */
3846 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3847 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3848 }
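
/*
 * Editor's note: a worked example of the delay split computed above
 * (illustrative numbers only):
 *
 *	timestamp_ingress    = 1000 ms  (nbuf timestamp at hard_start_xmit)
 *	timestamp_hw_enqueue = 1002 ms  (tx_desc->timestamp at HW enqueue)
 *	current_timestamp    = 1007 ms  (completion time)
 *
 *	sw_enqueue_delay    = 1002 - 1000 = 2 ms  -> tx_swq_delay histogram
 *	fwhw_transmit_delay = 1007 - 1002 = 5 ms  -> hwtx_delay histogram
 */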
3849 
3850 /**
3851  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
3852  * @txrx_peer: DP peer context
3853  * @tx_desc: Tx software descriptor
3854  * @tid: Transmission ID
3855  * @ring_id: Tx completion ring number (CPU context ID)
3856  *
3857  * Update the peer extended stats. These are enhanced delay
3858  * stats maintained at the per-msdu level.
3859  *
3860  * Return: void
3861  */
3862 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
3863 					  struct dp_tx_desc_s *tx_desc,
3864 					  uint8_t tid, uint8_t ring_id)
3865 {
3866 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
3867 	struct dp_soc *soc = NULL;
3868 	struct dp_peer_delay_stats *delay_stats = NULL;
3869 
3870 	soc = pdev->soc;
3871 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3872 		return;
3873 
3874 	delay_stats = txrx_peer->delay_stats;
3875 
3876 	qdf_assert(delay_stats);
3877 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3878 
3879 	/*
3880 	 * For non-TID packets, use the highest data TID (CDP_MAX_DATA_TIDS - 1)
3881 	 */
3882 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3883 		tid = CDP_MAX_DATA_TIDS - 1;
3884 
3885 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
3886 				tx_desc);
3887 }
3888 #else
3889 static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
3890 						 struct dp_tx_desc_s *tx_desc,
3891 						 uint8_t tid, uint8_t ring_id)
3892 {
3893 }
3894 #endif
3895 
3896 #ifdef HW_TX_DELAY_STATS_ENABLE
3897 /**
3898  * dp_update_tx_delay_stats() - update the delay stats
3899  * @vdev: vdev handle
3900  * @delay: delay in ms or us based on the flag delay_in_us
3901  * @tid: tid value
3902  * @mode: type of tx delay mode
3903  * @ring_id: ring number
3904  * @delay_in_us: flag to indicate whether the delay is in ms or us
3905  *
3906  * Return: none
3907  */
3908 static inline
3909 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
3910 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
3911 {
3912 	struct cdp_tid_tx_stats *tstats =
3913 		&vdev->stats.tid_tx_stats[ring_id][tid];
3914 
3915 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
3916 			      delay_in_us);
3917 }
3918 #else
3919 static inline
3920 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
3921 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
3922 {
3923 	struct cdp_tid_tx_stats *tstats =
3924 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3925 
3926 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
3927 			      delay_in_us);
3928 }
3929 #endif
3930 
3931 /**
3932  * dp_tx_compute_delay() - Compute the sw enqueue, hw transmit and
3933  *				interframe delays for this frame
3934  *
3935  * @vdev: vdev handle
3936  * @tx_desc: tx descriptor
3937  * @tid: tid value
3938  * @ring_id: TCL or WBM ring number for transmit path
3939  * Return: none
3940  */
3941 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
3942 			 uint8_t tid, uint8_t ring_id)
3943 {
3944 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3945 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3946 	uint32_t fwhw_transmit_delay_us;
3947 
3948 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
3949 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
3950 		return;
3951 
3952 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
3953 		fwhw_transmit_delay_us =
3954 			qdf_ktime_to_us(qdf_ktime_real_get()) -
3955 			qdf_ktime_to_us(tx_desc->timestamp);
3956 
3957 		/*
3958 		 * Delay between packet enqueued to HW and Tx completion in us
3959 		 */
3960 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
3961 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
3962 					 ring_id, true);
3963 		/*
3964 		 * For MCL, only the enqueue-to-completion delay is required,
3965 		 * so return here when the per-vdev flag is enabled.
3966 		 */
3967 		return;
3968 	}
3969 
3970 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3971 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
3972 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3973 					 timestamp_hw_enqueue);
3974 
3975 	/*
3976 	 * Delay between packet enqueued to HW and Tx completion in ms
3977 	 */
3978 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
3979 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
3980 				 false);
3981 
3982 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3983 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3984 	interframe_delay = (uint32_t)(timestamp_ingress -
3985 				      vdev->prev_tx_enq_tstamp);
3986 
3987 	/*
3988 	 * Delay in software enqueue
3989 	 */
3990 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
3991 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
3992 				 false);
3993 
3994 	/*
3995 	 * Update interframe delay stats calculated at hardstart receive point.
3996 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3997 	 * interframe delay will not be calculated correctly for the 1st frame.
3998 	 * On the other hand, this avoids an extra per-packet check
3999 	 * of !vdev->prev_tx_enq_tstamp.
4000 	 */
4001 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
4002 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
4003 				 false);
4004 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
4005 }
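
/*
 * Editor's note: the function above reports delays in two modes:
 *
 *	- per-vdev delay stats enabled: only the HW enqueue-to-completion
 *	  delay is reported, in microseconds (delay_in_us = true);
 *	- pdev delay_stats_flag: FW/HW transmit delay, SW enqueue delay and
 *	  interframe delay are all reported in milliseconds.
 *
 * Illustrative interframe example: if frame N was received from the stack
 * at t = 100 ms and frame N+1 at t = 130 ms, the interframe delay recorded
 * for frame N+1 is 30 ms; for the very first frame the value is not
 * meaningful because prev_tx_enq_tstamp starts at 0, as noted above.
 */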
4006 
4007 #ifdef DISABLE_DP_STATS
4008 static
4009 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
4010 				   struct dp_txrx_peer *txrx_peer)
4011 {
4012 }
4013 #else
4014 static inline void
4015 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
4016 {
4017 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
4018 
4019 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
4020 	if (subtype != QDF_PROTO_INVALID)
4021 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
4022 					  1);
4023 }
4024 #endif
4025 
4026 #ifndef QCA_ENHANCED_STATS_SUPPORT
4027 /**
4028  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
4029  *
4030  * @ts: Tx completion status
4031  * @txrx_peer: datapath txrx_peer handle
4032  *
4033  * Return: void
4034  */
4035 static inline void
4036 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4037 			     struct dp_txrx_peer *txrx_peer)
4038 {
4039 	uint8_t mcs, pkt_type, dst_mcs_idx;
4040 	uint8_t retry_threshold = txrx_peer->mpdu_retry_threshold;
4041 
4042 	mcs = ts->mcs;
4043 	pkt_type = ts->pkt_type;
4044 	/* do HW to SW pkt type conversion */
4045 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
4046 		    hal_2_dp_pkt_type_map[pkt_type]);
4047 
4048 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
4049 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
4050 		DP_PEER_EXTD_STATS_INC(txrx_peer,
4051 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
4052 				       1);
4053 
4054 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
4055 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
4056 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
4057 	DP_PEER_EXTD_STATS_INC(txrx_peer,
4058 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
4059 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
4060 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
4061 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
4062 	if (ts->first_msdu) {
4063 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
4064 					ts->transmit_cnt > 1);
4065 
4066 		if (!retry_threshold)
4067 			return;
4068 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
4069 					qdf_do_div(ts->transmit_cnt,
4070 						   retry_threshold),
4071 					ts->transmit_cnt > retry_threshold);
4072 	}
4073 }
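
/*
 * Editor's note: a worked example of the retry accounting above, assuming
 * a hypothetical mpdu_retry_threshold of 4:
 *
 *	transmit_cnt = 1 -> no retry counters incremented
 *	transmit_cnt = 3 -> tx.retries incremented; tx.retries_mpdu also
 *	                    incremented when first_msdu is set
 *	transmit_cnt = 9 -> additionally, tx.mpdu_success_with_retries is
 *	                    incremented by qdf_do_div(9, 4) = 2, since
 *	                    transmit_cnt exceeds the threshold
 */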
4074 #else
4075 static inline void
4076 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
4077 			     struct dp_txrx_peer *txrx_peer)
4078 {
4079 }
4080 #endif
4081 
4082 /**
4083  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
4084  *				per wbm ring
4085  *
4086  * @tx_desc: software descriptor head pointer
4087  * @ts: Tx completion status
4088  * @txrx_peer: datapath txrx_peer handle
4089  * @ring_id: ring number
4090  *
4091  * Return: None
4092  */
4093 static inline void
4094 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
4095 			struct hal_tx_completion_status *ts,
4096 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
4097 {
4098 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4099 	uint8_t tid = ts->tid;
4100 	uint32_t length;
4101 	struct cdp_tid_tx_stats *tid_stats;
4102 
4103 	if (!pdev)
4104 		return;
4105 
4106 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4107 		tid = CDP_MAX_DATA_TIDS - 1;
4108 
4109 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4110 
4111 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
4112 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
4113 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
4114 		return;
4115 	}
4116 
4117 	length = qdf_nbuf_len(tx_desc->nbuf);
4118 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4119 
4120 	if (qdf_unlikely(pdev->delay_stats_flag) ||
4121 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
4122 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
4123 
4124 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
4125 		tid_stats->tqm_status_cnt[ts->status]++;
4126 	}
4127 
4128 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
4129 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
4130 					   ts->transmit_cnt > 1);
4131 
4132 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
4133 					   1, ts->transmit_cnt > 2);
4134 
4135 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
4136 
4137 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
4138 					   ts->msdu_part_of_amsdu);
4139 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
4140 					   !ts->msdu_part_of_amsdu);
4141 
4142 		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
4143 							qdf_system_ticks();
4144 
4145 		dp_tx_update_peer_extd_stats(ts, txrx_peer);
4146 
4147 		return;
4148 	}
4149 
4150 	/*
4151 	 * tx_failed is ideally supposed to be updated from HTT ppdu
4152 	 * completion stats. But on IPQ807X/IPQ6018 chipsets, owing to
4153 	 * a hw limitation, there are no completions for failed cases.
4154 	 * Hence tx_failed is updated from the data path. Note that if
4155 	 * tx_failed is later sourced from ppdu stats, this has to be
4156 	 * removed.
4157 	 */
4158 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4159 
4160 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
4161 				   ts->transmit_cnt > DP_RETRY_COUNT);
4162 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
4163 
4164 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
4165 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
4166 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
4167 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
4168 					      length);
4169 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
4170 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
4171 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
4172 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
4173 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
4174 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
4175 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
4176 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
4177 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
4178 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
4179 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
4180 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4181 					  tx.dropped.fw_rem_queue_disable, 1);
4182 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
4183 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4184 					  tx.dropped.fw_rem_no_match, 1);
4185 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
4186 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4187 					  tx.dropped.drop_threshold, 1);
4188 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
4189 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4190 					  tx.dropped.drop_link_desc_na, 1);
4191 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
4192 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4193 					  tx.dropped.invalid_drop, 1);
4194 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4195 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
4196 					  tx.dropped.mcast_vdev_drop, 1);
4197 	} else {
4198 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
4199 	}
4200 }
4201 
4202 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4203 /**
4204  * dp_tx_flow_pool_lock() - take flow pool lock
4205  * @soc: core txrx main context
4206  * @tx_desc: tx desc
4207  *
4208  * Return: None
4209  */
4210 static inline
4211 void dp_tx_flow_pool_lock(struct dp_soc *soc,
4212 			  struct dp_tx_desc_s *tx_desc)
4213 {
4214 	struct dp_tx_desc_pool_s *pool;
4215 	uint8_t desc_pool_id;
4216 
4217 	desc_pool_id = tx_desc->pool_id;
4218 	pool = &soc->tx_desc[desc_pool_id];
4219 
4220 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4221 }
4222 
4223 /**
4224  * dp_tx_flow_pool_unlock() - release flow pool lock
4225  * @soc: core txrx main context
4226  * @tx_desc: tx desc
4227  *
4228  * Return: None
4229  */
4230 static inline
4231 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4232 			    struct dp_tx_desc_s *tx_desc)
4233 {
4234 	struct dp_tx_desc_pool_s *pool;
4235 	uint8_t desc_pool_id;
4236 
4237 	desc_pool_id = tx_desc->pool_id;
4238 	pool = &soc->tx_desc[desc_pool_id];
4239 
4240 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4241 }
4242 #else
4243 static inline
4244 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4245 {
4246 }
4247 
4248 static inline
4249 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4250 {
4251 }
4252 #endif
4253 
4254 /**
4255  * dp_tx_notify_completion() - Notify tx completion for this desc
4256  * @soc: core txrx main context
4257  * @vdev: datapath vdev handle
4258  * @tx_desc: tx desc
4259  * @netbuf:  buffer
4260  * @status: tx status
4261  *
4262  * Return: none
4263  */
4264 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4265 					   struct dp_vdev *vdev,
4266 					   struct dp_tx_desc_s *tx_desc,
4267 					   qdf_nbuf_t netbuf,
4268 					   uint8_t status)
4269 {
4270 	void *osif_dev;
4271 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4272 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4273 
4274 	qdf_assert(tx_desc);
4275 
4276 	if (!vdev ||
4277 	    !vdev->osif_vdev) {
4278 		return;
4279 	}
4280 
4281 	osif_dev = vdev->osif_vdev;
4282 	tx_compl_cbk = vdev->tx_comp;
4283 
4284 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4285 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4286 
4287 	if (tx_compl_cbk)
4288 		tx_compl_cbk(netbuf, osif_dev, flag);
4289 }
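
/*
 * Editor's note: the notification flag passed to the OSIF completion
 * callback above is a bitmap. For example, a frame that was downloaded
 * to the target and acked carries
 *
 *	BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC) | BIT(QDF_TX_RX_STATUS_OK)
 *
 * while a frame that was downloaded but not acked carries only
 * BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC).
 */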
4290 
4291 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
4292  * @pdev: pdev handle
 * @txrx_peer: datapath txrx_peer handle
4293  * @tid: tid value
4294  * @txdesc_ts: timestamp from txdesc
4295  * @ppdu_id: ppdu id
4296  *
4297  * Return: none
4298  */
4299 #ifdef FEATURE_PERPKT_INFO
4300 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4301 					       struct dp_txrx_peer *txrx_peer,
4302 					       uint8_t tid,
4303 					       uint64_t txdesc_ts,
4304 					       uint32_t ppdu_id)
4305 {
4306 	uint64_t delta_ms;
4307 	struct cdp_tx_sojourn_stats *sojourn_stats;
4308 	struct dp_peer *primary_link_peer = NULL;
4309 	struct dp_soc *link_peer_soc = NULL;
4310 
4311 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4312 		return;
4313 
4314 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4315 			 tid >= CDP_DATA_TID_MAX))
4316 		return;
4317 
4318 	if (qdf_unlikely(!pdev->sojourn_buf))
4319 		return;
4320 
4321 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4322 							   txrx_peer->peer_id,
4323 							   DP_MOD_ID_TX_COMP);
4324 
4325 	if (qdf_unlikely(!primary_link_peer))
4326 		return;
4327 
4328 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4329 		qdf_nbuf_data(pdev->sojourn_buf);
4330 
4331 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4332 	sojourn_stats->cookie = (void *)
4333 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
4334 							  primary_link_peer);
4335 
4336 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4337 				txdesc_ts;
4338 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4339 			    delta_ms);
4340 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4341 	sojourn_stats->num_msdus[tid] = 1;
4342 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4343 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4344 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4345 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4346 			     WDI_NO_VAL, pdev->pdev_id);
4347 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4348 	sojourn_stats->num_msdus[tid] = 0;
4349 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4350 
4351 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4352 }
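
/*
 * Editor's note: a short illustration of the sojourn accounting above.
 * If a descriptor was enqueued at txdesc_ts = 5000 ms and completes at
 * 5012 ms, delta_ms = 12: the per-peer EWMA avg_sojourn_msdu[tid] is
 * updated with 12 ms, the snapshot in the shared sojourn_buf reports
 * sum_sojourn_msdu[tid] = 12 for num_msdus[tid] = 1, and those buffer
 * fields are cleared again once the WDI event has been delivered.
 */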
4353 #else
4354 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4355 					       struct dp_txrx_peer *txrx_peer,
4356 					       uint8_t tid,
4357 					       uint64_t txdesc_ts,
4358 					       uint32_t ppdu_id)
4359 {
4360 }
4361 #endif
4362 
4363 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4364 /**
4365  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4366  * @soc: dp_soc handle
4367  * @desc: Tx Descriptor
4368  * @ts: HAL Tx completion descriptor contents
4369  *
4370  * This function is used to send tx completion to packet capture
4371  */
4372 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4373 				       struct dp_tx_desc_s *desc,
4374 				       struct hal_tx_completion_status *ts)
4375 {
4376 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4377 			     desc, ts->peer_id,
4378 			     WDI_NO_VAL, desc->pdev->pdev_id);
4379 }
4380 #endif
4381 
4382 /**
4383  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4384  * @soc: DP Soc handle
4385  * @desc: software Tx descriptor
4386  * @ts: Tx completion status from HAL/HTT descriptor
 * @txrx_peer: datapath txrx_peer handle
4387  *
4388  * Return: none
4389  */
4390 void
4391 dp_tx_comp_process_desc(struct dp_soc *soc,
4392 			struct dp_tx_desc_s *desc,
4393 			struct hal_tx_completion_status *ts,
4394 			struct dp_txrx_peer *txrx_peer)
4395 {
4396 	uint64_t time_latency = 0;
4397 	uint16_t peer_id = DP_INVALID_PEER_ID;
4398 
4399 	/*
4400 	 * m_copy/tx_capture modes are not supported for
4401 	 * scatter gather packets
4402 	 */
4403 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4404 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4405 				qdf_ktime_to_ms(desc->timestamp));
4406 	}
4407 
4408 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4409 
4410 	if (dp_tx_pkt_tracepoints_enabled())
4411 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4412 				    desc->msdu_ext_desc ?
4413 				    desc->msdu_ext_desc->tso_desc : NULL,
4414 				    qdf_ktime_to_ms(desc->timestamp));
4415 
4416 	if (!(desc->msdu_ext_desc)) {
4417 		dp_tx_enh_unmap(soc, desc);
4418 		if (txrx_peer)
4419 			peer_id = txrx_peer->peer_id;
4420 
4421 		if (QDF_STATUS_SUCCESS ==
4422 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4423 			return;
4424 		}
4425 
4426 		if (QDF_STATUS_SUCCESS ==
4427 		    dp_get_completion_indication_for_stack(soc,
4428 							   desc->pdev,
4429 							   txrx_peer, ts,
4430 							   desc->nbuf,
4431 							   time_latency)) {
4432 			dp_send_completion_to_stack(soc,
4433 						    desc->pdev,
4434 						    ts->peer_id,
4435 						    ts->ppdu_id,
4436 						    desc->nbuf);
4437 			return;
4438 		}
4439 	}
4440 
4441 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4442 	dp_tx_comp_free_buf(soc, desc);
4443 }
4444 
4445 #ifdef DISABLE_DP_STATS
4446 /**
4447  * dp_tx_update_connectivity_stats() - update tx connectivity stats
4448  * @soc: core txrx main context
 * @vdev: datapath vdev handle
4449  * @tx_desc: tx desc
4450  * @status: tx status
4451  *
4452  * Return: none
4453  */
4454 static inline
4455 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4456 				     struct dp_vdev *vdev,
4457 				     struct dp_tx_desc_s *tx_desc,
4458 				     uint8_t status)
4459 {
4460 }
4461 #else
4462 static inline
4463 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4464 				     struct dp_vdev *vdev,
4465 				     struct dp_tx_desc_s *tx_desc,
4466 				     uint8_t status)
4467 {
4468 	void *osif_dev;
4469 	ol_txrx_stats_rx_fp stats_cbk;
4470 	uint8_t pkt_type;
4471 
4472 	qdf_assert(tx_desc);
4473 
4474 	if (!vdev ||
4475 	    !vdev->osif_vdev ||
4476 	    !vdev->stats_cb)
4477 		return;
4478 
4479 	osif_dev = vdev->osif_vdev;
4480 	stats_cbk = vdev->stats_cb;
4481 
4482 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4483 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4484 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4485 			  &pkt_type);
4486 }
4487 #endif
4488 
4489 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF)
4490 QDF_STATUS
4491 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
4492 			  uint32_t delta_tsf,
4493 			  uint32_t *delay_us)
4494 {
4495 	uint32_t buffer_ts;
4496 	uint32_t delay;
4497 
4498 	if (!delay_us)
4499 		return QDF_STATUS_E_INVAL;
4500 
4501 	/* If tx_rate_stats_info_valid is 0, the tsf is invalid; bail out */
4502 	if (!ts->valid)
4503 		return QDF_STATUS_E_INVAL;
4504 
4505 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4506 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4507 	 * valid up to 29 bits.
4508 	 */
4509 	buffer_ts = ts->buffer_timestamp << 10;
4510 
4511 	delay = ts->tsf - buffer_ts - delta_tsf;
4512 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
4513 	if (delay > 0x1000000) {
4514 		dp_info_rl("----------------------\n"
4515 			   "Tx completion status:\n"
4516 			   "----------------------\n"
4517 			   "release_src = %d\n"
4518 			   "ppdu_id = 0x%x\n"
4519 			   "release_reason = %d\n"
4520 			   "tsf = %u (0x%x)\n"
4521 			   "buffer_timestamp = %u (0x%x)\n"
4522 			   "delta_tsf = %u (0x%x)\n",
4523 			   ts->release_src, ts->ppdu_id, ts->status,
4524 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4525 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4526 		return QDF_STATUS_E_FAILURE;
4527 	}
4528 
4529 	*delay_us = delay;
4530 
4531 	return QDF_STATUS_SUCCESS;
4532 }
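
/*
 * Editor's note: a worked example of the computation above. With
 * buffer_timestamp = 0x2625A (units of 1024 us), the left shift by 10
 * gives buffer_ts = 0x9896800 us. If ts->tsf = 0x98969C8 us and
 * delta_tsf = 0x100 us, then
 *
 *	delay = (0x98969C8 - 0x9896800 - 0x100) & 0x1FFFFFFF = 0xC8 = 200 us
 *
 * The 29-bit mask handles wrap-around of the truncated timestamp; values
 * above 0x1000000 us (~16.8 s) are treated as implausible and rejected.
 */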
4533 
4534 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4535 		      uint32_t delta_tsf)
4536 {
4537 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4538 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4539 						     DP_MOD_ID_CDP);
4540 
4541 	if (!vdev) {
4542 		dp_err_rl("vdev %d does not exist", vdev_id);
4543 		return;
4544 	}
4545 
4546 	vdev->delta_tsf = delta_tsf;
4547 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4548 
4549 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4550 }
4551 #endif
4552 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4553 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4554 				      uint8_t vdev_id, bool enable)
4555 {
4556 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4557 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4558 						     DP_MOD_ID_CDP);
4559 
4560 	if (!vdev) {
4561 		dp_err_rl("vdev %d does not exist", vdev_id);
4562 		return QDF_STATUS_E_FAILURE;
4563 	}
4564 
4565 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4566 
4567 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4568 
4569 	return QDF_STATUS_SUCCESS;
4570 }
4571 
4572 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4573 			       uint32_t *val)
4574 {
4575 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4576 	struct dp_vdev *vdev;
4577 	uint32_t delay_accum;
4578 	uint32_t pkts_accum;
4579 
4580 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4581 	if (!vdev) {
4582 		dp_err_rl("vdev %d does not exist", vdev_id);
4583 		return QDF_STATUS_E_FAILURE;
4584 	}
4585 
4586 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4587 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4588 		return QDF_STATUS_E_FAILURE;
4589 	}
4590 
4591 	/* Average uplink delay based on current accumulated values */
4592 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4593 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4594 
	/* Guard against division by zero when no packets have accumulated */
	if (!pkts_accum) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

4595 	*val = delay_accum / pkts_accum;
4596 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4597 		 delay_accum, pkts_accum);
4598 
4599 	/* Reset accumulated values to 0 */
4600 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4601 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4602 
4603 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4604 
4605 	return QDF_STATUS_SUCCESS;
4606 }
4607 
4608 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4609 				      struct hal_tx_completion_status *ts)
4610 {
4611 	uint32_t ul_delay;
4612 
4613 	if (qdf_unlikely(!vdev)) {
4614 		dp_info_rl("vdev is null or deletion in progress");
4615 		return;
4616 	}
4617 
4618 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4619 		return;
4620 
4621 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
4622 							  vdev->delta_tsf,
4623 							  &ul_delay)))
4624 		return;
4625 
4626 	ul_delay /= 1000; /* in unit of ms */
4627 
4628 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4629 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4630 }
4631 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4632 static inline
4633 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4634 			       struct hal_tx_completion_status *ts)
4635 {
4636 }
4637 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4638 
4639 /**
4640  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4641  * @soc: DP soc handle
4642  * @tx_desc: software descriptor head pointer
4643  * @ts: Tx completion status
4644  * @txrx_peer: txrx peer handle
4645  * @ring_id: ring number
4646  *
4647  * Return: none
4648  */
4649 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4650 				  struct dp_tx_desc_s *tx_desc,
4651 				  struct hal_tx_completion_status *ts,
4652 				  struct dp_txrx_peer *txrx_peer,
4653 				  uint8_t ring_id)
4654 {
4655 	uint32_t length;
4656 	qdf_ether_header_t *eh;
4657 	struct dp_vdev *vdev = NULL;
4658 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4659 	enum qdf_dp_tx_rx_status dp_status;
4660 
4661 	if (!nbuf) {
4662 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4663 		goto out;
4664 	}
4665 
4666 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4667 	length = qdf_nbuf_len(nbuf);
4668 
4669 	dp_status = dp_tx_hw_to_qdf(ts->status);
4670 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4671 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4672 				 QDF_TRACE_DEFAULT_PDEV_ID,
4673 				 qdf_nbuf_data_addr(nbuf),
4674 				 sizeof(qdf_nbuf_data(nbuf)),
4675 				 tx_desc->id, ts->status, dp_status));
4676 
4677 	dp_tx_comp_debug("-------------------- \n"
4678 			 "Tx Completion Stats: \n"
4679 			 "-------------------- \n"
4680 			 "ack_frame_rssi = %d \n"
4681 			 "first_msdu = %d \n"
4682 			 "last_msdu = %d \n"
4683 			 "msdu_part_of_amsdu = %d \n"
4684 			 "rate_stats valid = %d \n"
4685 			 "bw = %d \n"
4686 			 "pkt_type = %d \n"
4687 			 "stbc = %d \n"
4688 			 "ldpc = %d \n"
4689 			 "sgi = %d \n"
4690 			 "mcs = %d \n"
4691 			 "ofdma = %d \n"
4692 			 "tones_in_ru = %d \n"
4693 			 "tsf = %d \n"
4694 			 "ppdu_id = %d \n"
4695 			 "transmit_cnt = %d \n"
4696 			 "tid = %d \n"
4697 			 "peer_id = %d\n"
4698 			 "tx_status = %d\n",
4699 			 ts->ack_frame_rssi, ts->first_msdu,
4700 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4701 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4702 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4703 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4704 			 ts->transmit_cnt, ts->tid, ts->peer_id,
4705 			 ts->status);
4706 
4707 	/* Update SoC level stats */
4708 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4709 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4710 
4711 	if (!txrx_peer) {
4712 		dp_info_rl("peer is null or deletion in progress");
4713 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4714 		goto out;
4715 	}
4716 	vdev = txrx_peer->vdev;
4717 
4718 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4719 	dp_tx_update_uplink_delay(soc, vdev, ts);
4720 
4721 	/* check tx complete notification */
4722 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
4723 		dp_tx_notify_completion(soc, vdev, tx_desc,
4724 					nbuf, ts->status);
4725 
4726 	/* Update per-packet stats for mesh mode */
4727 	if (qdf_unlikely(vdev->mesh_vdev) &&
4728 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4729 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4730 
4731 	/* Update peer level stats */
4732 	if (qdf_unlikely(txrx_peer->bss_peer &&
4733 			 vdev->opmode == wlan_op_mode_ap)) {
4734 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4735 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
4736 						      length);
4737 
4738 			if (txrx_peer->vdev->tx_encap_type ==
4739 				htt_cmn_pkt_type_ethernet &&
4740 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4741 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4742 							      tx.bcast, 1,
4743 							      length);
4744 			}
4745 		}
4746 	} else {
4747 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
4748 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4749 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
4750 						      1, length);
4751 			if (qdf_unlikely(txrx_peer->in_twt)) {
4752 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4753 							      tx.tx_success_twt,
4754 							      1, length);
4755 			}
4756 		}
4757 	}
4758 
4759 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
4760 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
4761 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
4762 				     ts, ts->tid);
4763 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
4764 
4765 #ifdef QCA_SUPPORT_RDK_STATS
4766 	if (soc->peerstats_enabled)
4767 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
4768 					    qdf_ktime_to_ms(tx_desc->timestamp),
4769 					    ts->ppdu_id);
4770 #endif
4771 
4772 out:
4773 	return;
4774 }
4775 
4776 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
4777 	defined(QCA_ENHANCED_STATS_SUPPORT)
4778 /**
4779  * dp_tx_update_peer_basic_stats(): Update peer basic stats
4780  * @txrx_peer: Datapath txrx_peer handle
4781  * @length: Length of the packet
4782  * @tx_status: Tx status from TQM/FW
4783  * @update: enhanced flag value present in dp_pdev
4784  *
4785  * Return: none
4786  */
4787 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4788 				   uint32_t length, uint8_t tx_status,
4789 				   bool update)
4790 {
4791 	if ((!txrx_peer->hw_txrx_stats_en) || update) {
4792 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4793 
4794 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4795 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4796 	}
4797 }
4798 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
4799 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4800 				   uint32_t length, uint8_t tx_status,
4801 				   bool update)
4802 {
4803 	if (!txrx_peer->hw_txrx_stats_en) {
4804 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4805 
4806 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4807 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4808 	}
4809 }
4810 
4811 #else
4812 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4813 				   uint32_t length, uint8_t tx_status,
4814 				   bool update)
4815 {
4816 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4817 
4818 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4819 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4820 }
4821 #endif
4822 
4823 /**
4824  * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
4825  * @next: pointer to the next Tx descriptor in the completion list
4826  *
4827  * Return: none
4828  */
4829 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
4830 static inline
4831 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
4832 {
4833 	qdf_nbuf_t nbuf = NULL;
4834 
4835 	if (next)
4836 		nbuf = next->nbuf;
4837 	if (nbuf) {
4838 		/* prefetch skb->next and first few bytes of skb->cb */
4839 		qdf_prefetch(nbuf);
4840 		/* prefetch skb fields present in different cachelines */
4841 		qdf_prefetch(&nbuf->len);
4842 		qdf_prefetch(&nbuf->users);
4843 		qdf_prefetch(skb_end_pointer(nbuf));
4844 	}
4845 }
4846 #else
4847 static inline
4848 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
4849 {
4850 }
4851 #endif
4852 
4853 /**
4854  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
4855  * @soc: core txrx main context
4856  * @desc: software descriptor
4857  *
4858  * Return: true when packet is reinjected
4859  */
4860 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
4861 	defined(WLAN_MCAST_MLO)
4862 static inline bool
4863 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
4864 {
4865 	struct dp_vdev *vdev = NULL;
4866 
4867 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
4868 		if (!soc->arch_ops.dp_tx_mcast_handler)
4869 			return false;
4870 
4871 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
4872 					     DP_MOD_ID_REINJECT);
4873 
4874 		if (qdf_unlikely(!vdev)) {
4875 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
4876 					   desc->id);
4877 			return false;
4878 		}
4879 		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
4880 				 qdf_nbuf_len(desc->nbuf));
4881 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
4882 		dp_tx_desc_release(desc, desc->pool_id);
4883 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
4884 		return true;
4885 	}
4886 
4887 	return false;
4888 }
4889 #else
4890 static inline bool
4891 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
4892 {
4893 	return false;
4894 }
4895 #endif
4896 
4897 /**
4898  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4899  * @soc: core txrx main context
4900  * @comp_head: software descriptor head pointer
4901  * @ring_id: ring number
4902  *
4903  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4904  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
4905  *
4906  * Return: none
4907  */
4908 static void
4909 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4910 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4911 {
4912 	struct dp_tx_desc_s *desc;
4913 	struct dp_tx_desc_s *next;
4914 	struct hal_tx_completion_status ts;
4915 	struct dp_txrx_peer *txrx_peer = NULL;
4916 	uint16_t peer_id = DP_INVALID_PEER;
4917 	dp_txrx_ref_handle txrx_ref_handle = NULL;
4918 
4919 	desc = comp_head;
4920 
4921 	while (desc) {
4922 		next = desc->next;
4923 		dp_tx_prefetch_next_nbuf_data(next);
4924 
4925 		if (peer_id != desc->peer_id) {
4926 			if (txrx_peer)
4927 				dp_txrx_peer_unref_delete(txrx_ref_handle,
4928 							  DP_MOD_ID_TX_COMP);
4929 			peer_id = desc->peer_id;
4930 			txrx_peer =
4931 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
4932 							   &txrx_ref_handle,
4933 							   DP_MOD_ID_TX_COMP);
4934 		}
4935 
4936 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
4937 			desc = next;
4938 			continue;
4939 		}
4940 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4941 			struct dp_pdev *pdev = desc->pdev;
4942 
4943 			if (qdf_likely(txrx_peer))
4944 				dp_tx_update_peer_basic_stats(txrx_peer,
4945 							      desc->length,
4946 							      desc->tx_status,
4947 							      false);
4948 			qdf_assert(pdev);
4949 			dp_tx_outstanding_dec(pdev);
4950 
4951 			/*
4952 			 * Calling a QDF wrapper here creates a significant
4953 			 * performance impact, so the wrapper call is avoided here
4954 			 */
4955 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
4956 					       desc->id, DP_TX_COMP_UNMAP);
4957 			dp_tx_nbuf_unmap(soc, desc);
4958 			qdf_nbuf_free_simple(desc->nbuf);
4959 			dp_tx_desc_free(soc, desc, desc->pool_id);
4960 			desc = next;
4961 			continue;
4962 		}
4963 
4964 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4965 
4966 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
4967 					     ring_id);
4968 
4969 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
4970 
4971 		dp_tx_desc_release(desc, desc->pool_id);
4972 		desc = next;
4973 	}
4974 	if (txrx_peer)
4975 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
4976 }
4977 
4978 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4979 static inline
4980 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4981 				   int max_reap_limit)
4982 {
4983 	bool limit_hit = false;
4984 
4985 	limit_hit =
4986 		(num_reaped >= max_reap_limit) ? true : false;
4987 
4988 	if (limit_hit)
4989 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4990 
4991 	return limit_hit;
4992 }
4993 
4994 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4995 {
4996 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4997 }
4998 
4999 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5000 {
5001 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
5002 
5003 	return cfg->tx_comp_loop_pkt_limit;
5004 }
5005 #else
5006 static inline
5007 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
5008 				   int max_reap_limit)
5009 {
5010 	return false;
5011 }
5012 
5013 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
5014 {
5015 	return false;
5016 }
5017 
5018 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
5019 {
5020 	return 0;
5021 }
5022 #endif
5023 
5024 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
5025 static inline int
5026 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5027 				  int *max_reap_limit)
5028 {
5029 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
5030 							       max_reap_limit);
5031 }
5032 #else
5033 static inline int
5034 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
5035 				  int *max_reap_limit)
5036 {
5037 	return 0;
5038 }
5039 #endif
5040 
5041 #ifdef DP_TX_TRACKING
5042 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
5043 {
5044 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
5045 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
5046 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
5047 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
5048 	}
5049 }
5050 #endif
5051 
5052 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
5053 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
5054 			    uint32_t quota)
5055 {
5056 	void *tx_comp_hal_desc;
5057 	void *last_prefetched_hw_desc = NULL;
5058 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
5059 	hal_soc_handle_t hal_soc;
5060 	uint8_t buffer_src;
5061 	struct dp_tx_desc_s *tx_desc = NULL;
5062 	struct dp_tx_desc_s *head_desc = NULL;
5063 	struct dp_tx_desc_s *tail_desc = NULL;
5064 	uint32_t num_processed = 0;
5065 	uint32_t count;
5066 	uint32_t num_avail_for_reap = 0;
5067 	bool force_break = false;
5068 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
5069 	int max_reap_limit, ring_near_full;
5070 
5071 	DP_HIST_INIT();
5072 
5073 more_data:
5074 
5075 	hal_soc = soc->hal_soc;
5076 	/* Re-initialize local variables to be re-used */
5077 	head_desc = NULL;
5078 	tail_desc = NULL;
5079 	count = 0;
5080 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
5081 
5082 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
5083 							   &max_reap_limit);
5084 
5085 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
5086 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
5087 		return 0;
5088 	}
5089 
5090 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
5091 
5092 	if (num_avail_for_reap >= quota)
5093 		num_avail_for_reap = quota;
5094 
5095 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
5096 	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
5097 						       num_avail_for_reap);
5098 
5099 	/* Find head descriptor from completion ring */
5100 	while (qdf_likely(num_avail_for_reap--)) {
5101 
5102 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
5103 		if (qdf_unlikely(!tx_comp_hal_desc))
5104 			break;
5105 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
5106 							   tx_comp_hal_desc);
5107 
5108 		/* If this buffer was not released by TQM or FW, then it is
5109 		 * not a Tx completion indication; assert */
5110 		if (qdf_unlikely(buffer_src !=
5111 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
5112 				 (qdf_unlikely(buffer_src !=
5113 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
5114 			uint8_t wbm_internal_error;
5115 
5116 			dp_err_rl(
5117 				"Tx comp release_src != TQM | FW but from %d",
5118 				buffer_src);
5119 			hal_dump_comp_desc(tx_comp_hal_desc);
5120 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
5121 
5122 			/* When WBM sees NULL buffer_addr_info in any of
5123 			 * ingress rings it sends an error indication,
5124 			 * with wbm_internal_error=1, to a specific ring.
5125 			 * The WBM2SW ring used to indicate these errors is
5126 			 * fixed in HW, and that ring is being used as Tx
5127 			 * completion ring. These errors are not related to
5128 			 * Tx completions, and should just be ignored
5129 			 */
5130 			wbm_internal_error = hal_get_wbm_internal_error(
5131 							hal_soc,
5132 							tx_comp_hal_desc);
5133 
5134 			if (wbm_internal_error) {
5135 				dp_err_rl("Tx comp wbm_internal_error!!");
5136 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
5137 
5138 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
5139 								buffer_src)
5140 					dp_handle_wbm_internal_error(
5141 						soc,
5142 						tx_comp_hal_desc,
5143 						hal_tx_comp_get_buffer_type(
5144 							tx_comp_hal_desc));
5145 
5146 			} else {
5147 				dp_err_rl("Tx comp wbm_internal_error false");
5148 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
5149 			}
5150 			continue;
5151 		}
5152 
5153 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
5154 							       tx_comp_hal_desc,
5155 							       &tx_desc);
5156 		if (!tx_desc) {
5157 			dp_err("unable to retrieve tx_desc!");
5158 			QDF_BUG(0);
5159 			continue;
5160 		}
5161 		tx_desc->buffer_src = buffer_src;
5162 		/*
5163 		 * If the release source is FW, process the HTT status
5164 		 */
5165 		if (qdf_unlikely(buffer_src ==
5166 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
5167 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
5168 
5169 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
5170 					htt_tx_status);
5171 			/* Collect hw completion contents */
5172 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5173 					      &tx_desc->comp, 1);
5174 			soc->arch_ops.dp_tx_process_htt_completion(
5175 							soc,
5176 							tx_desc,
5177 							htt_tx_status,
5178 							ring_id);
5179 		} else {
5180 			tx_desc->tx_status =
5181 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
5182 			tx_desc->buffer_src = buffer_src;
5183 			/*
5184 			 * If the fast completion mode is enabled, extended
5185 			 * metadata from the descriptor is not copied
5186 			 */
5187 			if (qdf_likely(tx_desc->flags &
5188 						DP_TX_DESC_FLAG_SIMPLE))
5189 				goto add_to_pool;
5190 
5191 			/*
5192 			 * If the descriptor is already freed in vdev_detach,
5193 			 * continue to next descriptor
5194 			 */
5195 			if (qdf_unlikely
5196 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
5197 				 !tx_desc->flags)) {
5198 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
5199 						   tx_desc->id);
5200 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
5201 				dp_tx_desc_check_corruption(tx_desc);
5202 				continue;
5203 			}
5204 
5205 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
5206 				dp_tx_comp_info_rl("pdev in down state %d",
5207 						   tx_desc->id);
5208 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
5209 				dp_tx_comp_free_buf(soc, tx_desc);
5210 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
5211 				goto next_desc;
5212 			}
5213 
5214 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
5215 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
5216 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
5217 						 tx_desc->flags, tx_desc->id);
5218 				qdf_assert_always(0);
5219 			}
5220 
5221 			/* Collect hw completion contents */
5222 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
5223 					      &tx_desc->comp, 1);
5224 add_to_pool:
5225 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
5226 
5227 			/* First ring descriptor on the cycle */
5228 			if (!head_desc) {
5229 				head_desc = tx_desc;
5230 				tail_desc = tx_desc;
5231 			}
5232 
5233 			tail_desc->next = tx_desc;
5234 			tx_desc->next = NULL;
5235 			tail_desc = tx_desc;
5236 		}
5237 next_desc:
5238 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
5239 
5240 		/*
5241 		 * If the processed packet count exceeds the given quota,
5242 		 * stop processing
5243 		 */
5244 
5245 		count++;
5246 
5247 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
5248 					       num_avail_for_reap,
5249 					       hal_ring_hdl,
5250 					       &last_prefetched_hw_desc,
5251 					       &last_prefetched_sw_desc);
5252 
5253 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
5254 			break;
5255 	}
5256 
5257 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
5258 
5259 	/* Process the reaped descriptors */
5260 	if (head_desc)
5261 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
5262 
5263 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
5264 
5265 	/*
5266 	 * If we are processing in the near-full condition, there are 3 scenarios:
5267 	 * 1) Ring entries have reached the critical state
5268 	 * 2) Ring entries are still near the high threshold
5269 	 * 3) Ring entries are below the safe level
5270 	 *
5271 	 * One more loop will move the state to normal processing and yield
5272 	 */
5273 	if (ring_near_full)
5274 		goto more_data;
5275 
5276 	if (dp_tx_comp_enable_eol_data_check(soc)) {
5277 
5278 		if (num_processed >= quota)
5279 			force_break = true;
5280 
5281 		if (!force_break &&
5282 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5283 						  hal_ring_hdl)) {
5284 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5285 			if (!hif_exec_should_yield(soc->hif_handle,
5286 						   int_ctx->dp_intr_id))
5287 				goto more_data;
5288 		}
5289 	}
5290 	DP_TX_HIST_STATS_PER_PDEV();
5291 
5292 	return num_processed;
5293 }
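
/*
 * Editor's note: a minimal sketch of how the completion handler above is
 * typically driven from an interrupt/NAPI-style service routine. The
 * budget handling and the use of int_ctx->tx_ring_mask here are
 * assumptions; the real scheduling lives in the dp interrupt service code.
 *
 *	static uint32_t example_service_tx_comp(struct dp_intr *int_ctx,
 *						struct dp_soc *soc,
 *						uint32_t budget)
 *	{
 *		uint32_t work_done = 0;
 *		uint8_t ring;
 *
 *		for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
 *			if (!(int_ctx->tx_ring_mask & (1 << ring)))
 *				continue;
 *			work_done += dp_tx_comp_handler(int_ctx, soc,
 *					soc->tx_comp_ring[ring].hal_srng,
 *					ring, budget - work_done);
 *			if (work_done >= budget)
 *				break;
 *		}
 *		return work_done;
 *	}
 */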
5294 
5295 #ifdef FEATURE_WLAN_TDLS
5296 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5297 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5298 {
5299 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5300 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5301 						     DP_MOD_ID_TDLS);
5302 
5303 	if (!vdev) {
5304 		dp_err("vdev handle for id %d is NULL", vdev_id);
5305 		return NULL;
5306 	}
5307 
5308 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5309 		vdev->is_tdls_frame = true;
5310 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5311 
5312 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5313 }
5314 #endif
5315 
5316 /**
5317  * dp_tx_vdev_attach() - attach vdev to dp tx
5318  * @vdev: virtual device instance
5319  *
5320  * Return: QDF_STATUS_SUCCESS: success
5321  *         QDF_STATUS_E_RESOURCES: Error return
5322  */
5323 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5324 {
5325 	int pdev_id;
5326 	/*
5327 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5328 	 */
5329 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5330 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5331 
5332 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5333 				       vdev->vdev_id);
5334 
5335 	pdev_id =
5336 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5337 						       vdev->pdev->pdev_id);
5338 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5339 
5340 	/*
5341 	 * Set HTT Extension Valid bit to 0 by default
5342 	 */
5343 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5344 
5345 	dp_tx_vdev_update_search_flags(vdev);
5346 
5347 	return QDF_STATUS_SUCCESS;
5348 }
5349 
5350 #ifndef FEATURE_WDS
5351 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5352 {
5353 	return false;
5354 }
5355 #endif
5356 
5357 /**
5358  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5359  * @vdev: virtual device instance
5360  *
5361  * Return: void
5362  *
5363  */
5364 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5365 {
5366 	struct dp_soc *soc = vdev->pdev->soc;
5367 
5368 	/*
5369 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
5370 	 * for TDLS link
5371 	 *
5372 	 * Enable AddrY (SA based search) only for non-WDS STA and
5373 	 * ProxySTA VAP (in HKv1) modes.
5374 	 *
5375 	 * In all other VAP modes, only DA based search should be
5376 	 * enabled
5377 	 */
5378 	if (vdev->opmode == wlan_op_mode_sta &&
5379 	    vdev->tdls_link_connected)
5380 		vdev->hal_desc_addr_search_flags =
5381 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5382 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5383 		 !dp_tx_da_search_override(vdev))
5384 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5385 	else
5386 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5387 
5388 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
5389 		vdev->search_type = soc->sta_mode_search_policy;
5390 	else
5391 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5392 }
5393 
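/*
 * Illustrative sketch (not part of the driver build): the flag selection
 * performed by dp_tx_vdev_update_search_flags() for two common opmodes.
 * The helper name is hypothetical and only restates the policy above.
 */
#if 0
static void dp_tx_search_flags_example_sketch(struct dp_vdev *vdev)
{
	dp_tx_vdev_update_search_flags(vdev);

	if (vdev->opmode == wlan_op_mode_sta && vdev->tdls_link_connected)
		/* TDLS STA: both ADDRX (DA) and ADDRY (SA) search enabled */
		qdf_assert(vdev->hal_desc_addr_search_flags ==
			   (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN));
	else if (vdev->opmode == wlan_op_mode_ap)
		/* AP and other non-STA modes: DA-based search only */
		qdf_assert(vdev->hal_desc_addr_search_flags ==
			   HAL_TX_DESC_ADDRX_EN);
}
#endif
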
5394 static inline bool
5395 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5396 			  struct dp_vdev *vdev,
5397 			  struct dp_tx_desc_s *tx_desc)
5398 {
5399 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
5400 		return false;
5401 
5402 	/*
5403 	 * If a vdev is given, only check whether the desc's
5404 	 * vdev matches. If vdev is NULL, check whether the
5405 	 * desc's pdev matches.
5406 	 */
5407 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
5408 		(tx_desc->pdev == pdev);
5409 }
5410 
5411 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5412 /**
5413  * dp_tx_desc_flush() - release resources associated
5414  *                      with a TX Desc
5415  *
5416  * @pdev: Handle to DP pdev structure
5417  * @vdev: virtual device instance
5418  * NULL: no specific Vdev is required; check all allocated TX descs
5419  * on this pdev.
5420  * Non-NULL: only check the allocated TX descs associated with this Vdev.
5421  *
5422  * @force_free:
5423  * true: flush the TX descs.
5424  * false: only reset the Vdev in each allocated TX desc
5425  * that is associated with the current Vdev.
5426  *
5427  * This function walks the TX desc pools to flush
5428  * the outstanding TX data or to reset the Vdev in the associated TX
5429  * descs.
5430  */
5431 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5432 		      bool force_free)
5433 {
5434 	uint8_t i;
5435 	uint32_t j;
5436 	uint32_t num_desc, page_id, offset;
5437 	uint16_t num_desc_per_page;
5438 	struct dp_soc *soc = pdev->soc;
5439 	struct dp_tx_desc_s *tx_desc = NULL;
5440 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5441 
5442 	if (!vdev && !force_free) {
5443 		dp_err("Reset TX desc vdev, Vdev param is required!");
5444 		return;
5445 	}
5446 
5447 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
5448 		tx_desc_pool = &soc->tx_desc[i];
5449 		if (!(tx_desc_pool->pool_size) ||
5450 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
5451 		    !(tx_desc_pool->desc_pages.cacheable_pages))
5452 			continue;
5453 
5454 		/*
5455 		 * Take the flow pool lock in case the pool is freed when
5456 		 * all tx_desc get recycled during TX completion handling.
5457 		 * This is not needed for a force flush because:
5458 		 * a. a double lock would occur if dp_tx_desc_release is
5459 		 *    also trying to acquire it.
5460 		 * b. dp interrupts are already disabled before the force
5461 		 *    TX desc flush in dp_pdev_deinit().
5462 		 */
5463 		if (!force_free)
5464 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
5465 		num_desc = tx_desc_pool->pool_size;
5466 		num_desc_per_page =
5467 			tx_desc_pool->desc_pages.num_element_per_page;
5468 		for (j = 0; j < num_desc; j++) {
5469 			page_id = j / num_desc_per_page;
5470 			offset = j % num_desc_per_page;
5471 
5472 			if (qdf_unlikely(!(tx_desc_pool->
5473 					 desc_pages.cacheable_pages)))
5474 				break;
5475 
5476 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5477 
5478 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5479 				/*
5480 				 * Free TX desc if force free is
5481 				 * required, otherwise only reset vdev
5482 				 * in this TX desc.
5483 				 */
5484 				if (force_free) {
5485 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5486 					dp_tx_comp_free_buf(soc, tx_desc);
5487 					dp_tx_desc_release(tx_desc, i);
5488 				} else {
5489 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5490 				}
5491 			}
5492 		}
5493 		if (!force_free)
5494 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5495 	}
5496 }
5497 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5498 /**
5499  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5500  * dp_tx_desc_reset_vdev() - mark the vdev id in a TX Desc as invalid
5501  *
5502  * @soc: Handle to DP soc structure
5503  * @tx_desc: pointer to one TX desc
5504  */
5505 static inline void
5506 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5507 		      uint8_t desc_pool_id)
5508 {
5509 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5510 
5511 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5512 
5513 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5514 }
5515 
5516 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5517 		      bool force_free)
5518 {
5519 	uint8_t i, num_pool;
5520 	uint32_t j;
5521 	uint32_t num_desc, page_id, offset;
5522 	uint16_t num_desc_per_page;
5523 	struct dp_soc *soc = pdev->soc;
5524 	struct dp_tx_desc_s *tx_desc = NULL;
5525 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5526 
5527 	if (!vdev && !force_free) {
5528 		dp_err("Reset TX desc vdev, Vdev param is required!");
5529 		return;
5530 	}
5531 
5532 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5533 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5534 
5535 	for (i = 0; i < num_pool; i++) {
5536 		tx_desc_pool = &soc->tx_desc[i];
5537 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5538 			continue;
5539 
5540 		num_desc_per_page =
5541 			tx_desc_pool->desc_pages.num_element_per_page;
5542 		for (j = 0; j < num_desc; j++) {
5543 			page_id = j / num_desc_per_page;
5544 			offset = j % num_desc_per_page;
5545 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5546 
5547 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5548 				if (force_free) {
5549 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5550 					dp_tx_comp_free_buf(soc, tx_desc);
5551 					dp_tx_desc_release(tx_desc, i);
5552 				} else {
5553 					dp_tx_desc_reset_vdev(soc, tx_desc,
5554 							      i);
5555 				}
5556 			}
5557 		}
5558 	}
5559 }
5560 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5561 
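/*
 * Illustrative sketch (not part of the driver build): the two documented
 * usages of dp_tx_desc_flush(). The wrapper name is hypothetical; the
 * real callers are the vdev detach and pdev deinit paths.
 */
#if 0
static void dp_tx_desc_flush_usage_sketch(struct dp_pdev *pdev,
					  struct dp_vdev *vdev)
{
	/*
	 * vdev teardown: keep the descriptors, only clear their vdev
	 * association so later completions do not reference a dead vdev.
	 */
	dp_tx_desc_flush(pdev, vdev, false);

	/*
	 * pdev deinit: force-free every allocated descriptor on this
	 * pdev, releasing the attached buffers as well.
	 */
	dp_tx_desc_flush(pdev, NULL, true);
}
#endif
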
5562 /**
5563  * dp_tx_vdev_detach() - detach vdev from dp tx
5564  * @vdev: virtual device instance
5565  *
5566  * Return: QDF_STATUS_SUCCESS: success
5567  *         QDF_STATUS_E_RESOURCES: Error return
5568  */
5569 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5570 {
5571 	struct dp_pdev *pdev = vdev->pdev;
5572 
5573 	/* Reset the Vdev in TX descs associated with this Vdev */
5574 	dp_tx_desc_flush(pdev, vdev, false);
5575 
5576 	return QDF_STATUS_SUCCESS;
5577 }
5578 
5579 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5580 /* Pools will be allocated dynamically */
5581 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5582 					   int num_desc)
5583 {
5584 	uint8_t i;
5585 
5586 	for (i = 0; i < num_pool; i++) {
5587 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5588 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5589 	}
5590 
5591 	return QDF_STATUS_SUCCESS;
5592 }
5593 
5594 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5595 					  uint32_t num_desc)
5596 {
5597 	return QDF_STATUS_SUCCESS;
5598 }
5599 
5600 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5601 {
5602 }
5603 
5604 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5605 {
5606 	uint8_t i;
5607 
5608 	for (i = 0; i < num_pool; i++)
5609 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5610 }
5611 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5612 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5613 					   uint32_t num_desc)
5614 {
5615 	uint8_t i, count;
5616 
5617 	/* Allocate software Tx descriptor pools */
5618 	for (i = 0; i < num_pool; i++) {
5619 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5620 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5621 				  FL("Tx Desc Pool alloc %d failed %pK"),
5622 				  i, soc);
5623 			goto fail;
5624 		}
5625 	}
5626 	return QDF_STATUS_SUCCESS;
5627 
5628 fail:
5629 	for (count = 0; count < i; count++)
5630 		dp_tx_desc_pool_free(soc, count);
5631 
5632 	return QDF_STATUS_E_NOMEM;
5633 }
5634 
5635 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5636 					  uint32_t num_desc)
5637 {
5638 	uint8_t i;
5639 	for (i = 0; i < num_pool; i++) {
5640 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5641 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5642 				  FL("Tx Desc Pool init %d failed %pK"),
5643 				  i, soc);
5644 			return QDF_STATUS_E_NOMEM;
5645 		}
5646 	}
5647 	return QDF_STATUS_SUCCESS;
5648 }
5649 
5650 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5651 {
5652 	uint8_t i;
5653 
5654 	for (i = 0; i < num_pool; i++)
5655 		dp_tx_desc_pool_deinit(soc, i);
5656 }
5657 
5658 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5659 {
5660 	uint8_t i;
5661 
5662 	for (i = 0; i < num_pool; i++)
5663 		dp_tx_desc_pool_free(soc, i);
5664 }
5665 
5666 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5667 
5668 /**
5669  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5670  * @soc: core txrx main context
5671  * @num_pool: number of pools
5672  *
5673  */
5674 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5675 {
5676 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5677 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5678 }
5679 
5680 /**
5681  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5682  * @soc: core txrx main context
5683  * @num_pool: number of pools
5684  *
5685  */
5686 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5687 {
5688 	dp_tx_tso_desc_pool_free(soc, num_pool);
5689 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5690 }
5691 
5692 /**
5693  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5694  * @soc: core txrx main context
5695  *
5696  * This function frees all tx related descriptors as below
5697  * 1. Regular TX descriptors (static pools)
5698  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5699  * 3. TSO descriptors
5700  *
5701  */
5702 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5703 {
5704 	uint8_t num_pool;
5705 
5706 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5707 
5708 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5709 	dp_tx_ext_desc_pool_free(soc, num_pool);
5710 	dp_tx_delete_static_pools(soc, num_pool);
5711 }
5712 
5713 /**
5714  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5715  * @soc: core txrx main context
5716  *
5717  * This function de-initializes all tx related descriptors as below
5718  * 1. Regular TX descriptors (static pools)
5719  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5720  * 3. TSO descriptors
5721  *
5722  */
5723 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5724 {
5725 	uint8_t num_pool;
5726 
5727 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5728 
5729 	dp_tx_flow_control_deinit(soc);
5730 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5731 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5732 	dp_tx_deinit_static_pools(soc, num_pool);
5733 }
5734 
5735 /**
5736  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO common descriptor pools
5737  * @soc: DP soc handle
5738  * @num_pool: Number of pools
5739  * @num_desc: Number of descriptors
5740  *
5739  * Reserve TSO descriptor buffers
5740  *
5741  * Return: QDF_STATUS_E_FAILURE on failure or
5742  * QDF_STATUS_SUCCESS on success
5743  */
5744 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5745 					 uint8_t num_pool,
5746 					 uint32_t num_desc)
5747 {
5748 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5749 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5750 		return QDF_STATUS_E_FAILURE;
5751 	}
5752 
5753 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5754 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5755 		       num_pool, soc);
5756 		return QDF_STATUS_E_FAILURE;
5757 	}
5758 	return QDF_STATUS_SUCCESS;
5759 }
5760 
5761 /**
5762  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5763  * @soc: DP soc handle
5764  * @num_pool: Number of pools
5765  * @num_desc: Number of descriptors
5766  *
5767  * Initialize TSO descriptor pools
5768  *
5769  * Return: QDF_STATUS_E_FAILURE on failure or
5770  * QDF_STATUS_SUCCESS on success
5771  */
5773 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5774 					uint8_t num_pool,
5775 					uint32_t num_desc)
5776 {
5777 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5778 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5779 		return QDF_STATUS_E_FAILURE;
5780 	}
5781 
5782 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5783 		dp_err("TSO Num of seg Pool init %d failed %pK",
5784 		       num_pool, soc);
5785 		return QDF_STATUS_E_FAILURE;
5786 	}
5787 	return QDF_STATUS_SUCCESS;
5788 }
5789 
5790 /**
5791  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5792  * @soc: core txrx main context
5793  *
5794  * This function allocates memory for following descriptor pools
5795  * 1. regular sw tx descriptor pools (static pools)
5796  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5797  * 3. TSO descriptor pools
5798  *
5799  * Return: QDF_STATUS_SUCCESS: success
5800  *         QDF_STATUS_E_RESOURCES: Error return
5801  */
5802 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5803 {
5804 	uint8_t num_pool;
5805 	uint32_t num_desc;
5806 	uint32_t num_ext_desc;
5807 
5808 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5809 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5810 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5811 
5812 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5813 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5814 		  __func__, num_pool, num_desc);
5815 
5816 	if ((num_pool > MAX_TXDESC_POOLS) ||
5817 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5818 		goto fail1;
5819 
5820 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5821 		goto fail1;
5822 
5823 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5824 		goto fail2;
5825 
5826 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5827 		return QDF_STATUS_SUCCESS;
5828 
5829 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5830 		goto fail3;
5831 
5832 	return QDF_STATUS_SUCCESS;
5833 
5834 fail3:
5835 	dp_tx_ext_desc_pool_free(soc, num_pool);
5836 fail2:
5837 	dp_tx_delete_static_pools(soc, num_pool);
5838 fail1:
5839 	return QDF_STATUS_E_RESOURCES;
5840 }
5841 
5842 /**
5843  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5844  * @soc: core txrx main context
5845  *
5846  * This function initializes the following TX descriptor pools
5847  * 1. regular sw tx descriptor pools (static pools)
5848  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5849  * 3. TSO descriptor pools
5850  *
5851  * Return: QDF_STATUS_SUCCESS: success
5852  *	   QDF_STATUS_E_RESOURCES: Error return
5853  */
5854 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5855 {
5856 	uint8_t num_pool;
5857 	uint32_t num_desc;
5858 	uint32_t num_ext_desc;
5859 
5860 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5861 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5862 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5863 
5864 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5865 		goto fail1;
5866 
5867 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5868 		goto fail2;
5869 
5870 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5871 		return QDF_STATUS_SUCCESS;
5872 
5873 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5874 		goto fail3;
5875 
5876 	dp_tx_flow_control_init(soc);
5877 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5878 	return QDF_STATUS_SUCCESS;
5879 
5880 fail3:
5881 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5882 fail2:
5883 	dp_tx_deinit_static_pools(soc, num_pool);
5884 fail1:
5885 	return QDF_STATUS_E_RESOURCES;
5886 }
5887 
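/*
 * Illustrative sketch (not part of the driver build): the soc-level
 * alloc/init versus deinit/free pairing of the SW TX descriptor pools.
 * The wrapper name and error handling are hypothetical.
 */
#if 0
static QDF_STATUS dp_soc_tx_pools_lifecycle_sketch(struct dp_soc *soc)
{
	/* Reserve memory first, then initialize the pools */
	if (dp_soc_tx_desc_sw_pools_alloc(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	if (dp_soc_tx_desc_sw_pools_init(soc) != QDF_STATUS_SUCCESS) {
		dp_soc_tx_desc_sw_pools_free(soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* ... data path runs ... */

	/* Teardown mirrors bring-up in reverse order */
	dp_soc_tx_desc_sw_pools_deinit(soc);
	dp_soc_tx_desc_sw_pools_free(soc);

	return QDF_STATUS_SUCCESS;
}
#endif
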
5888 /**
5889  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5890  * @txrx_soc: dp soc handle
5891  *
5892  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5893  *			QDF_STATUS_E_FAILURE
5894  */
5895 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5896 {
5897 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5898 	uint8_t num_pool;
5899 	uint32_t num_desc;
5900 	uint32_t num_ext_desc;
5901 
5902 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5903 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5904 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5905 
5906 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5907 		return QDF_STATUS_E_FAILURE;
5908 
5909 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5910 		return QDF_STATUS_E_FAILURE;
5911 
5912 	return QDF_STATUS_SUCCESS;
5913 }
5914 
5915 /**
5916  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5917  * @txrx_soc: dp soc handle
5918  *
5919  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5920  */
5921 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5922 {
5923 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5924 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5925 
5926 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5927 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5928 
5929 	return QDF_STATUS_SUCCESS;
5930 }
5931 
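/*
 * Illustrative sketch (not part of the driver build): when TSO descriptor
 * attach is deferred via wlan_cfg_is_tso_desc_attach_defer(), the TSO
 * pools are skipped during soc bring-up and created later on demand
 * through dp_tso_soc_attach(). The wrapper name is hypothetical.
 */
#if 0
static QDF_STATUS dp_tso_deferred_attach_sketch(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Nothing to do if the pools were already set up at soc init */
	if (!wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Allocate and initialize the TSO desc and num-seg pools */
	return dp_tso_soc_attach(txrx_soc);
}
#endif
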
5932 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
5933 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
5934 			  enum qdf_pkt_timestamp_index index, uint64_t time,
5935 			  qdf_nbuf_t nbuf)
5936 {
5937 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
5938 		uint64_t tsf_time;
5939 
5940 		if (vdev->get_tsf_time) {
5941 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
5942 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
5943 		}
5944 	}
5945 }
5946 
5947 void dp_pkt_get_timestamp(uint64_t *time)
5948 {
5949 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
5950 		*time = qdf_get_log_timestamp();
5951 }
5952 #endif
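
/*
 * Illustrative sketch (not part of the driver build): pairing the two
 * CONFIG_DP_PKT_ADD_TIMESTAMP helpers above on the TX path. The wrapper
 * name is hypothetical; real callers pass one of the
 * qdf_pkt_timestamp_index enumerators for @index.
 */
#if 0
static void dp_tx_pkt_timestamp_sketch(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				       enum qdf_pkt_timestamp_index index)
{
	uint64_t time = 0;

	/* Cheap no-op unless the packet timestamp feature is enabled */
	dp_pkt_get_timestamp(&time);

	/* Converts to TSF via vdev->get_tsf_time and tags the nbuf */
	dp_pkt_add_timestamp(vdev, index, time, nbuf);
}
#endif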
5953 
5954