xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision b6ef4fe306b6abd9485ff7d6ca12da80552e7caf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 
54 /* Flag to skip CCE classify when mesh or tid override enabled */
55 #define DP_TX_SKIP_CCE_CLASSIFY \
56 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
57 
58 /* TODO Add support in TSO */
59 #define DP_DESC_NUM_FRAG(x) 0
60 
61 /* disable TQM_BYPASS */
62 #define TQM_BYPASS_WAR 0
63 
64 /* invalid peer id for reinject */
65 #define DP_INVALID_PEER 0XFFFE
66 
67 #define DP_RETRY_COUNT 7
68 
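/*
 * When WLAN_MCAST_MLO is enabled, the V2 TCL metadata layout is used, so the
 * DP_TX_TCL_METADATA_* wrappers below resolve to the HTT V2 accessors;
 * otherwise they resolve to the legacy HTT TCL metadata accessors.
 */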
69 #ifdef WLAN_MCAST_MLO
70 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
71 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
72 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
73 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
74 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
75 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
76 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
77 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
78 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
79 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
80 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
81 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
82 #define DP_TCL_METADATA_TYPE_PEER_BASED \
83 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
84 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
85 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
86 #else
87 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
88 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
89 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
90 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
91 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
92 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
93 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
94 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
95 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
96 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
97 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
98 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
99 #define DP_TCL_METADATA_TYPE_PEER_BASED \
100 	HTT_TCL_METADATA_TYPE_PEER_BASED
101 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
102 	HTT_TCL_METADATA_TYPE_VDEV_BASED
103 #endif
104 
105 /* mapping between hal encrypt type and cdp_sec_type */
106 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
107 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
108 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
109 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
110 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
111 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
112 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
113 					  HAL_TX_ENCRYPT_TYPE_WAPI,
114 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
115 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
116 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
117 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
118 qdf_export_symbol(sec_type_map);
119 
120 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
121 /**
122  * dp_update_tx_desc_stats() - Track the peak outstanding tx desc count
123  * on the pdev and update the outstanding count and peak
124  * values for the sysfs mem stats
125  * @pdev: DP pdev handle
126  *
127  * Return: void
128  */
129 static inline void
130 dp_update_tx_desc_stats(struct dp_pdev *pdev)
131 {
132 	int32_t tx_descs_cnt =
133 		qdf_atomic_read(&pdev->num_tx_outstanding);
134 	if (pdev->tx_descs_max < tx_descs_cnt)
135 		pdev->tx_descs_max = tx_descs_cnt;
136 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
137 				   pdev->tx_descs_max);
138 }
139 
140 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
141 
142 static inline void
143 dp_update_tx_desc_stats(struct dp_pdev *pdev)
144 {
145 }
146 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
147 
148 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
149 static inline
150 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
151 {
152 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
153 				     QDF_DMA_TO_DEVICE,
154 				     desc->nbuf->len);
155 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
156 }
157 
158 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
159 {
160 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
161 		qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
162 					     QDF_DMA_TO_DEVICE,
163 					     desc->nbuf->len);
164 }
165 #else
166 static inline
167 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
168 {
169 }
170 
171 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
172 {
173 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
174 				     QDF_DMA_TO_DEVICE, desc->nbuf->len);
175 }
176 #endif
177 
178 #ifdef QCA_TX_LIMIT_CHECK
179 /**
180  * dp_tx_limit_check - Check if allocated tx descriptors reached
181  * soc max limit and pdev max limit
182  * @vdev: DP vdev handle
183  *
184  * Return: true if allocated tx descriptors reached max configured value, else
185  * false
186  */
187 static inline bool
188 dp_tx_limit_check(struct dp_vdev *vdev)
189 {
190 	struct dp_pdev *pdev = vdev->pdev;
191 	struct dp_soc *soc = pdev->soc;
192 
193 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
194 			soc->num_tx_allowed) {
195 		dp_tx_info("queued packets are more than max tx, drop the frame");
196 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
197 		return true;
198 	}
199 
200 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
201 			pdev->num_tx_allowed) {
202 		dp_tx_info("queued packets are more than max tx, drop the frame");
203 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
204 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
205 		return true;
206 	}
207 	return false;
208 }
209 
210 /**
211  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
212  * reached soc max limit
213  * @vdev: DP vdev handle
214  *
215  * Return: true if allocated tx exception descriptors reached max configured value, else
216  * false
217  */
218 static inline bool
219 dp_tx_exception_limit_check(struct dp_vdev *vdev)
220 {
221 	struct dp_pdev *pdev = vdev->pdev;
222 	struct dp_soc *soc = pdev->soc;
223 
224 	if (qdf_atomic_read(&soc->num_tx_exception) >=
225 			soc->num_msdu_exception_desc) {
226 		dp_info("exc packets are more than max, drop the exc pkt");
227 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
228 		return true;
229 	}
230 
231 	return false;
232 }
233 
234 /**
235  * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
236  * @pdev: DP pdev handle
237  *
238  * Return: void
239  */
240 static inline void
241 dp_tx_outstanding_inc(struct dp_pdev *pdev)
242 {
243 	struct dp_soc *soc = pdev->soc;
244 
245 	qdf_atomic_inc(&pdev->num_tx_outstanding);
246 	qdf_atomic_inc(&soc->num_tx_outstanding);
247 	dp_update_tx_desc_stats(pdev);
248 }
249 
250 /**
251  * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
252  * @pdev: DP pdev handle
253  *
254  * Return: void
255  */
256 static inline void
257 dp_tx_outstanding_dec(struct dp_pdev *pdev)
258 {
259 	struct dp_soc *soc = pdev->soc;
260 
261 	qdf_atomic_dec(&pdev->num_tx_outstanding);
262 	qdf_atomic_dec(&soc->num_tx_outstanding);
263 	dp_update_tx_desc_stats(pdev);
264 }
265 
266 #else //QCA_TX_LIMIT_CHECK
267 static inline bool
268 dp_tx_limit_check(struct dp_vdev *vdev)
269 {
270 	return false;
271 }
272 
273 static inline bool
274 dp_tx_exception_limit_check(struct dp_vdev *vdev)
275 {
276 	return false;
277 }
278 
279 static inline void
280 dp_tx_outstanding_inc(struct dp_pdev *pdev)
281 {
282 	qdf_atomic_inc(&pdev->num_tx_outstanding);
283 	dp_update_tx_desc_stats(pdev);
284 }
285 
286 static inline void
287 dp_tx_outstanding_dec(struct dp_pdev *pdev)
288 {
289 	qdf_atomic_dec(&pdev->num_tx_outstanding);
290 	dp_update_tx_desc_stats(pdev);
291 }
292 #endif //QCA_TX_LIMIT_CHECK
293 
294 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
295 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
296 {
297 	enum dp_tx_event_type type;
298 
299 	if (flags & DP_TX_DESC_FLAG_FLUSH)
300 		type = DP_TX_DESC_FLUSH;
301 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
302 		type = DP_TX_COMP_UNMAP_ERR;
303 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
304 		type = DP_TX_COMP_UNMAP;
305 	else
306 		type = DP_TX_DESC_UNMAP;
307 
308 	return type;
309 }
310 
311 static inline void
312 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
313 		       qdf_nbuf_t skb, uint32_t sw_cookie,
314 		       enum dp_tx_event_type type)
315 {
316 	struct dp_tx_desc_event *entry;
317 	uint32_t idx;
318 
319 	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
320 		return;
321 
322 	switch (type) {
323 	case DP_TX_COMP_UNMAP:
324 	case DP_TX_COMP_UNMAP_ERR:
325 	case DP_TX_COMP_MSDU_EXT:
326 		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
327 						DP_TX_COMP_HISTORY_SIZE);
328 		entry = &soc->tx_comp_history->entry[idx];
329 		break;
330 	case DP_TX_DESC_MAP:
331 	case DP_TX_DESC_UNMAP:
332 	case DP_TX_DESC_COOKIE:
333 	case DP_TX_DESC_FLUSH:
334 		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
335 						DP_TX_TCL_HISTORY_SIZE);
336 		entry = &soc->tx_tcl_history->entry[idx];
337 		break;
338 	default:
339 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
340 		return;
341 	}
342 
343 	entry->skb = skb;
344 	entry->paddr = paddr;
345 	entry->sw_cookie = sw_cookie;
346 	entry->type = type;
347 	entry->ts = qdf_get_log_timestamp();
348 }
349 
350 static inline void
351 dp_tx_tso_seg_history_add(struct dp_soc *soc,
352 			  struct qdf_tso_seg_elem_t *tso_seg,
353 			  qdf_nbuf_t skb, uint32_t sw_cookie,
354 			  enum dp_tx_event_type type)
355 {
356 	int i;
357 
358 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
359 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
360 				       skb, sw_cookie, type);
361 	}
362 
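	/* Fragment 0 of the last segment is logged separately with a sentinel cookie */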
363 	if (!tso_seg->next)
364 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
365 				       skb, 0xFFFFFFFF, type);
366 }
367 
368 static inline void
369 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
370 		      qdf_nbuf_t skb, uint32_t sw_cookie,
371 		      enum dp_tx_event_type type)
372 {
373 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
374 	uint32_t num_segs = tso_info.num_segs;
375 
376 	while (num_segs) {
377 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
378 		curr_seg = curr_seg->next;
379 		num_segs--;
380 	}
381 }
382 
383 #else
384 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
385 {
386 	return DP_TX_DESC_INVAL_EVT;
387 }
388 
389 static inline void
390 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
391 		       qdf_nbuf_t skb, uint32_t sw_cookie,
392 		       enum dp_tx_event_type type)
393 {
394 }
395 
396 static inline void
397 dp_tx_tso_seg_history_add(struct dp_soc *soc,
398 			  struct qdf_tso_seg_elem_t *tso_seg,
399 			  qdf_nbuf_t skb, uint32_t sw_cookie,
400 			  enum dp_tx_event_type type)
401 {
402 }
403 
404 static inline void
405 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
406 		      qdf_nbuf_t skb, uint32_t sw_cookie,
407 		      enum dp_tx_event_type type)
408 {
409 }
410 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
411 
412 static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
413 
414 /**
415  * dp_is_tput_high() - Check if throughput is high
416  *
417  * @soc - core txrx main context
418  *
419  * The current function is based on the RTPM tput policy variable where RTPM is
420  * avoided based on throughput.
421  */
422 static inline int dp_is_tput_high(struct dp_soc *soc)
423 {
424 	return dp_get_rtpm_tput_policy_requirement(soc);
425 }
426 
427 #if defined(FEATURE_TSO)
428 /**
429  * dp_tx_tso_unmap_segment() - Unmap TSO segment
430  *
431  * @soc - core txrx main context
432  * @seg_desc - tso segment descriptor
433  * @num_seg_desc - tso number segment descriptor
434  */
435 static void dp_tx_tso_unmap_segment(
436 		struct dp_soc *soc,
437 		struct qdf_tso_seg_elem_t *seg_desc,
438 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
439 {
440 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
441 	if (qdf_unlikely(!seg_desc)) {
442 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
443 			 __func__, __LINE__);
444 		qdf_assert(0);
445 	} else if (qdf_unlikely(!num_seg_desc)) {
446 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
447 			 __func__, __LINE__);
448 		qdf_assert(0);
449 	} else {
450 		bool is_last_seg;
451 		/* no tso segment left to do dma unmap */
452 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
453 			return;
454 
455 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
456 					true : false;
457 		qdf_nbuf_unmap_tso_segment(soc->osdev,
458 					   seg_desc, is_last_seg);
459 		num_seg_desc->num_seg.tso_cmn_num_seg--;
460 	}
461 }
462 
463 /**
464  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
465  *                            back to the freelist
466  *
467  * @soc - soc device handle
468  * @tx_desc - Tx software descriptor
469  */
470 static void dp_tx_tso_desc_release(struct dp_soc *soc,
471 				   struct dp_tx_desc_s *tx_desc)
472 {
473 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
474 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
475 		dp_tx_err("TSO desc is NULL!");
476 		qdf_assert(0);
477 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
478 		dp_tx_err("TSO num desc is NULL!");
479 		qdf_assert(0);
480 	} else {
481 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
482 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
483 				msdu_ext_desc->tso_num_desc;
484 
485 		/* Add the tso num segment into the free list */
486 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
487 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
488 					    tx_desc->msdu_ext_desc->
489 					    tso_num_desc);
490 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
491 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
492 		}
493 
494 		/* Add the tso segment into the free list*/
495 		dp_tx_tso_desc_free(soc,
496 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
497 				    tso_desc);
498 		tx_desc->msdu_ext_desc->tso_desc = NULL;
499 	}
500 }
501 #else
502 static void dp_tx_tso_unmap_segment(
503 		struct dp_soc *soc,
504 		struct qdf_tso_seg_elem_t *seg_desc,
505 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
506 
507 {
508 }
509 
510 static void dp_tx_tso_desc_release(struct dp_soc *soc,
511 				   struct dp_tx_desc_s *tx_desc)
512 {
513 }
514 #endif
515 
516 /**
517  * dp_tx_desc_release() - Release Tx Descriptor
518  * @tx_desc: Tx Descriptor
519  * @desc_pool_id: Descriptor Pool ID
520  *
521  * Deallocate all resources attached to Tx descriptor and free the Tx
522  * descriptor.
523  *
524  * Return: void
525  */
526 void
527 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
528 {
529 	struct dp_pdev *pdev = tx_desc->pdev;
530 	struct dp_soc *soc;
531 	uint8_t comp_status = 0;
532 
533 	qdf_assert(pdev);
534 
535 	soc = pdev->soc;
536 
537 	dp_tx_outstanding_dec(pdev);
538 
539 	if (tx_desc->msdu_ext_desc) {
540 		if (tx_desc->frm_type == dp_tx_frm_tso)
541 			dp_tx_tso_desc_release(soc, tx_desc);
542 
543 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
544 			dp_tx_me_free_buf(tx_desc->pdev,
545 					  tx_desc->msdu_ext_desc->me_buffer);
546 
547 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
548 	}
549 
550 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
551 		qdf_atomic_dec(&soc->num_tx_exception);
552 
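	/*
	 * The release reason is read from the completion descriptor only when
	 * the release source is TQM; descriptors released by the FW are
	 * reported with HAL_TX_COMP_RELEASE_REASON_FW.
	 */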
553 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
554 				tx_desc->buffer_src)
555 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
556 							     soc->hal_soc);
557 	else
558 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
559 
560 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
561 		    tx_desc->id, comp_status,
562 		    qdf_atomic_read(&pdev->num_tx_outstanding));
563 
564 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
565 	return;
566 }
567 
568 /**
569  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
570  * @vdev: DP vdev Handle
571  * @nbuf: skb
572  * @msdu_info: msdu_info required to create HTT metadata
573  *
574  * Prepares and fills HTT metadata in the frame pre-header for special frames
575  * that should be transmitted using varying transmit parameters.
576  * There are 2 VDEV modes that currently need this special metadata -
577  *  1) Mesh Mode
578  *  2) DSRC Mode
579  *
580  * Return: HTT metadata size
581  *
582  */
583 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
584 					  struct dp_tx_msdu_info_s *msdu_info)
585 {
586 	uint32_t *meta_data = msdu_info->meta_data;
587 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
588 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
589 
590 	uint8_t htt_desc_size;
591 
592 	/* Size rounded up to a multiple of 8 bytes */
593 	uint8_t htt_desc_size_aligned;
594 
595 	uint8_t *hdr = NULL;
596 
597 	/*
598 	 * Metadata - HTT MSDU Extension header
599 	 */
600 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
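	/* (size + 7) & ~0x7 rounds the size up to the next multiple of 8 bytes */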
601 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
602 
603 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
604 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
605 							   meta_data[0])) {
606 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
607 				 htt_desc_size_aligned)) {
608 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
609 							 htt_desc_size_aligned);
610 			if (!nbuf) {
611 				/*
612 				 * qdf_nbuf_realloc_headroom won't do skb_clone
613 				 * as skb_realloc_headroom does. so, no free is
614 				 * needed here.
615 				 */
616 				DP_STATS_INC(vdev,
617 					     tx_i.dropped.headroom_insufficient,
618 					     1);
619 				qdf_print(" %s[%d] skb_realloc_headroom failed",
620 					  __func__, __LINE__);
621 				return 0;
622 			}
623 		}
624 		/* Fill and add HTT metaheader */
625 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
626 		if (!hdr) {
627 			dp_tx_err("Error in filling HTT metadata");
628 
629 			return 0;
630 		}
631 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
632 
633 	} else if (vdev->opmode == wlan_op_mode_ocb) {
634 		/* Todo - Add support for DSRC */
635 	}
636 
637 	return htt_desc_size_aligned;
638 }
639 
640 /**
641  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
642  * @tso_seg: TSO segment to process
643  * @ext_desc: Pointer to MSDU extension descriptor
644  *
645  * Return: void
646  */
647 #if defined(FEATURE_TSO)
648 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
649 		void *ext_desc)
650 {
651 	uint8_t num_frag;
652 	uint32_t tso_flags;
653 
654 	/*
655 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
656 	 * tcp_flag_mask
657 	 *
658 	 * Checksum enable flags are set in TCL descriptor and not in Extension
659 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
660 	 */
661 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
662 
663 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
664 
665 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
666 		tso_seg->tso_flags.ip_len);
667 
668 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
669 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
670 
671 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
672 		uint32_t lo = 0;
673 		uint32_t hi = 0;
674 
675 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
676 				  (tso_seg->tso_frags[num_frag].length));
677 
678 		qdf_dmaaddr_to_32s(
679 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
680 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
681 			tso_seg->tso_frags[num_frag].length);
682 	}
683 
684 	return;
685 }
686 #else
687 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
688 		void *ext_desc)
689 {
690 	return;
691 }
692 #endif
693 
694 #if defined(FEATURE_TSO)
695 /**
696  * dp_tx_free_tso_seg_list() - Loop through the tso segments
697  *                             allocated and free them
698  *
699  * @soc: soc handle
700  * @free_seg: list of tso segments
701  * @msdu_info: msdu descriptor
702  *
703  * Return - void
704  */
705 static void dp_tx_free_tso_seg_list(
706 		struct dp_soc *soc,
707 		struct qdf_tso_seg_elem_t *free_seg,
708 		struct dp_tx_msdu_info_s *msdu_info)
709 {
710 	struct qdf_tso_seg_elem_t *next_seg;
711 
712 	while (free_seg) {
713 		next_seg = free_seg->next;
714 		dp_tx_tso_desc_free(soc,
715 				    msdu_info->tx_queue.desc_pool_id,
716 				    free_seg);
717 		free_seg = next_seg;
718 	}
719 }
720 
721 /**
722  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
723  *                                 allocated and free them
724  *
725  * @soc:  soc handle
726  * @free_num_seg: list of tso number segments
727  * @msdu_info: msdu descriptor
728  * Return - void
729  */
730 static void dp_tx_free_tso_num_seg_list(
731 		struct dp_soc *soc,
732 		struct qdf_tso_num_seg_elem_t *free_num_seg,
733 		struct dp_tx_msdu_info_s *msdu_info)
734 {
735 	struct qdf_tso_num_seg_elem_t *next_num_seg;
736 
737 	while (free_num_seg) {
738 		next_num_seg = free_num_seg->next;
739 		dp_tso_num_seg_free(soc,
740 				    msdu_info->tx_queue.desc_pool_id,
741 				    free_num_seg);
742 		free_num_seg = next_num_seg;
743 	}
744 }
745 
746 /**
747  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
748  *                              do dma unmap for each segment
749  *
750  * @soc: soc handle
751  * @free_seg: list of tso segments
752  * @num_seg_desc: tso number segment descriptor
753  *
754  * Return - void
755  */
756 static void dp_tx_unmap_tso_seg_list(
757 		struct dp_soc *soc,
758 		struct qdf_tso_seg_elem_t *free_seg,
759 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
760 {
761 	struct qdf_tso_seg_elem_t *next_seg;
762 
763 	if (qdf_unlikely(!num_seg_desc)) {
764 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
765 		return;
766 	}
767 
768 	while (free_seg) {
769 		next_seg = free_seg->next;
770 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
771 		free_seg = next_seg;
772 	}
773 }
774 
775 #ifdef FEATURE_TSO_STATS
776 /**
777  * dp_tso_get_stats_idx() - Retrieve the tso packet id
778  * @pdev: pdev handle
779  *
780  * Return: id
781  */
782 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
783 {
784 	uint32_t stats_idx;
785 
786 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
787 						% CDP_MAX_TSO_PACKETS);
788 	return stats_idx;
789 }
790 #else
791 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
792 {
793 	return 0;
794 }
795 #endif /* FEATURE_TSO_STATS */
796 
797 /**
798  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
799  *				     free the tso segments descriptor and
800  *				     tso num segments descriptor
801  *
802  * @soc:  soc handle
803  * @msdu_info: msdu descriptor
804  * @tso_seg_unmap: flag to show if dma unmap is necessary
805  *
806  * Return - void
807  */
808 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
809 					  struct dp_tx_msdu_info_s *msdu_info,
810 					  bool tso_seg_unmap)
811 {
812 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
813 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
814 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
815 					tso_info->tso_num_seg_list;
816 
817 	/* do dma unmap for each segment */
818 	if (tso_seg_unmap)
819 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
820 
821 	/* free all tso num segment descriptors (the list typically has only one) */
822 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
823 
824 	/* free all tso segment descriptor */
825 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
826 }
827 
828 /**
829  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
830  * @vdev: virtual device handle
831  * @msdu: network buffer
832  * @msdu_info: meta data associated with the msdu
833  *
834  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
835  */
836 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
837 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
838 {
839 	struct qdf_tso_seg_elem_t *tso_seg;
840 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
841 	struct dp_soc *soc = vdev->pdev->soc;
842 	struct dp_pdev *pdev = vdev->pdev;
843 	struct qdf_tso_info_t *tso_info;
844 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
845 	tso_info = &msdu_info->u.tso_info;
846 	tso_info->curr_seg = NULL;
847 	tso_info->tso_seg_list = NULL;
848 	tso_info->num_segs = num_seg;
849 	msdu_info->frm_type = dp_tx_frm_tso;
850 	tso_info->tso_num_seg_list = NULL;
851 
852 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
853 
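	/*
	 * Allocate one tso segment descriptor per segment and prepend it to
	 * tso_seg_list; on any allocation failure, free whatever was already
	 * allocated and bail out.
	 */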
854 	while (num_seg) {
855 		tso_seg = dp_tx_tso_desc_alloc(
856 				soc, msdu_info->tx_queue.desc_pool_id);
857 		if (tso_seg) {
858 			tso_seg->next = tso_info->tso_seg_list;
859 			tso_info->tso_seg_list = tso_seg;
860 			num_seg--;
861 		} else {
862 			dp_err_rl("Failed to alloc tso seg desc");
863 			DP_STATS_INC_PKT(vdev->pdev,
864 					 tso_stats.tso_no_mem_dropped, 1,
865 					 qdf_nbuf_len(msdu));
866 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
867 
868 			return QDF_STATUS_E_NOMEM;
869 		}
870 	}
871 
872 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
873 
874 	tso_num_seg = dp_tso_num_seg_alloc(soc,
875 			msdu_info->tx_queue.desc_pool_id);
876 
877 	if (tso_num_seg) {
878 		tso_num_seg->next = tso_info->tso_num_seg_list;
879 		tso_info->tso_num_seg_list = tso_num_seg;
880 	} else {
881 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
882 			 __func__);
883 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
884 
885 		return QDF_STATUS_E_NOMEM;
886 	}
887 
888 	msdu_info->num_seg =
889 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
890 
891 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
892 			msdu_info->num_seg);
893 
894 	if (!(msdu_info->num_seg)) {
895 		/*
896 		 * Free allocated TSO seg desc and number seg desc,
897 		 * do unmap for segments if dma map has done.
898 		 */
899 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
900 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
901 
902 		return QDF_STATUS_E_INVAL;
903 	}
904 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
905 			      msdu, 0, DP_TX_DESC_MAP);
906 
907 	tso_info->curr_seg = tso_info->tso_seg_list;
908 
909 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
910 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
911 			     msdu, msdu_info->num_seg);
912 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
913 				    tso_info->msdu_stats_idx);
914 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
915 	return QDF_STATUS_SUCCESS;
916 }
917 #else
918 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
919 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
920 {
921 	return QDF_STATUS_E_NOMEM;
922 }
923 #endif
924 
925 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
926 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
927 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
928 
929 /**
930  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
931  * @vdev: DP Vdev handle
932  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
933  * @desc_pool_id: Descriptor Pool ID
934  *
935  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
936  */
937 static
938 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
939 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
940 {
941 	uint8_t i;
942 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
943 	struct dp_tx_seg_info_s *seg_info;
944 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
945 	struct dp_soc *soc = vdev->pdev->soc;
946 
947 	/* Allocate an extension descriptor */
948 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
949 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
950 
951 	if (!msdu_ext_desc) {
952 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
953 		return NULL;
954 	}
955 
956 	if (msdu_info->exception_fw &&
957 			qdf_unlikely(vdev->mesh_vdev)) {
958 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
959 				&msdu_info->meta_data[0],
960 				sizeof(struct htt_tx_msdu_desc_ext2_t));
961 		qdf_atomic_inc(&soc->num_tx_exception);
962 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
963 	}
964 
965 	switch (msdu_info->frm_type) {
966 	case dp_tx_frm_sg:
967 	case dp_tx_frm_me:
968 	case dp_tx_frm_raw:
969 		seg_info = msdu_info->u.sg_info.curr_seg;
970 		/* Update the buffer pointers in MSDU Extension Descriptor */
971 		for (i = 0; i < seg_info->frag_cnt; i++) {
972 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
973 				seg_info->frags[i].paddr_lo,
974 				seg_info->frags[i].paddr_hi,
975 				seg_info->frags[i].len);
976 		}
977 
978 		break;
979 
980 	case dp_tx_frm_tso:
981 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
982 				&cached_ext_desc[0]);
983 		break;
984 
985 
986 	default:
987 		break;
988 	}
989 
990 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
991 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
992 
993 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
994 			msdu_ext_desc->vaddr);
995 
996 	return msdu_ext_desc;
997 }
998 
999 /**
1000  * dp_tx_trace_pkt() - Trace TX packet at DP layer
1001  * @soc: DP soc handle
1002  * @skb: skb to be traced
1003  * @msdu_id: msdu_id of the packet
1004  * @vdev_id: vdev_id of the packet
1005  *
1006  * Return: None
1007  */
1008 #ifdef DP_DISABLE_TX_PKT_TRACE
1009 static void dp_tx_trace_pkt(struct dp_soc *soc,
1010 			    qdf_nbuf_t skb, uint16_t msdu_id,
1011 			    uint8_t vdev_id)
1012 {
1013 }
1014 #else
1015 static void dp_tx_trace_pkt(struct dp_soc *soc,
1016 			    qdf_nbuf_t skb, uint16_t msdu_id,
1017 			    uint8_t vdev_id)
1018 {
1019 	if (dp_is_tput_high(soc))
1020 		return;
1021 
1022 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
1023 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
1024 	DPTRACE(qdf_dp_trace_ptr(skb,
1025 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
1026 				 QDF_TRACE_DEFAULT_PDEV_ID,
1027 				 qdf_nbuf_data_addr(skb),
1028 				 sizeof(qdf_nbuf_data(skb)),
1029 				 msdu_id, vdev_id, 0));
1030 
1031 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
1032 
1033 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
1034 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
1035 				      msdu_id, QDF_TX));
1036 }
1037 #endif
1038 
1039 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
1040 /**
1041  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
1042  *				      exception by the upper layer (OS_IF)
1043  * @soc: DP soc handle
1044  * @nbuf: packet to be transmitted
1045  *
1046  * Returns: 1 if the packet is marked as exception,
1047  *	    0 if the packet is not marked as exception.
1048  */
1049 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
1050 						 qdf_nbuf_t nbuf)
1051 {
1052 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
1053 }
1054 #else
1055 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
1056 						 qdf_nbuf_t nbuf)
1057 {
1058 	return 0;
1059 }
1060 #endif
1061 
1062 /**
1063  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1064  * @vdev: DP vdev handle
1065  * @nbuf: skb
1066  * @desc_pool_id: Descriptor pool ID
1067  * @msdu_info: MSDU info containing the metadata to the fw
1068  * @tx_exc_metadata: Handle that holds exception path metadata
1069  * Allocate and prepare Tx descriptor with msdu information.
1070  *
1071  * Return: Pointer to Tx Descriptor on success,
1072  *         NULL on failure
1073  */
1074 static
1075 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1076 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1077 		struct dp_tx_msdu_info_s *msdu_info,
1078 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1079 {
1080 	uint8_t align_pad;
1081 	uint8_t is_exception = 0;
1082 	uint8_t htt_hdr_size;
1083 	struct dp_tx_desc_s *tx_desc;
1084 	struct dp_pdev *pdev = vdev->pdev;
1085 	struct dp_soc *soc = pdev->soc;
1086 
1087 	if (dp_tx_limit_check(vdev))
1088 		return NULL;
1089 
1090 	/* Allocate software Tx descriptor */
1091 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1092 
1093 	if (qdf_unlikely(!tx_desc)) {
1094 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1095 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1096 		return NULL;
1097 	}
1098 
1099 	dp_tx_outstanding_inc(pdev);
1100 
1101 	/* Initialize the SW tx descriptor */
1102 	tx_desc->nbuf = nbuf;
1103 	tx_desc->frm_type = dp_tx_frm_std;
1104 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1105 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1106 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1107 	tx_desc->vdev_id = vdev->vdev_id;
1108 	tx_desc->pdev = pdev;
1109 	tx_desc->msdu_ext_desc = NULL;
1110 	tx_desc->pkt_offset = 0;
1111 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1112 
1113 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1114 
1115 	if (qdf_unlikely(vdev->multipass_en)) {
1116 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1117 			goto failure;
1118 	}
1119 
1120 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1121 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1122 		is_exception = 1;
1123 	/*
1124 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1125 	 * transmitted using varying transmit parameters (tx spec) which include
1126 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
1127 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1128 	 * These frames are sent as exception packets to firmware.
1129 	 *
1130 	 * HW requirement is that metadata should always point to a
1131 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1132 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1133 	 *  to get 8-byte aligned start address along with align_pad added
1134 	 *
1135 	 *  |-----------------------------|
1136 	 *  |                             |
1137 	 *  |-----------------------------| <-----Buffer Pointer Address given
1138 	 *  |                             |  ^    in HW descriptor (aligned)
1139 	 *  |       HTT Metadata          |  |
1140 	 *  |                             |  |
1141 	 *  |                             |  | Packet Offset given in descriptor
1142 	 *  |                             |  |
1143 	 *  |-----------------------------|  |
1144 	 *  |       Alignment Pad         |  v
1145 	 *  |-----------------------------| <----- Actual buffer start address
1146 	 *  |        SKB Data             |           (Unaligned)
1147 	 *  |                             |
1148 	 *  |                             |
1149 	 *  |                             |
1150 	 *  |                             |
1151 	 *  |                             |
1152 	 *  |-----------------------------|
1153 	 */
1154 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1155 				(vdev->opmode == wlan_op_mode_ocb) ||
1156 				(tx_exc_metadata &&
1157 				tx_exc_metadata->is_tx_sniffer)) {
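		/*
		 * align_pad is the offset of the current buffer start from the
		 * previous 8-byte boundary; pushing this many bytes makes the
		 * new head 8-byte aligned before the HTT metadata is added.
		 */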
1158 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1159 
1160 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1161 			DP_STATS_INC(vdev,
1162 				     tx_i.dropped.headroom_insufficient, 1);
1163 			goto failure;
1164 		}
1165 
1166 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1167 			dp_tx_err("qdf_nbuf_push_head failed");
1168 			goto failure;
1169 		}
1170 
1171 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1172 				msdu_info);
1173 		if (htt_hdr_size == 0)
1174 			goto failure;
1175 
1176 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1177 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1178 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1179 		is_exception = 1;
1180 		tx_desc->length -= tx_desc->pkt_offset;
1181 	}
1182 
1183 #if !TQM_BYPASS_WAR
1184 	if (is_exception || tx_exc_metadata)
1185 #endif
1186 	{
1187 		/* Temporary WAR due to TQM VP issues */
1188 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1189 		qdf_atomic_inc(&soc->num_tx_exception);
1190 	}
1191 
1192 	return tx_desc;
1193 
1194 failure:
1195 	dp_tx_desc_release(tx_desc, desc_pool_id);
1196 	return NULL;
1197 }
1198 
1199 /**
1200  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1201  * @vdev: DP vdev handle
1202  * @nbuf: skb
1203  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1204  * @desc_pool_id: Descriptor Pool ID
1205  *
1206  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1207  * information. For frames with fragments, allocate and prepare
1208  * an MSDU extension descriptor
1209  *
1210  * Return: Pointer to Tx Descriptor on success,
1211  *         NULL on failure
1212  */
1213 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1214 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1215 		uint8_t desc_pool_id)
1216 {
1217 	struct dp_tx_desc_s *tx_desc;
1218 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1219 	struct dp_pdev *pdev = vdev->pdev;
1220 	struct dp_soc *soc = pdev->soc;
1221 
1222 	if (dp_tx_limit_check(vdev))
1223 		return NULL;
1224 
1225 	/* Allocate software Tx descriptor */
1226 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1227 	if (!tx_desc) {
1228 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1229 		return NULL;
1230 	}
1231 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1232 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1233 
1234 	dp_tx_outstanding_inc(pdev);
1235 
1236 	/* Initialize the SW tx descriptor */
1237 	tx_desc->nbuf = nbuf;
1238 	tx_desc->frm_type = msdu_info->frm_type;
1239 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1240 	tx_desc->vdev_id = vdev->vdev_id;
1241 	tx_desc->pdev = pdev;
1242 	tx_desc->pkt_offset = 0;
1243 
1244 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
1245 
1246 	/* Handle scattered frames - TSO/SG/ME */
1247 	/* Allocate and prepare an extension descriptor for scattered frames */
1248 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1249 	if (!msdu_ext_desc) {
1250 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1251 		goto failure;
1252 	}
1253 
1254 #if TQM_BYPASS_WAR
1255 	/* Temporary WAR due to TQM VP issues */
1256 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1257 	qdf_atomic_inc(&soc->num_tx_exception);
1258 #endif
1259 	if (qdf_unlikely(msdu_info->exception_fw))
1260 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1261 
1262 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1263 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1264 
1265 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1266 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1267 
1268 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1269 
1270 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1271 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1272 	else
1273 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1274 
1275 	return tx_desc;
1276 failure:
1277 	dp_tx_desc_release(tx_desc, desc_pool_id);
1278 	return NULL;
1279 }
1280 
1281 /**
1282  * dp_tx_prepare_raw() - Prepare RAW packet TX
1283  * @vdev: DP vdev handle
1284  * @nbuf: buffer pointer
1285  * @seg_info: Pointer to Segment info Descriptor to be prepared
1286  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1287  *     descriptor
1288  *
1289  * Return: nbuf on success, NULL on failure
1290  */
1291 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1292 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1293 {
1294 	qdf_nbuf_t curr_nbuf = NULL;
1295 	uint16_t total_len = 0;
1296 	qdf_dma_addr_t paddr;
1297 	int32_t i;
1298 	int32_t mapped_buf_num = 0;
1299 
1300 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1301 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1302 
1303 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1304 
1305 	/* Continue only if frames are of DATA type */
1306 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1307 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1308 		dp_tx_debug("Pkt. recd is not of data type");
1309 		goto error;
1310 	}
1311 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1312 	if (vdev->raw_mode_war &&
1313 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1314 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1315 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1316 
1317 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1318 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1319 		/*
1320 		 * Number of nbuf's must not exceed the size of the frags
1321 		 * array in seg_info.
1322 		 */
1323 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1324 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1325 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1326 			goto error;
1327 		}
1328 		if (QDF_STATUS_SUCCESS !=
1329 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1330 						   curr_nbuf,
1331 						   QDF_DMA_TO_DEVICE,
1332 						   curr_nbuf->len)) {
1333 			dp_tx_err("%s dma map error ", __func__);
1334 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1335 			goto error;
1336 		}
1337 		/* Update the count of mapped nbuf's */
1338 		mapped_buf_num++;
1339 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1340 		seg_info->frags[i].paddr_lo = paddr;
1341 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1342 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1343 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1344 		total_len += qdf_nbuf_len(curr_nbuf);
1345 	}
1346 
1347 	seg_info->frag_cnt = i;
1348 	seg_info->total_len = total_len;
1349 	seg_info->next = NULL;
1350 
1351 	sg_info->curr_seg = seg_info;
1352 
1353 	msdu_info->frm_type = dp_tx_frm_raw;
1354 	msdu_info->num_seg = 1;
1355 
1356 	return nbuf;
1357 
1358 error:
1359 	i = 0;
1360 	while (nbuf) {
1361 		curr_nbuf = nbuf;
1362 		if (i < mapped_buf_num) {
1363 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1364 						     QDF_DMA_TO_DEVICE,
1365 						     curr_nbuf->len);
1366 			i++;
1367 		}
1368 		nbuf = qdf_nbuf_next(nbuf);
1369 		qdf_nbuf_free(curr_nbuf);
1370 	}
1371 	return NULL;
1372 
1373 }
1374 
1375 /**
1376  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1377  * @soc: DP soc handle
1378  * @nbuf: Buffer pointer
1379  *
1380  * unmap the chain of nbufs that belong to this RAW frame.
1381  *
1382  * Return: None
1383  */
1384 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1385 				    qdf_nbuf_t nbuf)
1386 {
1387 	qdf_nbuf_t cur_nbuf = nbuf;
1388 
1389 	do {
1390 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1391 					     QDF_DMA_TO_DEVICE,
1392 					     cur_nbuf->len);
1393 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1394 	} while (cur_nbuf);
1395 }
1396 
1397 #ifdef VDEV_PEER_PROTOCOL_COUNT
1398 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1399 					       qdf_nbuf_t nbuf)
1400 {
1401 	qdf_nbuf_t nbuf_local;
1402 	struct dp_vdev *vdev_local = vdev_hdl;
1403 
1404 	do {
1405 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1406 			break;
1407 		nbuf_local = nbuf;
1408 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1409 			 htt_cmn_pkt_type_raw))
1410 			break;
1411 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1412 			break;
1413 		else if (qdf_nbuf_is_tso((nbuf_local)))
1414 			break;
1415 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1416 						       (nbuf_local),
1417 						       NULL, 1, 0);
1418 	} while (0);
1419 }
1420 #endif
1421 
1422 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1423 /**
1424  * dp_tx_update_stats() - Update soc level tx stats
1425  * @soc: DP soc handle
1426  * @nbuf: packet being transmitted
1427  *
1428  * Returns: none
1429  */
1430 void dp_tx_update_stats(struct dp_soc *soc,
1431 			qdf_nbuf_t nbuf)
1432 {
1433 	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
1434 }
1435 
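/**
 * dp_tx_attempt_coalescing() - Check if the TCL register write can be coalesced
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID of the packet
 *
 * Query the SW latency manager to decide whether the TCL ring HP update for
 * this packet can be deferred (coalesced) with subsequent packets.
 *
 * Return: non-zero if the write can be coalesced, 0 if HP must be updated now
 */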
1436 int
1437 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1438 			 struct dp_tx_desc_s *tx_desc,
1439 			 uint8_t tid)
1440 {
1441 	struct dp_swlm *swlm = &soc->swlm;
1442 	union swlm_data swlm_query_data;
1443 	struct dp_swlm_tcl_data tcl_data;
1444 	QDF_STATUS status;
1445 	int ret;
1446 
1447 	if (qdf_unlikely(!swlm->is_enabled))
1448 		return 0;
1449 
1450 	tcl_data.nbuf = tx_desc->nbuf;
1451 	tcl_data.tid = tid;
1452 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1453 	swlm_query_data.tcl_data = &tcl_data;
1454 
1455 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1456 	if (QDF_IS_STATUS_ERROR(status)) {
1457 		dp_swlm_tcl_reset_session_data(soc);
1458 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1459 		return 0;
1460 	}
1461 
1462 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1463 	if (ret) {
1464 		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
1465 	} else {
1466 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1467 	}
1468 
1469 	return ret;
1470 }
1471 
1472 void
1473 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1474 		      int coalesce)
1475 {
1476 	if (coalesce)
1477 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1478 	else
1479 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1480 }
1481 
1482 #endif
1483 
1484 #ifdef FEATURE_RUNTIME_PM
1485 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1486 {
1487 	return qdf_atomic_read(&soc->rtpm_high_tput_flag);
1488 }
1489 /**
1490  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1491  * @soc: Datapath soc handle
1492  * @hal_ring_hdl: HAL ring handle
1493  * @coalesce: Coalesce the current write or not
1494  *
1495  * Wrapper for HAL ring access end for data transmission for
1496  * FEATURE_RUNTIME_PM
1497  *
1498  * Returns: none
1499  */
1500 void
1501 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1502 			      hal_ring_handle_t hal_ring_hdl,
1503 			      int coalesce)
1504 {
1505 	int ret;
1506 
1507 	/*
1508 	 * Avoid runtime get and put APIs under high throughput scenarios.
1509 	 */
1510 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1511 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1512 		return;
1513 	}
1514 
1515 	ret = hif_pm_runtime_get(soc->hif_handle,
1516 				 RTPM_ID_DW_TX_HW_ENQUEUE, true);
1517 	switch (ret) {
1518 	case 0:
1519 		if (hif_system_pm_state_check(soc->hif_handle)) {
1520 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1521 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1522 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1523 		} else {
1524 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1525 		}
1526 		hif_pm_runtime_put(soc->hif_handle,
1527 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1528 		break;
1529 	/*
1530 	 * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
1531 	 * take the dp runtime refcount using dp_runtime_get,
1532 	 * check link state,if up, write TX ring HP, else just set flush event.
1533 	 * In dp_runtime_resume, wait until dp runtime refcount becomes
1534 	 * zero or time out, then flush pending tx.
1535 	 */
1536 	case -EBUSY:
1537 	case -EINPROGRESS:
1538 		dp_runtime_get(soc);
1539 		if (hif_pm_get_link_state(soc->hif_handle) ==
1540 		    HIF_PM_LINK_STATE_UP) {
1541 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1542 		} else {
1543 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1544 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1545 			qdf_atomic_inc(&soc->tx_pending_rtpm);
1546 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1547 		}
1548 		dp_runtime_put(soc);
1549 		break;
1550 	default:
1551 		dp_runtime_get(soc);
1552 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1553 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1554 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1555 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1556 		dp_runtime_put(soc);
1557 	}
1558 }
1559 #else
1560 
1561 #ifdef DP_POWER_SAVE
1562 void
1563 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1564 			      hal_ring_handle_t hal_ring_hdl,
1565 			      int coalesce)
1566 {
1567 	if (hif_system_pm_state_check(soc->hif_handle)) {
1568 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1569 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1570 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1571 	} else {
1572 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1573 	}
1574 }
1575 #endif
1576 
1577 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
1578 {
1579 	return 0;
1580 }
1581 #endif
1582 
1583 /**
1584  * dp_tx_get_tid() - Obtain TID to be used for this frame
1585  * @vdev: DP vdev handle
1586  * @nbuf: skb
1587  * @msdu_info: msdu descriptor to be filled with the extracted TID
1588  * Extract the DSCP or PCP information from frame and map into TID value.
1589  *
1590  * Return: void
1591  */
1592 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1593 			  struct dp_tx_msdu_info_s *msdu_info)
1594 {
1595 	uint8_t tos = 0, dscp_tid_override = 0;
1596 	uint8_t *hdr_ptr, *L3datap;
1597 	uint8_t is_mcast = 0;
1598 	qdf_ether_header_t *eh = NULL;
1599 	qdf_ethervlan_header_t *evh = NULL;
1600 	uint16_t   ether_type;
1601 	qdf_llc_t *llcHdr;
1602 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1603 
1604 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1605 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1606 		eh = (qdf_ether_header_t *)nbuf->data;
1607 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1608 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1609 	} else {
1610 		qdf_dot3_qosframe_t *qos_wh =
1611 			(qdf_dot3_qosframe_t *) nbuf->data;
1612 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1613 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1614 		return;
1615 	}
1616 
1617 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1618 	ether_type = eh->ether_type;
1619 
1620 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1621 	/*
1622 	 * Check if packet is dot3 or eth2 type.
1623 	 */
1624 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1625 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1626 				sizeof(*llcHdr));
1627 
1628 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1629 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1630 				sizeof(*llcHdr);
1631 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1632 					+ sizeof(*llcHdr) +
1633 					sizeof(qdf_net_vlanhdr_t));
1634 		} else {
1635 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1636 				sizeof(*llcHdr);
1637 		}
1638 	} else {
1639 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1640 			evh = (qdf_ethervlan_header_t *) eh;
1641 			ether_type = evh->ether_type;
1642 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1643 		}
1644 	}
1645 
1646 	/*
1647 	 * Find priority from IP TOS DSCP field
1648 	 */
1649 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1650 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1651 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1652 			/* Only for unicast frames */
1653 			if (!is_mcast) {
1654 				/* send it on VO queue */
1655 				msdu_info->tid = DP_VO_TID;
1656 			}
1657 		} else {
1658 			/*
1659 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1660 			 * from TOS byte.
1661 			 */
1662 			tos = ip->ip_tos;
1663 			dscp_tid_override = 1;
1664 
1665 		}
1666 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1667 		/* TODO
1668 		 * use flowlabel
1669 		 * igmpmld cases to be handled in phase 2
1670 		 */
1671 		unsigned long ver_pri_flowlabel;
1672 		unsigned long pri;
1673 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1674 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1675 			DP_IPV6_PRIORITY_SHIFT;
1676 		tos = pri;
1677 		dscp_tid_override = 1;
1678 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1679 		msdu_info->tid = DP_VO_TID;
1680 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1681 		/* Only for unicast frames */
1682 		if (!is_mcast) {
1683 			/* send ucast arp on VO queue */
1684 			msdu_info->tid = DP_VO_TID;
1685 		}
1686 	}
1687 
1688 	/*
1689 	 * Assign all MCAST packets to BE
1690 	 */
1691 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1692 		if (is_mcast) {
1693 			tos = 0;
1694 			dscp_tid_override = 1;
1695 		}
1696 	}
1697 
1698 	if (dscp_tid_override == 1) {
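		/* DSCP is the upper 6 bits of the TOS byte; ECN bits 0-1 are dropped */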
1699 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1700 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1701 	}
1702 
1703 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1704 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1705 
1706 	return;
1707 }
1708 
1709 /**
1710  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1711  * @vdev: DP vdev handle
1712  * @nbuf: skb
1713  * @msdu_info: msdu descriptor to be updated with the classified TID
1714  * Software based TID classification is required when more than 2 DSCP-TID
1715  * mapping tables are needed.
1716  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1717  *
1718  * Return: void
1719  */
1720 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1721 				      struct dp_tx_msdu_info_s *msdu_info)
1722 {
1723 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1724 
1725 	/*
1726 	 * skip_sw_tid_classification flag will be set in below cases:
1727 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1728 	 * 2. hlos_tid_override enabled for vdev
1729 	 * 3. mesh mode enabled for vdev
1730 	 */
1731 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1732 		/* Update tid in msdu_info from skb priority */
1733 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1734 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1735 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1736 
1737 			if (tid == DP_TX_INVALID_QOS_TAG)
1738 				return;
1739 
1740 			msdu_info->tid = tid;
1741 			return;
1742 		}
1743 		return;
1744 	}
1745 
1746 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1747 }
1748 
1749 #ifdef FEATURE_WLAN_TDLS
1750 /**
1751  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1752  * @soc: datapath SOC
1753  * @vdev: datapath vdev
1754  * @tx_desc: TX descriptor
1755  *
1756  * Return: None
1757  */
1758 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1759 				    struct dp_vdev *vdev,
1760 				    struct dp_tx_desc_s *tx_desc)
1761 {
1762 	if (vdev) {
1763 		if (vdev->is_tdls_frame) {
1764 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1765 			vdev->is_tdls_frame = false;
1766 		}
1767 	}
1768 }
1769 
1770 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1771 {
1772 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1773 
1774 	switch (soc->arch_id) {
1775 	case CDP_ARCH_TYPE_LI:
1776 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1777 		break;
1778 
1779 	case CDP_ARCH_TYPE_BE:
1780 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1781 		break;
1782 
1783 	default:
1784 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1785 		QDF_BUG(0);
1786 	}
1787 
1788 	return tx_status;
1789 }
1790 
1791 /**
1792  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1793  * @soc: dp_soc handle
1794  * @tx_desc: TX descriptor
1796  *
1797  * Return: None
1798  */
1799 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1800 					 struct dp_tx_desc_s *tx_desc)
1801 {
1802 	uint8_t tx_status = 0;
1803 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1804 
1805 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1806 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1807 						     DP_MOD_ID_TDLS);
1808 
1809 	if (qdf_unlikely(!vdev)) {
1810 		dp_err_rl("vdev is null!");
1811 		goto error;
1812 	}
1813 
1814 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1815 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1816 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1817 
1818 	if (vdev->tx_non_std_data_callback.func) {
1819 		qdf_nbuf_set_next(nbuf, NULL);
1820 		vdev->tx_non_std_data_callback.func(
1821 				vdev->tx_non_std_data_callback.ctxt,
1822 				nbuf, tx_status);
1823 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1824 		return;
1825 	} else {
1826 		dp_err_rl("callback func is null");
1827 	}
1828 
1829 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1830 error:
1831 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1832 	qdf_nbuf_free(nbuf);
1833 }
1834 
1835 /**
1836  * dp_tx_msdu_single_map() - do nbuf map
1837  * @vdev: DP vdev handle
1838  * @tx_desc: DP TX descriptor pointer
1839  * @nbuf: skb pointer
1840  *
1841  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1842  * operation done in the other component.
1843  *
1844  * Return: QDF_STATUS
1845  */
1846 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1847 					       struct dp_tx_desc_s *tx_desc,
1848 					       qdf_nbuf_t nbuf)
1849 {
1850 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1851 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1852 						  nbuf,
1853 						  QDF_DMA_TO_DEVICE,
1854 						  nbuf->len);
1855 	else
1856 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1857 					   QDF_DMA_TO_DEVICE);
1858 }
1859 #else
1860 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1861 					   struct dp_vdev *vdev,
1862 					   struct dp_tx_desc_s *tx_desc)
1863 {
1864 }
1865 
1866 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1867 						struct dp_tx_desc_s *tx_desc)
1868 {
1869 }
1870 
1871 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1872 					       struct dp_tx_desc_s *tx_desc,
1873 					       qdf_nbuf_t nbuf)
1874 {
1875 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1876 					  nbuf,
1877 					  QDF_DMA_TO_DEVICE,
1878 					  nbuf->len);
1879 }
1880 #endif
1881 
1882 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
1883 static inline
1884 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
1885 			      struct dp_tx_desc_s *tx_desc,
1886 			      qdf_nbuf_t nbuf)
1887 {
1888 	qdf_nbuf_dma_clean_range((void *)nbuf->data,
1889 				 (void *)(nbuf->data + nbuf->len));
1890 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1891 }
1892 
1893 static inline
1894 void dp_tx_nbuf_unmap(struct dp_soc *soc,
1895 		      struct dp_tx_desc_s *desc)
1896 {
1897 }
1898 #else
1899 static inline
1900 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
1901 			      struct dp_tx_desc_s *tx_desc,
1902 			      qdf_nbuf_t nbuf)
1903 {
1904 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
1905 
1906 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
1907 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
1908 		return 0;
1909 
1910 	return qdf_nbuf_mapped_paddr_get(nbuf);
1911 }
1912 
1913 static inline
1914 void dp_tx_nbuf_unmap(struct dp_soc *soc,
1915 		      struct dp_tx_desc_s *desc)
1916 {
1917 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
1918 					   desc->nbuf,
1919 					   desc->dma_addr,
1920 					   QDF_DMA_TO_DEVICE,
1921 					   desc->length);
1922 }
1923 #endif
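
/*
 * Illustrative sketch (hypothetical caller): with QCA_DP_TX_NBUF_NO_MAP_UNMAP
 * the IOVA comes straight from the virtual address after a cache clean; in
 * the regular build a DMA map is done and the paddr read back. Either way
 * the caller only sees:
 *
 *	qdf_dma_addr_t paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
 *
 *	if (!paddr)
 *		handle as TX_DMA_MAP_ERR;
 *
 * dp_tx_send_msdu_single() below follows exactly this pattern.
 */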
1924 
1925 #ifdef MESH_MODE_SUPPORT
1926 /**
1927  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1928  * @soc: datapath SOC
1929  * @vdev: datapath vdev
1930  * @tx_desc: TX descriptor
1931  *
1932  * Return: None
1933  */
1934 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1935 					   struct dp_vdev *vdev,
1936 					   struct dp_tx_desc_s *tx_desc)
1937 {
1938 	if (qdf_unlikely(vdev->mesh_vdev))
1939 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1940 }
1941 
1942 /**
1943  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
1944  * @soc: dp_soc handle
1945  * @tx_desc: TX descriptor
1947  *
1948  * Return: None
1949  */
1950 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1951 					     struct dp_tx_desc_s *tx_desc)
1952 {
1953 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1954 	struct dp_vdev *vdev = NULL;
1955 
1956 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1957 		qdf_nbuf_free(nbuf);
1958 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1959 	} else {
1960 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1961 					     DP_MOD_ID_MESH);
1962 		if (vdev && vdev->osif_tx_free_ext)
1963 			vdev->osif_tx_free_ext((nbuf));
1964 		else
1965 			qdf_nbuf_free(nbuf);
1966 
1967 		if (vdev)
1968 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1969 	}
1970 }
1971 #else
1972 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1973 					   struct dp_vdev *vdev,
1974 					   struct dp_tx_desc_s *tx_desc)
1975 {
1976 }
1977 
1978 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1979 					     struct dp_tx_desc_s *tx_desc)
1980 {
1981 }
1982 #endif
1983 
1984 /**
1985  * dp_tx_frame_is_drop() - checks if the packet is loopback
1986  * @vdev: DP vdev handle
1987  * @srcmac: source MAC address
 * @dstmac: destination MAC address
1988  *
1989  * Return: 1 if frame needs to be dropped else 0
1990  */
1991 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1992 {
1993 	struct dp_pdev *pdev = NULL;
1994 	struct dp_ast_entry *src_ast_entry = NULL;
1995 	struct dp_ast_entry *dst_ast_entry = NULL;
1996 	struct dp_soc *soc = NULL;
1997 
1998 	qdf_assert(vdev);
1999 	pdev = vdev->pdev;
2000 	qdf_assert(pdev);
2001 	soc = pdev->soc;
2002 
2003 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2004 				(soc, dstmac, vdev->pdev->pdev_id);
2005 
2006 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2007 				(soc, srcmac, vdev->pdev->pdev_id);
2008 	if (dst_ast_entry && src_ast_entry) {
2009 		if (dst_ast_entry->peer_id ==
2010 				src_ast_entry->peer_id)
2011 			return 1;
2012 	}
2013 
2014 	return 0;
2015 }
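
/*
 * Illustrative sketch (hypothetical caller, 'eh' is an assumed ethernet
 * header pointer): a frame whose source and destination MACs resolve to AST
 * entries with the same peer_id is a loopback and should be dropped, e.g.:
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost))
 *		return nbuf;
 */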
2016 
2017 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2018 	defined(WLAN_MCAST_MLO)
2019 /* MLO peer id for reinject*/
2020 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2021 /* MLO vdev id inc offset */
2022 #define DP_MLO_VDEV_ID_OFFSET 0x80
2023 
2024 static inline void
2025 dp_tx_update_mcast_param(uint16_t peer_id,
2026 			 uint16_t *htt_tcl_metadata,
2027 			 struct dp_vdev *vdev,
2028 			 struct dp_tx_msdu_info_s *msdu_info)
2029 {
2030 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2031 		*htt_tcl_metadata = 0;
2032 		DP_TX_TCL_METADATA_TYPE_SET(
2033 				*htt_tcl_metadata,
2034 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2035 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2036 						    msdu_info->gsn);
2037 
2038 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2039 	} else {
2040 		msdu_info->vdev_id = vdev->vdev_id;
2041 	}
2042 }
2043 #else
2044 static inline void
2045 dp_tx_update_mcast_param(uint16_t peer_id,
2046 			 uint16_t *htt_tcl_metadata,
2047 			 struct dp_vdev *vdev,
2048 			 struct dp_tx_msdu_info_s *msdu_info)
2049 {
2050 }
2051 #endif
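
/*
 * Illustrative sketch (values are hypothetical): for an MLO mcast reinject
 * the TCL metadata is rebuilt as global-sequence based and the vdev id is
 * moved into the MLO range, e.g.:
 *
 *	uint16_t meta = 0;
 *
 *	dp_tx_update_mcast_param(DP_MLO_MCAST_REINJECT_PEER_ID, &meta,
 *				 vdev, msdu_info);
 *
 * after which meta carries the GLOBAL_SEQ type and msdu_info->gsn, and
 * msdu_info->vdev_id equals vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET.
 */
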
2052 /**
2053  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
2054  * @vdev: DP vdev handle
2055  * @nbuf: skb
2056  * @msdu_info: MSDU info (TID, FW metadata, Tx queue) for this frame
2059  * @peer_id: peer_id of the peer in case of NAWDS frames
2060  * @tx_exc_metadata: Handle that holds exception path metadata
2061  *
2062  * Return: NULL on success,
2063  *         nbuf when it fails to send
2064  */
2065 qdf_nbuf_t
2066 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2067 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
2068 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
2069 {
2070 	struct dp_pdev *pdev = vdev->pdev;
2071 	struct dp_soc *soc = pdev->soc;
2072 	struct dp_tx_desc_s *tx_desc;
2073 	QDF_STATUS status;
2074 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
2075 	uint16_t htt_tcl_metadata = 0;
2076 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
2077 	uint8_t tid = msdu_info->tid;
2078 	struct cdp_tid_tx_stats *tid_stats = NULL;
2079 	qdf_dma_addr_t paddr;
2080 
2081 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
2082 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
2083 			msdu_info, tx_exc_metadata);
2084 	if (!tx_desc) {
2085 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
2086 			  vdev, tx_q->desc_pool_id);
2087 		drop_code = TX_DESC_ERR;
2088 		goto fail_return;
2089 	}
2090 
2091 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
2092 
2093 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
2094 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2095 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2096 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2097 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2098 					    DP_TCL_METADATA_TYPE_PEER_BASED);
2099 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2100 					       peer_id);
2101 	} else
2102 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2103 
2104 	if (msdu_info->exception_fw)
2105 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2106 
2107 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2108 					 !pdev->enhanced_stats_en);
2109 
2110 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2111 
2112 	paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
2113 	if (!paddr) {
2114 		/* Handle failure */
2115 		dp_err("qdf_nbuf_map failed");
2116 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2117 		drop_code = TX_DMA_MAP_ERR;
2118 		goto release_desc;
2119 	}
2120 
2121 	tx_desc->dma_addr = paddr;
2122 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2123 			       tx_desc->id, DP_TX_DESC_MAP);
2124 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
2125 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2126 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2127 					     htt_tcl_metadata,
2128 					     tx_exc_metadata, msdu_info);
2129 
2130 	if (status != QDF_STATUS_SUCCESS) {
2131 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2132 			     tx_desc, tx_q->ring_id);
2133 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2134 				       tx_desc->id, DP_TX_DESC_UNMAP);
2135 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2136 					     QDF_DMA_TO_DEVICE,
2137 					     nbuf->len);
2138 		drop_code = TX_HW_ENQUEUE;
2139 		goto release_desc;
2140 	}
2141 
2142 	return NULL;
2143 
2144 release_desc:
2145 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2146 
2147 fail_return:
2148 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2149 	tid_stats = &pdev->stats.tid_stats.
2150 		    tid_tx_stats[tx_q->ring_id][tid];
2151 	tid_stats->swdrop_cnt[drop_code]++;
2152 	return nbuf;
2153 }
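
/*
 * Illustrative caller sketch (hypothetical, mirrors dp_tx_send() below):
 *
 *	struct dp_tx_msdu_info_s msdu_info = {0};
 *
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
 *				      HTT_INVALID_PEER, NULL);
 *	if (nbuf)
 *		handle the failure - ownership of nbuf is back with the caller;
 */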
2154 
2155 /**
2156  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2157  * @soc: Soc handle
2158  * @desc: software Tx descriptor to be processed
2159  *
2160  * Return: none
2161  */
2162 void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2163 {
2164 	qdf_nbuf_t nbuf = desc->nbuf;
2165 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2166 
2167 	/* nbuf already freed in vdev detach path */
2168 	if (!nbuf)
2169 		return;
2170 
2171 	/* If it is TDLS mgmt, don't unmap or free the frame */
2172 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2173 		return dp_non_std_htt_tx_comp_free_buff(soc, desc);
2174 
2175 	/* 0 : MSDU buffer, 1 : MLE */
2176 	if (desc->msdu_ext_desc) {
2177 		/* TSO free */
2178 		if (hal_tx_ext_desc_get_tso_enable(
2179 					desc->msdu_ext_desc->vaddr)) {
2180 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2181 					       desc->id, DP_TX_COMP_MSDU_EXT);
2182 			dp_tx_tso_seg_history_add(soc,
2183 						  desc->msdu_ext_desc->tso_desc,
2184 						  desc->nbuf, desc->id, type);
2185 			/* unmap each TSO seg before freeing the nbuf */
2186 			dp_tx_tso_unmap_segment(soc,
2187 						desc->msdu_ext_desc->tso_desc,
2188 						desc->msdu_ext_desc->
2189 						tso_num_desc);
2190 			qdf_nbuf_free(nbuf);
2191 			return;
2192 		}
2193 
2194 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
2195 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
2196 			qdf_dma_addr_t iova;
2197 			uint32_t frag_len;
2198 			uint32_t i;
2199 
2200 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2201 						     QDF_DMA_TO_DEVICE,
2202 						     qdf_nbuf_headlen(nbuf));
2203 
2204 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
2205 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
2206 							      &iova,
2207 							      &frag_len);
2208 				if (!iova || !frag_len)
2209 					break;
2210 
2211 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
2212 						   QDF_DMA_TO_DEVICE);
2213 			}
2214 
2215 			qdf_nbuf_free(nbuf);
2216 			return;
2217 		}
2218 	}
2219 	/* If it's an ME frame, don't unmap the cloned nbufs */
2220 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2221 		goto nbuf_free;
2222 
2223 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2224 	dp_tx_unmap(soc, desc);
2225 
2226 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2227 		return dp_mesh_tx_comp_free_buff(soc, desc);
2228 nbuf_free:
2229 	qdf_nbuf_free(nbuf);
2230 }
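
/*
 * Summary of the buffer ownership handled above (informal):
 *	TDLS mgmt : handed to dp_non_std_htt_tx_comp_free_buff()
 *	TSO       : each segment unmapped, nbuf freed when its user count
 *		    (bumped per additional segment at enqueue) drops to zero
 *	SG        : head plus every fragment unmapped, then nbuf freed
 *	ME clone  : no unmap, the clone is simply freed
 *	Mesh      : handed to dp_mesh_tx_comp_free_buff()
 */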
2231 
2232 /**
2233  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
2234  * @soc: DP soc handle
2235  * @nbuf: skb
2236  * @msdu_info: MSDU info
2237  *
2238  * Return: None
2239  */
2240 static inline void
2241 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
2242 		   struct dp_tx_msdu_info_s *msdu_info)
2243 {
2244 	uint32_t cur_idx;
2245 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
2246 
2247 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
2248 				     qdf_nbuf_headlen(nbuf));
2249 
2250 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
2251 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
2252 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
2253 				    seg->frags[cur_idx].paddr_hi) << 32),
2254 				   seg->frags[cur_idx].len,
2255 				   QDF_DMA_TO_DEVICE);
2256 }
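
/*
 * Note on the address split used above (sketch): each SG fragment stores its
 * DMA address as a 32-bit lo/hi pair, so the full address is rebuilt before
 * unmapping, e.g.:
 *
 *	qdf_dma_addr_t paddr = (qdf_dma_addr_t)
 *		(seg->frags[i].paddr_lo |
 *		 ((uint64_t)seg->frags[i].paddr_hi) << 32);
 *
 * dp_tx_prepare_sg() below fills the same lo/hi pair from the mapped
 * addresses.
 */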
2257 
2258 /**
2259  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2260  * @vdev: DP vdev handle
2261  * @nbuf: skb
2262  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2263  *
2264  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2265  *
2266  * Return: NULL on success,
2267  *         nbuf when it fails to send
2268  */
2269 #if QDF_LOCK_STATS
2270 noinline
2271 #else
2272 #endif
2273 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2274 				    struct dp_tx_msdu_info_s *msdu_info)
2275 {
2276 	uint32_t i;
2277 	struct dp_pdev *pdev = vdev->pdev;
2278 	struct dp_soc *soc = pdev->soc;
2279 	struct dp_tx_desc_s *tx_desc;
2280 	bool is_cce_classified = false;
2281 	QDF_STATUS status;
2282 	uint16_t htt_tcl_metadata = 0;
2283 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2284 	struct cdp_tid_tx_stats *tid_stats = NULL;
2285 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2286 
2287 	if (msdu_info->frm_type == dp_tx_frm_me)
2288 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2289 
2290 	i = 0;
2291 	/* Print statement to track i and num_seg */
2292 	/*
2293 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2294 	 * descriptors using information in msdu_info
2295 	 */
2296 	while (i < msdu_info->num_seg) {
2297 		/*
2298 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2299 		 * descriptor
2300 		 */
2301 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2302 				tx_q->desc_pool_id);
2303 
2304 		if (!tx_desc) {
2305 			if (msdu_info->frm_type == dp_tx_frm_me) {
2306 				prep_desc_fail++;
2307 				dp_tx_me_free_buf(pdev,
2308 					(void *)(msdu_info->u.sg_info
2309 						.curr_seg->frags[0].vaddr));
2310 				if (prep_desc_fail == msdu_info->num_seg) {
2311 					/*
2312 					 * Unmap is needed only if descriptor
2313 					 * preparation failed for all segments.
2314 					 */
2315 					qdf_nbuf_unmap(soc->osdev,
2316 						       msdu_info->u.sg_info.
2317 						       curr_seg->nbuf,
2318 						       QDF_DMA_TO_DEVICE);
2319 				}
2320 				/*
2321 				 * Free the nbuf for the current segment
2322 				 * and make it point to the next in the list.
2323 				 * For ME, there are as many segments as
2324 				 * there are clients.
2325 				 */
2326 				qdf_nbuf_free(msdu_info->u.sg_info
2327 					      .curr_seg->nbuf);
2328 				if (msdu_info->u.sg_info.curr_seg->next) {
2329 					msdu_info->u.sg_info.curr_seg =
2330 						msdu_info->u.sg_info
2331 						.curr_seg->next;
2332 					nbuf = msdu_info->u.sg_info
2333 					       .curr_seg->nbuf;
2334 				}
2335 				i++;
2336 				continue;
2337 			}
2338 
2339 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2340 				dp_tx_tso_seg_history_add(
2341 						soc,
2342 						msdu_info->u.tso_info.curr_seg,
2343 						nbuf, 0, DP_TX_DESC_UNMAP);
2344 				dp_tx_tso_unmap_segment(soc,
2345 							msdu_info->u.tso_info.
2346 							curr_seg,
2347 							msdu_info->u.tso_info.
2348 							tso_num_seg_list);
2349 
2350 				if (msdu_info->u.tso_info.curr_seg->next) {
2351 					msdu_info->u.tso_info.curr_seg =
2352 					msdu_info->u.tso_info.curr_seg->next;
2353 					i++;
2354 					continue;
2355 				}
2356 			}
2357 
2358 			if (msdu_info->frm_type == dp_tx_frm_sg)
2359 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2360 
2361 			goto done;
2362 		}
2363 
2364 		if (msdu_info->frm_type == dp_tx_frm_me) {
2365 			tx_desc->msdu_ext_desc->me_buffer =
2366 				(struct dp_tx_me_buf_t *)msdu_info->
2367 				u.sg_info.curr_seg->frags[0].vaddr;
2368 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2369 		}
2370 
2371 		if (is_cce_classified)
2372 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2373 
2374 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2375 		if (msdu_info->exception_fw) {
2376 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2377 		}
2378 
2379 		/*
2380 		 * For frames with multiple segments (TSO, ME), jump to next
2381 		 * segment.
2382 		 */
2383 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2384 			if (msdu_info->u.tso_info.curr_seg->next) {
2385 				msdu_info->u.tso_info.curr_seg =
2386 					msdu_info->u.tso_info.curr_seg->next;
2387 
2388 				/*
2389 				 * If this is a jumbo nbuf, then increment the
2390 				 * number of nbuf users for each additional
2391 				 * segment of the msdu. This will ensure that
2392 				 * the skb is freed only after receiving tx
2393 				 * completion for all segments of an nbuf
2394 				 */
2395 				qdf_nbuf_inc_users(nbuf);
2396 
2397 				/* Check with MCL if this is needed */
2398 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2399 				 */
2400 			}
2401 		}
2402 
2403 		dp_tx_update_mcast_param(DP_INVALID_PEER,
2404 					 &htt_tcl_metadata,
2405 					 vdev,
2406 					 msdu_info);
2407 		/*
2408 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2409 		 */
2410 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2411 						     htt_tcl_metadata,
2412 						     NULL, msdu_info);
2413 
2414 		if (status != QDF_STATUS_SUCCESS) {
2415 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2416 				   tx_desc, tx_q->ring_id);
2417 
2418 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2419 			tid_stats = &pdev->stats.tid_stats.
2420 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2421 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2422 
2423 			if (msdu_info->frm_type == dp_tx_frm_me) {
2424 				hw_enq_fail++;
2425 				if (hw_enq_fail == msdu_info->num_seg) {
2426 					/*
2427 					 * Unmap is needed only if enqueue
2428 					 * failed for all segments.
2429 					 */
2430 					qdf_nbuf_unmap(soc->osdev,
2431 						       msdu_info->u.sg_info.
2432 						       curr_seg->nbuf,
2433 						       QDF_DMA_TO_DEVICE);
2434 				}
2435 				/*
2436 				 * Free the nbuf for the current segment
2437 				 * and make it point to the next in the list.
2438 				 * For ME, there are as many segments as
2439 				 * there are clients.
2440 				 */
2441 				qdf_nbuf_free(msdu_info->u.sg_info
2442 					      .curr_seg->nbuf);
2443 				if (msdu_info->u.sg_info.curr_seg->next) {
2444 					msdu_info->u.sg_info.curr_seg =
2445 						msdu_info->u.sg_info
2446 						.curr_seg->next;
2447 					nbuf = msdu_info->u.sg_info
2448 					       .curr_seg->nbuf;
2449 				} else
2450 					break;
2451 				i++;
2452 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2453 				continue;
2454 			}
2455 
2456 			/*
2457 			 * For TSO frames, the nbuf users increment done for
2458 			 * the current segment has to be reverted, since the
2459 			 * hw enqueue for this segment failed
2460 			 */
2461 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2462 			    msdu_info->u.tso_info.curr_seg) {
2463 				/*
2464 				 * unmap and free current,
2465 				 * retransmit remaining segments
2466 				 */
2467 				dp_tx_comp_free_buf(soc, tx_desc);
2468 				i++;
2469 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2470 				continue;
2471 			}
2472 
2473 			if (msdu_info->frm_type == dp_tx_frm_sg)
2474 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
2475 
2476 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2477 			goto done;
2478 		}
2479 
2480 		/*
2481 		 * TODO
2482 		 * if tso_info structure can be modified to have curr_seg
2483 		 * as first element, following 2 blocks of code (for TSO and SG)
2484 		 * can be combined into 1
2485 		 */
2486 
2487 		/*
2488 		 * For Multicast-Unicast converted packets,
2489 		 * each converted frame (for a client) is represented as
2490 		 * 1 segment
2491 		 */
2492 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2493 				(msdu_info->frm_type == dp_tx_frm_me)) {
2494 			if (msdu_info->u.sg_info.curr_seg->next) {
2495 				msdu_info->u.sg_info.curr_seg =
2496 					msdu_info->u.sg_info.curr_seg->next;
2497 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2498 			} else
2499 				break;
2500 		}
2501 		i++;
2502 	}
2503 
2504 	nbuf = NULL;
2505 
2506 done:
2507 	return nbuf;
2508 }
2509 
2510 /**
2511  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2512  *                     for SG frames
2513  * @vdev: DP vdev handle
2514  * @nbuf: skb
2515  * @seg_info: Pointer to Segment info Descriptor to be prepared
2516  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2517  *
2518  * Return: nbuf on success,
2519  *         NULL on failure
2520  */
2521 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2522 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2523 {
2524 	uint32_t cur_frag, nr_frags, i;
2525 	qdf_dma_addr_t paddr;
2526 	struct dp_tx_sg_info_s *sg_info;
2527 
2528 	sg_info = &msdu_info->u.sg_info;
2529 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2530 
2531 	if (QDF_STATUS_SUCCESS !=
2532 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2533 					   QDF_DMA_TO_DEVICE,
2534 					   qdf_nbuf_headlen(nbuf))) {
2535 		dp_tx_err("dma map error");
2536 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2537 		qdf_nbuf_free(nbuf);
2538 		return NULL;
2539 	}
2540 
2541 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2542 	seg_info->frags[0].paddr_lo = paddr;
2543 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2544 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2545 	seg_info->frags[0].vaddr = (void *) nbuf;
2546 
2547 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2548 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
2549 							    nbuf, 0,
2550 							    QDF_DMA_TO_DEVICE,
2551 							    cur_frag)) {
2552 			dp_tx_err("frag dma map error");
2553 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2554 			goto map_err;
2555 		}
2556 
2557 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2558 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2559 		seg_info->frags[cur_frag + 1].paddr_hi =
2560 			((uint64_t) paddr) >> 32;
2561 		seg_info->frags[cur_frag + 1].len =
2562 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2563 	}
2564 
2565 	seg_info->frag_cnt = (cur_frag + 1);
2566 	seg_info->total_len = qdf_nbuf_len(nbuf);
2567 	seg_info->next = NULL;
2568 
2569 	sg_info->curr_seg = seg_info;
2570 
2571 	msdu_info->frm_type = dp_tx_frm_sg;
2572 	msdu_info->num_seg = 1;
2573 
2574 	return nbuf;
2575 map_err:
2576 	/* restore paddr into nbuf before calling unmap */
2577 	qdf_nbuf_mapped_paddr_set(nbuf,
2578 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2579 				  ((uint64_t)
2580 				  seg_info->frags[0].paddr_hi) << 32));
2581 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2582 				     QDF_DMA_TO_DEVICE,
2583 				     seg_info->frags[0].len);
2584 	for (i = 1; i <= cur_frag; i++) {
2585 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2586 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2587 				   seg_info->frags[i].paddr_hi) << 32),
2588 				   seg_info->frags[i].len,
2589 				   QDF_DMA_TO_DEVICE);
2590 	}
2591 	qdf_nbuf_free(nbuf);
2592 	return NULL;
2593 }
2594 
2595 /**
2596  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2597  * @vdev: DP vdev handle
2598  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2599  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2600  *
2601  * Return: None
2603  */
2604 static
2605 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2606 				    struct dp_tx_msdu_info_s *msdu_info,
2607 				    uint16_t ppdu_cookie)
2608 {
2609 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2610 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2611 
2612 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2613 
2614 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2615 				(msdu_info->meta_data[5], 1);
2616 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2617 				(msdu_info->meta_data[5], 1);
2618 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2619 				(msdu_info->meta_data[6], ppdu_cookie);
2620 
2621 	msdu_info->exception_fw = 1;
2622 	msdu_info->is_tx_sniffer = 1;
2623 }
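
/*
 * Illustrative sketch (the cookie value is hypothetical): a caller that wants
 * a cookie echoed back in the PPDU completions would do:
 *
 *	dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info, 0x1234);
 *
 * after which msdu_info.exception_fw is set, so the frame takes the FW
 * exception path with the ext2 descriptor attached.
 */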
2624 
2625 #ifdef MESH_MODE_SUPPORT
2626 
2627 /**
2628  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2629  *				      and prepare msdu_info for mesh frames.
2630  * @vdev: DP vdev handle
2631  * @nbuf: skb
2632  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2633  *
2634  * Return: NULL on failure,
2635  *         nbuf when extracted successfully
2636  */
2637 static
2638 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2639 				struct dp_tx_msdu_info_s *msdu_info)
2640 {
2641 	struct meta_hdr_s *mhdr;
2642 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2643 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2644 
2645 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2646 
2647 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2648 		msdu_info->exception_fw = 0;
2649 		goto remove_meta_hdr;
2650 	}
2651 
2652 	msdu_info->exception_fw = 1;
2653 
2654 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2655 
2656 	meta_data->host_tx_desc_pool = 1;
2657 	meta_data->update_peer_cache = 1;
2658 	meta_data->learning_frame = 1;
2659 
2660 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2661 		meta_data->power = mhdr->power;
2662 
2663 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2664 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2665 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2666 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2667 
2668 		meta_data->dyn_bw = 1;
2669 
2670 		meta_data->valid_pwr = 1;
2671 		meta_data->valid_mcs_mask = 1;
2672 		meta_data->valid_nss_mask = 1;
2673 		meta_data->valid_preamble_type  = 1;
2674 		meta_data->valid_retries = 1;
2675 		meta_data->valid_bw_info = 1;
2676 	}
2677 
2678 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2679 		meta_data->encrypt_type = 0;
2680 		meta_data->valid_encrypt_type = 1;
2681 		meta_data->learning_frame = 0;
2682 	}
2683 
2684 	meta_data->valid_key_flags = 1;
2685 	meta_data->key_flags = (mhdr->keyix & 0x3);
2686 
2687 remove_meta_hdr:
2688 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2689 		dp_tx_err("qdf_nbuf_pull_head failed");
2690 		qdf_nbuf_free(nbuf);
2691 		return NULL;
2692 	}
2693 
2694 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2695 
2696 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2697 		   " tid %d to_fw %d",
2698 		   msdu_info->meta_data[0],
2699 		   msdu_info->meta_data[1],
2700 		   msdu_info->meta_data[2],
2701 		   msdu_info->meta_data[3],
2702 		   msdu_info->meta_data[4],
2703 		   msdu_info->meta_data[5],
2704 		   msdu_info->tid, msdu_info->exception_fw);
2705 
2706 	return nbuf;
2707 }
2708 #else
2709 static
2710 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2711 				struct dp_tx_msdu_info_s *msdu_info)
2712 {
2713 	return nbuf;
2714 }
2715 
2716 #endif
2717 
2718 /**
2719  * dp_check_exc_metadata() - Checks if parameters are valid
2720  * @tx_exc: holds all exception path parameters
2721  *
2722  * Return: true when all the parameters are valid, else false
2723  *
2724  */
2725 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2726 {
2727 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2728 			    HTT_INVALID_TID);
2729 	bool invalid_encap_type =
2730 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2731 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2732 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2733 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2734 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2735 			       tx_exc->ppdu_cookie == 0);
2736 
2737 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2738 	    invalid_cookie) {
2739 		return false;
2740 	}
2741 
2742 	return true;
2743 }
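
/*
 * Illustrative sketch (field values are hypothetical): a minimal metadata
 * block that passes the checks above would be:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.tid = HTT_INVALID_TID;
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
 *	tx_exc.peer_id = HTT_INVALID_PEER;
 *
 * with is_tx_sniffer left at 0, dp_check_exc_metadata(&tx_exc) returns true.
 */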
2744 
2745 #ifdef ATH_SUPPORT_IQUE
2746 /**
2747  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2748  * @vdev: vdev handle
2749  * @nbuf: skb
2750  *
2751  * Return: true if the frame should continue on the regular Tx path,
2752  *         false if it was consumed by the ME conversion path
2753  */
2754 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2755 {
2756 	qdf_ether_header_t *eh;
2757 
2758 	/* Mcast to Ucast conversion */
2759 	if (qdf_likely(!vdev->mcast_enhancement_en))
2760 		return true;
2761 
2762 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2763 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2764 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2765 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2766 		qdf_nbuf_set_next(nbuf, NULL);
2767 
2768 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2769 				 qdf_nbuf_len(nbuf));
2770 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2771 				QDF_STATUS_SUCCESS) {
2772 			return false;
2773 		}
2774 
2775 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2776 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2777 					QDF_STATUS_SUCCESS) {
2778 				return false;
2779 			}
2780 		}
2781 	}
2782 
2783 	return true;
2784 }
2785 #else
2786 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2787 {
2788 	return true;
2789 }
2790 #endif
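
/*
 * Illustrative caller sketch (mirrors dp_tx_send() below): a false return
 * means the nbuf was consumed by the ME/IGMP-ME conversion and must not be
 * sent again on the regular path:
 *
 *	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
 *		return NULL;
 */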
2791 
2792 /**
2793  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2794  * @nbuf: qdf_nbuf_t
2795  * @vdev: struct dp_vdev *
2796  *
2797  * Allow packet for processing only if it is for peer client which is
2798  * connected with same vap. Drop packet if client is connected to
2799  * different vap.
2800  *
2801  * Return: QDF_STATUS
2802  */
2803 static inline QDF_STATUS
2804 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2805 {
2806 	struct dp_ast_entry *dst_ast_entry = NULL;
2807 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2808 
2809 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2810 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2811 		return QDF_STATUS_SUCCESS;
2812 
2813 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2814 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2815 							eh->ether_dhost,
2816 							vdev->vdev_id);
2817 
2818 	/* If there is no ast entry, return failure */
2819 	if (qdf_unlikely(!dst_ast_entry)) {
2820 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2821 		return QDF_STATUS_E_FAILURE;
2822 	}
2823 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2824 
2825 	return QDF_STATUS_SUCCESS;
2826 }
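
/*
 * Illustrative sketch (hypothetical caller): mcast/bcast always pass; for
 * unicast the destination must have an AST entry on this vdev:
 *
 *	if (dp_tx_per_pkt_vdev_id_check(nbuf, vdev) == QDF_STATUS_E_FAILURE)
 *		drop - the client is associated with a different vap;
 *
 * dp_tx_send_vdev_id_check() below applies exactly this check.
 */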
2827 
2828 /**
2829  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2830  * @soc: DP soc handle
2831  * @vdev_id: id of DP vdev handle
2832  * @nbuf: skb
2833  * @tx_exc_metadata: Handle that holds exception path meta data
2834  *
2835  * Entry point for Core Tx layer (DP_TX) invoked from
2836  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2837  *
2838  * Return: NULL on success,
2839  *         nbuf when it fails to send
2840  */
2841 qdf_nbuf_t
2842 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2843 		     qdf_nbuf_t nbuf,
2844 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2845 {
2846 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2847 	qdf_ether_header_t *eh = NULL;
2848 	struct dp_tx_msdu_info_s msdu_info;
2849 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2850 						     DP_MOD_ID_TX_EXCEPTION);
2851 
2852 	if (qdf_unlikely(!vdev))
2853 		goto fail;
2854 
2855 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2856 
2857 	if (!tx_exc_metadata)
2858 		goto fail;
2859 
2860 	msdu_info.tid = tx_exc_metadata->tid;
2861 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2862 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2863 			 QDF_MAC_ADDR_REF(nbuf->data));
2864 
2865 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2866 
2867 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2868 		dp_tx_err("Invalid parameters in exception path");
2869 		goto fail;
2870 	}
2871 
2872 	/* Basic sanity checks for unsupported packets */
2873 
2874 	/* MESH mode */
2875 	if (qdf_unlikely(vdev->mesh_vdev)) {
2876 		dp_tx_err("Mesh mode is not supported in exception path");
2877 		goto fail;
2878 	}
2879 
2880 	/*
2881 	 * Classify the frame and call corresponding
2882 	 * "prepare" function which extracts the segment (TSO)
2883 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2884 	 * into MSDU_INFO structure which is later used to fill
2885 	 * SW and HW descriptors.
2886 	 */
2887 	if (qdf_nbuf_is_tso(nbuf)) {
2888 		dp_verbose_debug("TSO frame %pK", vdev);
2889 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2890 				 qdf_nbuf_len(nbuf));
2891 
2892 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2893 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2894 					 qdf_nbuf_len(nbuf));
2895 			goto fail;
2896 		}
2897 
2898 		goto send_multiple;
2899 	}
2900 
2901 	/* SG */
2902 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2903 		struct dp_tx_seg_info_s seg_info = {0};
2904 
2905 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2906 		if (!nbuf)
2907 			goto fail;
2908 
2909 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2910 
2911 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2912 				 qdf_nbuf_len(nbuf));
2913 
2914 		goto send_multiple;
2915 	}
2916 
2917 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2918 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2919 				 qdf_nbuf_len(nbuf));
2920 
2921 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2922 					       tx_exc_metadata->ppdu_cookie);
2923 	}
2924 
2925 	/*
2926 	 * Get HW Queue to use for this frame.
2927 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2928 	 * dedicated for data and 1 for command.
2929 	 * "queue_id" maps to one hardware ring.
2930 	 *  With each ring, we also associate a unique Tx descriptor pool
2931 	 *  to minimize lock contention for these resources.
2932 	 */
2933 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2934 
2935 	/*
2936 	 * Check exception descriptors
2937 	 */
2938 	if (dp_tx_exception_limit_check(vdev))
2939 		goto fail;
2940 
2941 	/*  Single linear frame */
2942 	/*
2943 	 * If nbuf is a simple linear frame, use send_single function to
2944 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2945 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2946 	 */
2947 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2948 			tx_exc_metadata->peer_id, tx_exc_metadata);
2949 
2950 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2951 	return nbuf;
2952 
2953 send_multiple:
2954 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2955 
2956 fail:
2957 	if (vdev)
2958 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2959 	dp_verbose_debug("pkt send failed");
2960 	return nbuf;
2961 }
2962 
2963 /**
2964  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2965  *      in exception path in special case to avoid regular exception path check.
2966  * @soc: DP soc handle
2967  * @vdev_id: id of DP vdev handle
2968  * @nbuf: skb
2969  * @tx_exc_metadata: Handle that holds exception path meta data
2970  *
2971  * Entry point for Core Tx layer (DP_TX) invoked from
2972  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2973  *
2974  * Return: NULL on success,
2975  *         nbuf when it fails to send
2976  */
2977 qdf_nbuf_t
2978 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2979 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
2980 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2981 {
2982 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2983 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2984 						     DP_MOD_ID_TX_EXCEPTION);
2985 
2986 	if (qdf_unlikely(!vdev))
2987 		goto fail;
2988 
2989 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2990 			== QDF_STATUS_E_FAILURE)) {
2991 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2992 		goto fail;
2993 	}
2994 
2995 	/* Drop the ref count as it will again be taken inside dp_tx_send_exception */
2996 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2997 
2998 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2999 
3000 fail:
3001 	if (vdev)
3002 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
3003 	dp_verbose_debug("pkt send failed");
3004 	return nbuf;
3005 }
3006 
3007 /**
3008  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
3009  * @soc: DP soc handle
3010  * @vdev_id: DP vdev handle
3011  * @nbuf: skb
3012  *
3013  * Entry point for Core Tx layer (DP_TX) invoked from
3014  * hard_start_xmit in OSIF/HDD
3015  *
3016  * Return: NULL on success,
3017  *         nbuf when it fails to send
3018  */
3019 #ifdef MESH_MODE_SUPPORT
3020 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3021 			   qdf_nbuf_t nbuf)
3022 {
3023 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3024 	struct meta_hdr_s *mhdr;
3025 	qdf_nbuf_t nbuf_mesh = NULL;
3026 	qdf_nbuf_t nbuf_clone = NULL;
3027 	struct dp_vdev *vdev;
3028 	uint8_t no_enc_frame = 0;
3029 
3030 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
3031 	if (!nbuf_mesh) {
3032 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3033 				"qdf_nbuf_unshare failed");
3034 		return nbuf;
3035 	}
3036 
3037 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
3038 	if (!vdev) {
3039 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3040 				"vdev is NULL for vdev_id %d", vdev_id);
3041 		return nbuf;
3042 	}
3043 
3044 	nbuf = nbuf_mesh;
3045 
3046 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3047 
3048 	if ((vdev->sec_type != cdp_sec_type_none) &&
3049 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
3050 		no_enc_frame = 1;
3051 
3052 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
3053 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
3054 
3055 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
3056 		       !no_enc_frame) {
3057 		nbuf_clone = qdf_nbuf_clone(nbuf);
3058 		if (!nbuf_clone) {
3059 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3060 				"qdf_nbuf_clone failed");
3061 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3062 			return nbuf;
3063 		}
3064 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
3065 	}
3066 
3067 	if (nbuf_clone) {
3068 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
3069 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3070 		} else {
3071 			qdf_nbuf_free(nbuf_clone);
3072 		}
3073 	}
3074 
3075 	if (no_enc_frame)
3076 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
3077 	else
3078 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
3079 
3080 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
3081 	if ((!nbuf) && no_enc_frame) {
3082 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
3083 	}
3084 
3085 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
3086 	return nbuf;
3087 }
3088 
3089 #else
3090 
3091 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
3092 			   qdf_nbuf_t nbuf)
3093 {
3094 	return dp_tx_send(soc, vdev_id, nbuf);
3095 }
3096 
3097 #endif
3098 
3099 /**
3100  * dp_tx_nawds_handler() - NAWDS handler
3101  *
3102  * @soc: DP soc handle
3103  * @vdev: DP vdev handle
3104  * @msdu_info: msdu_info required to create HTT metadata
3105  * @nbuf: skb
3106  *
3107  * This API transmits the multicast frame to each NAWDS-enabled peer,
3108  * using that peer's peer id.
3109  *
3110  * Return: none
3111  */
3112 
3113 static inline
3114 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3115 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
3116 {
3117 	struct dp_peer *peer = NULL;
3118 	qdf_nbuf_t nbuf_clone = NULL;
3119 	uint16_t peer_id = DP_INVALID_PEER;
3120 	uint16_t sa_peer_id = DP_INVALID_PEER;
3121 	struct dp_ast_entry *ast_entry = NULL;
3122 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3123 	struct dp_txrx_peer *txrx_peer;
3124 
3125 	if (!soc->ast_offload_support) {
3126 		if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
3127 			qdf_spin_lock_bh(&soc->ast_lock);
3128 			ast_entry = dp_peer_ast_hash_find_by_pdevid
3129 				(soc,
3130 				 (uint8_t *)(eh->ether_shost),
3131 				 vdev->pdev->pdev_id);
3132 			if (ast_entry)
3133 				sa_peer_id = ast_entry->peer_id;
3134 			qdf_spin_unlock_bh(&soc->ast_lock);
3135 		}
3136 	} else {
3137 		if ((qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) &&
3138 		    qdf_nbuf_get_tx_fctx(nbuf))
3139 			sa_peer_id = *(uint32_t *)qdf_nbuf_get_tx_fctx(nbuf);
3140 	}
3141 
3142 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3143 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3144 		txrx_peer = dp_get_txrx_peer(peer);
3145 		qdf_assert_always(txrx_peer);
3146 
3147 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3148 			peer_id = peer->peer_id;
3149 			/* Multicast packets needs to be
3150 			 * dropped in case of intra bss forwarding
3151 			 */
3152 			if (sa_peer_id == peer->peer_id) {
3153 				dp_tx_debug("multicast packet");
3154 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3155 							  tx.nawds_mcast_drop,
3156 							  1);
3157 				continue;
3158 			}
3159 
3160 			nbuf_clone = qdf_nbuf_clone(nbuf);
3161 
3162 			if (!nbuf_clone) {
3163 				QDF_TRACE(QDF_MODULE_ID_DP,
3164 					  QDF_TRACE_LEVEL_ERROR,
3165 					  FL("nbuf clone failed"));
3166 				break;
3167 			}
3168 
3169 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3170 							    msdu_info, peer_id,
3171 							    NULL);
3172 
3173 			if (nbuf_clone) {
3174 				dp_tx_debug("pkt send failed");
3175 				qdf_nbuf_free(nbuf_clone);
3176 			} else {
3177 				if (peer_id != DP_INVALID_PEER) {
3178 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3179 								      tx.nawds_mcast,
3180 								      1, qdf_nbuf_len(nbuf));
3181 				}
3182 			}
3183 		}
3184 	}
3185 
3186 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3187 }
3188 
3189 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
3190 static inline
3191 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3192 {
3193 	if (nbuf) {
3194 		qdf_prefetch(&nbuf->len);
3195 		qdf_prefetch(&nbuf->data);
3196 	}
3197 }
3198 #else
3199 static inline
3200 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
3201 {
3202 }
3203 #endif
3204 
3205 /**
3206  * dp_tx_send() - Transmit a frame on a given VAP
3207  * @soc: DP soc handle
3208  * @vdev_id: id of DP vdev handle
3209  * @nbuf: skb
3210  *
3211  * Entry point for Core Tx layer (DP_TX) invoked from
3212  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3213  * cases
3214  *
3215  * Return: NULL on success,
3216  *         nbuf when it fails to send
3217  */
3218 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3219 		      qdf_nbuf_t nbuf)
3220 {
3221 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3222 	uint16_t peer_id = HTT_INVALID_PEER;
3223 	/*
3224 	 * doing a memzero is causing additional function call overhead
3225 	 * so doing static stack clearing
3226 	 */
3227 	struct dp_tx_msdu_info_s msdu_info = {0};
3228 	struct dp_vdev *vdev = NULL;
3229 
3230 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3231 		return nbuf;
3232 
3233 	/*
3234 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3235 	 * it in the per-packet path.
3236 	 *
3237 	 * As in this path vdev memory is already protected with netdev
3238 	 * tx lock
3239 	 */
3240 	vdev = soc->vdev_id_map[vdev_id];
3241 	if (qdf_unlikely(!vdev))
3242 		return nbuf;
3243 
3244 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3245 			 QDF_MAC_ADDR_REF(nbuf->data));
3246 
3247 	/*
3248 	 * Set Default Host TID value to invalid TID
3249 	 * (TID override disabled)
3250 	 */
3251 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3252 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3253 
3254 	if (qdf_unlikely(vdev->mesh_vdev)) {
3255 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3256 								&msdu_info);
3257 		if (!nbuf_mesh) {
3258 			dp_verbose_debug("Extracting mesh metadata failed");
3259 			return nbuf;
3260 		}
3261 		nbuf = nbuf_mesh;
3262 	}
3263 
3264 	/*
3265 	 * Get HW Queue to use for this frame.
3266 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3267 	 * dedicated for data and 1 for command.
3268 	 * "queue_id" maps to one hardware ring.
3269 	 *  With each ring, we also associate a unique Tx descriptor pool
3270 	 *  to minimize lock contention for these resources.
3271 	 */
3272 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3273 
3274 	/*
3275 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3276 	 *  Table 1 - Default DSCP-TID mapping table
3277 	 *  Table 2 - 1 DSCP-TID override table
3278 	 *
3279 	 * If we need a different DSCP-TID mapping for this vap,
3280 	 * call tid_classify to extract DSCP/ToS from frame and
3281 	 * map to a TID and store in msdu_info. This is later used
3282 	 * to fill in TCL Input descriptor (per-packet TID override).
3283 	 */
3284 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3285 
3286 	/*
3287 	 * Classify the frame and call corresponding
3288 	 * "prepare" function which extracts the segment (TSO)
3289 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3290 	 * into MSDU_INFO structure which is later used to fill
3291 	 * SW and HW descriptors.
3292 	 */
3293 	if (qdf_nbuf_is_tso(nbuf)) {
3294 		dp_verbose_debug("TSO frame %pK", vdev);
3295 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3296 				 qdf_nbuf_len(nbuf));
3297 
3298 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3299 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3300 					 qdf_nbuf_len(nbuf));
3301 			return nbuf;
3302 		}
3303 
3304 		goto send_multiple;
3305 	}
3306 
3307 	/* SG */
3308 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3309 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
3310 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
3311 				return nbuf;
3312 		} else {
3313 			struct dp_tx_seg_info_s seg_info = {0};
3314 
3315 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
3316 						&msdu_info);
3317 			if (!nbuf)
3318 				return NULL;
3319 
3320 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
3321 
3322 			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3323 					 qdf_nbuf_len(nbuf));
3324 
3325 			goto send_multiple;
3326 		}
3327 	}
3328 
3329 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3330 		return NULL;
3331 
3332 	/* RAW */
3333 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3334 		struct dp_tx_seg_info_s seg_info = {0};
3335 
3336 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3337 		if (!nbuf)
3338 			return NULL;
3339 
3340 		dp_verbose_debug("Raw frame %pK", vdev);
3341 
3342 		goto send_multiple;
3343 
3344 	}
3345 
3346 	if (qdf_unlikely(vdev->nawds_enabled)) {
3347 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3348 					  qdf_nbuf_data(nbuf);
3349 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
3350 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
3351 
3352 		peer_id = DP_INVALID_PEER;
3353 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3354 				 1, qdf_nbuf_len(nbuf));
3355 	}
3356 
3357 	/*  Single linear frame */
3358 	/*
3359 	 * If nbuf is a simple linear frame, use send_single function to
3360 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3361 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3362 	 */
3363 	dp_tx_prefetch_nbuf_data(nbuf);
3364 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3365 
3366 	return nbuf;
3367 
3368 send_multiple:
3369 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3370 
3371 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3372 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3373 
3374 	return nbuf;
3375 }
3376 
3377 /**
3378  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3379  *      case to avoid the per-packet check.
3380  * @soc: DP soc handle
3381  * @vdev_id: id of DP vdev handle
3382  * @nbuf: skb
3383  *
3384  * Entry point for Core Tx layer (DP_TX) invoked from
3385  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3386  * with special condition to avoid per pkt check in dp_tx_send
3387  *
3388  * Return: NULL on success,
3389  *         nbuf when it fails to send
3390  */
3391 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3392 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3393 {
3394 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3395 	struct dp_vdev *vdev = NULL;
3396 
3397 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3398 		return nbuf;
3399 
3400 	/*
3401 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3402 	 * it in the per-packet path.
3403 	 *
3404 	 * As in this path vdev memory is already protected with netdev
3405 	 * tx lock
3406 	 */
3407 	vdev = soc->vdev_id_map[vdev_id];
3408 	if (qdf_unlikely(!vdev))
3409 		return nbuf;
3410 
3411 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3412 			== QDF_STATUS_E_FAILURE)) {
3413 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3414 		return nbuf;
3415 	}
3416 
3417 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3418 }
3419 
3420 #ifdef UMAC_SUPPORT_PROXY_ARP
3421 /**
3422  * dp_tx_proxy_arp() - Tx proxy arp handler
3423  * @vdev: datapath vdev handle
3424  * @nbuf: sk buffer
3425  *
3426  * Return: status
3427  */
3428 static inline
3429 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3430 {
3431 	if (vdev->osif_proxy_arp)
3432 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3433 
3434 	/*
3435 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3436 	 * osif_proxy_arp has a valid function pointer assigned
3437 	 * to it
3438 	 */
3439 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3440 
3441 	return QDF_STATUS_NOT_INITIALIZED;
3442 }
3443 #else
3444 /**
3445  * dp_tx_proxy_arp() - Tx proxy arp handler
3446  * @vdev: datapath vdev handle
3447  * @nbuf: sk buffer
3448  *
3449  * This function always return 0 when UMAC_SUPPORT_PROXY_ARP
3450  * is not defined.
3451  *
3452  * Return: status
3453  */
3454 static inline
3455 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3456 {
3457 	return QDF_STATUS_SUCCESS;
3458 }
3459 #endif
3460 
3461 /**
3462  * dp_tx_reinject_handler() - Tx Reinject Handler
3463  * @soc: datapath soc handle
3464  * @vdev: datapath vdev handle
3465  * @tx_desc: software descriptor head pointer
3466  * @status : Tx completion status from HTT descriptor
3467  * @reinject_reason : reinject reason from HTT descriptor
3468  *
3469  * This function reinjects frames back to Target.
3470  * Todo - Host queue needs to be added
3471  *
3472  * Return: none
3473  */
3474 void dp_tx_reinject_handler(struct dp_soc *soc,
3475 			    struct dp_vdev *vdev,
3476 			    struct dp_tx_desc_s *tx_desc,
3477 			    uint8_t *status,
3478 			    uint8_t reinject_reason)
3479 {
3480 	struct dp_peer *peer = NULL;
3481 	uint32_t peer_id = HTT_INVALID_PEER;
3482 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3483 	qdf_nbuf_t nbuf_copy = NULL;
3484 	struct dp_tx_msdu_info_s msdu_info;
3485 #ifdef WDS_VENDOR_EXTENSION
3486 	int is_mcast = 0, is_ucast = 0;
3487 	int num_peers_3addr = 0;
3488 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3489 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3490 #endif
3491 	struct dp_txrx_peer *txrx_peer;
3492 
3493 	qdf_assert(vdev);
3494 
3495 	dp_tx_debug("Tx reinject path");
3496 
3497 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3498 			qdf_nbuf_len(tx_desc->nbuf));
3499 
3500 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3501 #ifdef WLAN_MCAST_MLO
3502 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
3503 		if (soc->arch_ops.dp_tx_mcast_handler)
3504 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
3505 
3506 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3507 		return;
3508 	}
3509 #endif
3510 #endif
3511 
3512 #ifdef WDS_VENDOR_EXTENSION
3513 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3514 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3515 	} else {
3516 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3517 	}
3518 	is_ucast = !is_mcast;
3519 
3520 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3521 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3522 		txrx_peer = dp_get_txrx_peer(peer);
3523 		qdf_assert_always(txrx_peer);
3524 
3525 		if (txrx_peer->bss_peer)
3526 			continue;
3527 
3528 		/* Detect wds peers that use 3-addr framing for mcast.
3529 		 * if there are any, the bss_peer is used to send the
3530 		 * mcast frame using 3-addr format. all wds enabled
3531 		 * peers that use 4-addr framing for mcast frames will
3532 		 * be duplicated and sent as 4-addr frames below.
3533 		 */
3534 		if (!txrx_peer->wds_enabled ||
3535 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
3536 			num_peers_3addr = 1;
3537 			break;
3538 		}
3539 	}
3540 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3541 #endif
3542 
3543 	if (qdf_unlikely(vdev->mesh_vdev)) {
3544 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3545 	} else {
3546 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3547 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3548 			txrx_peer = dp_get_txrx_peer(peer);
3549 			qdf_assert_always(txrx_peer);
3550 
3551 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
3552 #ifdef WDS_VENDOR_EXTENSION
3553 			/*
3554 			 * . if 3-addr STA, then send on BSS Peer
3555 			 * . if Peer WDS enabled and accept 4-addr mcast,
3556 			 * send mcast on that peer only
3557 			 * . if Peer WDS enabled and accept 4-addr ucast,
3558 			 * send ucast on that peer only
3559 			 */
3560 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
3561 			 (txrx_peer->wds_enabled &&
3562 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
3563 			 (is_ucast &&
3564 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
3565 #else
3566 			(txrx_peer->bss_peer &&
3567 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3568 #endif
3569 				peer_id = DP_INVALID_PEER;
3570 
3571 				nbuf_copy = qdf_nbuf_copy(nbuf);
3572 
3573 				if (!nbuf_copy) {
3574 					dp_tx_debug("nbuf copy failed");
3575 					break;
3576 				}
3577 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3578 				dp_tx_get_queue(vdev, nbuf,
3579 						&msdu_info.tx_queue);
3580 
3581 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3582 						nbuf_copy,
3583 						&msdu_info,
3584 						peer_id,
3585 						NULL);
3586 
3587 				if (nbuf_copy) {
3588 					dp_tx_debug("pkt send failed");
3589 					qdf_nbuf_free(nbuf_copy);
3590 				}
3591 			}
3592 		}
3593 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3594 	}
3595 
3596 	qdf_nbuf_free(nbuf);
3597 
3598 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3599 }
3600 
3601 /**
3602  * dp_tx_inspect_handler() - Tx Inspect Handler
3603  * @soc: datapath soc handle
3604  * @vdev: datapath vdev handle
3605  * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
3607  *
3608  * Handles Tx frames sent back to Host for inspection
3609  * (ProxyARP)
3610  *
3611  * Return: none
3612  */
3613 void dp_tx_inspect_handler(struct dp_soc *soc,
3614 			   struct dp_vdev *vdev,
3615 			   struct dp_tx_desc_s *tx_desc,
3616 			   uint8_t *status)
3617 {
3618 
3619 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3620 			"%s Tx inspect path",
3621 			__func__);
3622 
3623 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3624 			 qdf_nbuf_len(tx_desc->nbuf));
3625 
3626 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3627 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3628 }
3629 
3630 #ifdef MESH_MODE_SUPPORT
3631 /**
3632  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3633  *                                         in mesh meta header
3634  * @tx_desc: software descriptor head pointer
3635  * @ts: pointer to tx completion stats
3636  * Return: none
3637  */
3638 static
3639 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3640 		struct hal_tx_completion_status *ts)
3641 {
3642 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3643 
3644 	if (!tx_desc->msdu_ext_desc) {
3645 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3646 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3647 				"netbuf %pK offset %d",
3648 				netbuf, tx_desc->pkt_offset);
3649 			return;
3650 		}
3651 	}
3652 }
3653 
3654 #else
3655 static
3656 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3657 		struct hal_tx_completion_status *ts)
3658 {
3659 }
3660 
3661 #endif
3662 
3663 #ifdef QCA_PEER_EXT_STATS
/**
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 *
 * Compute the software enqueue and HW enqueue-to-completion delays
 * and update the respective histograms
 *
 * Return: void
 */
3674 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3675 				    struct dp_tx_desc_s *tx_desc)
3676 {
3677 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3678 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3679 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3680 
3681 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3682 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3683 	timestamp_hw_enqueue = tx_desc->timestamp;
3684 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3685 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3686 					 timestamp_hw_enqueue);
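	/*
	 * Worked example with illustrative values (all in ms): if
	 * timestamp_ingress = 100, timestamp_hw_enqueue = 102 and
	 * current_timestamp = 110, then sw_enqueue_delay = 2 and
	 * fwhw_transmit_delay = 8.
	 */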
3687 
3688 	/*
	 * Update the Tx software enqueue delay and HW enqueue-completion delay.
3690 	 */
3691 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3692 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3693 }
3694 
/**
 * dp_tx_update_peer_delay_stats() - Update the peer delay stats
 * @txrx_peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @tid: Transmission ID
 * @ring_id: Tx completion ring number/CPU context ID
 *
 * Update the peer extended stats. These are enhanced delay stats
 * tracked at the per-MSDU level.
 *
 * Return: void
 */
3707 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
3708 					  struct dp_tx_desc_s *tx_desc,
3709 					  uint8_t tid, uint8_t ring_id)
3710 {
3711 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
3712 	struct dp_soc *soc = NULL;
3713 	struct dp_peer_delay_stats *delay_stats = NULL;
3714 
3715 	soc = pdev->soc;
3716 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3717 		return;
3718 
3719 	delay_stats = txrx_peer->delay_stats;
3720 
3721 	qdf_assert(delay_stats);
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3723 
	/*
	 * For packets with an invalid TID, use the highest data TID
	 * (CDP_MAX_DATA_TIDS - 1)
	 */
3727 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3728 		tid = CDP_MAX_DATA_TIDS - 1;
3729 
3730 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
3731 				tx_desc);
3732 }
3733 #else
3734 static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
3735 						 struct dp_tx_desc_s *tx_desc,
3736 						 uint8_t tid, uint8_t ring_id)
3737 {
3738 }
3739 #endif
3740 
/**
 * dp_tx_compute_delay() - Compute SW enqueue, HW transmit and
 *			   interframe delays and update the delay stats
 *
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 * Return: none
 */
3751 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
3752 			 uint8_t tid, uint8_t ring_id)
3753 {
3754 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3755 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3756 
3757 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3758 		return;
3759 
3760 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3761 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3762 	timestamp_hw_enqueue = tx_desc->timestamp;
3763 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3764 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3765 					 timestamp_hw_enqueue);
3766 	interframe_delay = (uint32_t)(timestamp_ingress -
3767 				      vdev->prev_tx_enq_tstamp);
3768 
3769 	/*
3770 	 * Delay in software enqueue
3771 	 */
3772 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3773 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3774 	/*
3775 	 * Delay between packet enqueued to HW and Tx completion
3776 	 */
3777 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3778 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3779 
3780 	/*
3781 	 * Update interframe delay stats calculated at hardstart receive point.
3782 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
	 * the interframe delay will not be calculated correctly for the 1st
	 * frame. On the other hand, this avoids an extra per-packet check
	 * of !vdev->prev_tx_enq_tstamp.
3786 	 */
3787 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3788 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3789 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3790 }
3791 
3792 #ifdef DISABLE_DP_STATS
3793 static
3794 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
3795 				   struct dp_txrx_peer *txrx_peer)
3796 {
3797 }
3798 #else
3799 static inline void
3800 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
3801 {
3802 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3803 
3804 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3805 	if (subtype != QDF_PROTO_INVALID)
3806 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
3807 					  1);
3808 }
3809 #endif
3810 
3811 #ifndef QCA_ENHANCED_STATS_SUPPORT
3812 /**
3813  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
3814  *
 * @ts: Tx completion status
3816  * @txrx_peer: datapath txrx_peer handle
3817  *
3818  * Return: void
3819  */
3820 static inline void
3821 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
3822 			     struct dp_txrx_peer *txrx_peer)
3823 {
3824 	uint8_t mcs, pkt_type;
3825 
3826 	mcs = ts->mcs;
3827 	pkt_type = ts->pkt_type;
3828 
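	/* For each PHY mode, count the reported MCS if it is within the
	 * valid range for that mode; otherwise account it in the overflow
	 * bucket (MAX_MCS - 1).
	 */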
3829 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3830 				tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3831 				((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3832 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3833 				tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3834 				((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3835 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3836 				tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3837 				((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3838 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3839 				tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3840 				((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3841 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3842 				tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3843 				((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3844 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3845 				tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3846 				((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3847 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3848 				tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3849 				((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3850 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3851 				tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3852 				((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3853 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3854 				tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3855 				((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3856 	DP_PEER_EXTD_STATS_INCC(txrx_peer,
3857 				tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3858 				((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3859 
3860 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
3861 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
3862 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3863 	DP_PEER_EXTD_STATS_INC(txrx_peer,
3864 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3865 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
3866 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
3867 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
3868 	if (ts->first_msdu) {
3869 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
3870 					ts->transmit_cnt > 1);
3871 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
3872 					qdf_do_div(ts->transmit_cnt, DP_RETRY_COUNT),
3873 					ts->transmit_cnt > DP_RETRY_COUNT);
3874 	}
3875 }
3876 #else
3877 static inline void
3878 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
3879 			     struct dp_txrx_peer *txrx_peer)
3880 {
3881 }
3882 #endif
3883 
3884 /**
3885  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3886  *				per wbm ring
3887  *
3888  * @tx_desc: software descriptor head pointer
3889  * @ts: Tx completion status
 * @txrx_peer: txrx peer handle
3891  * @ring_id: ring number
3892  *
3893  * Return: None
3894  */
3895 static inline void
3896 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3897 			struct hal_tx_completion_status *ts,
3898 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
3899 {
3900 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
3901 	struct dp_soc *soc = NULL;
3902 	uint8_t tid = ts->tid;
3903 	uint32_t length;
3904 	struct cdp_tid_tx_stats *tid_stats;
3905 
3906 	if (!pdev)
3907 		return;
3908 
3909 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3910 		tid = CDP_MAX_DATA_TIDS - 1;
3911 
3912 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3913 	soc = pdev->soc;
3914 
3915 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3916 		dp_err("Release source is not from TQM");
3917 		return;
3918 	}
3919 
3920 	length = qdf_nbuf_len(tx_desc->nbuf);
3921 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
3922 
3923 	if (qdf_unlikely(pdev->delay_stats_flag))
3924 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
3925 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.age_out, 1,
3926 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3927 
3928 	DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, tx.dropped.fw_rem, 1, length,
3929 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3930 
3931 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_rem_notx, 1,
3932 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3933 
3934 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_rem_tx, 1,
3935 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3936 
3937 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason1, 1,
3938 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3939 
3940 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason2, 1,
3941 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3942 
3943 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason3, 1,
3944 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3945 	/*
3946 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3947 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3948 	 * are no completions for failed cases. Hence updating tx_failed from
3949 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3950 	 * then this has to be removed
3951 	 */
3952 	txrx_peer->tx_failed =
3953 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem.num +
3954 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem_notx +
3955 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem_tx +
3956 			txrx_peer->stats.per_pkt_stats.tx.dropped.age_out +
3957 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason1 +
3958 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason2 +
3959 			txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason3;
3960 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3961 		tid_stats->tqm_status_cnt[ts->status]++;
3962 	}
3963 
3964 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3965 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
3966 					   ts->transmit_cnt > DP_RETRY_COUNT);
3967 		dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
3968 		return;
3969 	}
3970 
3971 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
3972 				   ts->transmit_cnt > 1);
3973 
3974 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count, 1,
3975 				   ts->transmit_cnt > 2);
3976 
3977 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
3978 
3979 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
3980 				   ts->msdu_part_of_amsdu);
3981 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
3982 				   !ts->msdu_part_of_amsdu);
3983 
3984 	txrx_peer->stats.per_pkt_stats.tx.last_tx_ts = qdf_system_ticks();
3985 
3986 	dp_tx_update_peer_extd_stats(ts, txrx_peer);
3987 }
3988 
3989 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3990 /**
3991  * dp_tx_flow_pool_lock() - take flow pool lock
3992  * @soc: core txrx main context
3993  * @tx_desc: tx desc
3994  *
3995  * Return: None
3996  */
3997 static inline
3998 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3999 			  struct dp_tx_desc_s *tx_desc)
4000 {
4001 	struct dp_tx_desc_pool_s *pool;
4002 	uint8_t desc_pool_id;
4003 
4004 	desc_pool_id = tx_desc->pool_id;
4005 	pool = &soc->tx_desc[desc_pool_id];
4006 
4007 	qdf_spin_lock_bh(&pool->flow_pool_lock);
4008 }
4009 
4010 /**
4011  * dp_tx_flow_pool_unlock() - release flow pool lock
4012  * @soc: core txrx main context
4013  * @tx_desc: tx desc
4014  *
4015  * Return: None
4016  */
4017 static inline
4018 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
4019 			    struct dp_tx_desc_s *tx_desc)
4020 {
4021 	struct dp_tx_desc_pool_s *pool;
4022 	uint8_t desc_pool_id;
4023 
4024 	desc_pool_id = tx_desc->pool_id;
4025 	pool = &soc->tx_desc[desc_pool_id];
4026 
4027 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
4028 }
4029 #else
4030 static inline
4031 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4032 {
4033 }
4034 
4035 static inline
4036 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
4037 {
4038 }
4039 #endif
4040 
4041 /**
4042  * dp_tx_notify_completion() - Notify tx completion for this desc
4043  * @soc: core txrx main context
4044  * @vdev: datapath vdev handle
4045  * @tx_desc: tx desc
4046  * @netbuf:  buffer
4047  * @status: tx status
4048  *
4049  * Return: none
4050  */
4051 static inline void dp_tx_notify_completion(struct dp_soc *soc,
4052 					   struct dp_vdev *vdev,
4053 					   struct dp_tx_desc_s *tx_desc,
4054 					   qdf_nbuf_t netbuf,
4055 					   uint8_t status)
4056 {
4057 	void *osif_dev;
4058 	ol_txrx_completion_fp tx_compl_cbk = NULL;
4059 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
4060 
4061 	qdf_assert(tx_desc);
4062 
4063 	dp_tx_flow_pool_lock(soc, tx_desc);
4064 
4065 	if (!vdev ||
4066 	    !vdev->osif_vdev) {
4067 		dp_tx_flow_pool_unlock(soc, tx_desc);
4068 		return;
4069 	}
4070 
4071 	osif_dev = vdev->osif_vdev;
4072 	tx_compl_cbk = vdev->tx_comp;
4073 	dp_tx_flow_pool_unlock(soc, tx_desc);
4074 
4075 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4076 		flag |= BIT(QDF_TX_RX_STATUS_OK);
4077 
4078 	if (tx_compl_cbk)
4079 		tx_compl_cbk(netbuf, osif_dev, flag);
4080 }
4081 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @txrx_peer: DP txrx peer context
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
4090 #ifdef FEATURE_PERPKT_INFO
4091 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4092 					       struct dp_txrx_peer *txrx_peer,
4093 					       uint8_t tid,
4094 					       uint64_t txdesc_ts,
4095 					       uint32_t ppdu_id)
4096 {
4097 	uint64_t delta_ms;
4098 	struct cdp_tx_sojourn_stats *sojourn_stats;
4099 	struct dp_peer *primary_link_peer = NULL;
4100 	struct dp_soc *link_peer_soc = NULL;
4101 
4102 	if (qdf_unlikely(!pdev->enhanced_stats_en))
4103 		return;
4104 
4105 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
4106 			 tid >= CDP_DATA_TID_MAX))
4107 		return;
4108 
4109 	if (qdf_unlikely(!pdev->sojourn_buf))
4110 		return;
4111 
4112 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
4113 							   txrx_peer->peer_id,
4114 							   DP_MOD_ID_TX_COMP);
4115 
4116 	if (qdf_unlikely(!primary_link_peer))
4117 		return;
4118 
4119 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
4120 		qdf_nbuf_data(pdev->sojourn_buf);
4121 
4122 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
4123 	sojourn_stats->cookie = (void *)
4124 			dp_monitor_peer_get_rdkstats_ctx(link_peer_soc,
4125 							 primary_link_peer);
4126 
4127 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
4128 				txdesc_ts;
4129 	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
4130 			    delta_ms);
4131 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
4132 	sojourn_stats->num_msdus[tid] = 1;
4133 	sojourn_stats->avg_sojourn_msdu[tid].internal =
4134 		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
4135 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
4136 			     pdev->sojourn_buf, HTT_INVALID_PEER,
4137 			     WDI_NO_VAL, pdev->pdev_id);
4138 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
4139 	sojourn_stats->num_msdus[tid] = 0;
4140 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
4141 
4142 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
4143 }
4144 #else
4145 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
4146 					       struct dp_txrx_peer *txrx_peer,
4147 					       uint8_t tid,
4148 					       uint64_t txdesc_ts,
4149 					       uint32_t ppdu_id)
4150 {
4151 }
4152 #endif
4153 
4154 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
4155 /**
4156  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
4157  * @soc: dp_soc handle
4158  * @desc: Tx Descriptor
4159  * @ts: HAL Tx completion descriptor contents
4160  *
4161  * This function is used to send tx completion to packet capture
4162  */
4163 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
4164 				       struct dp_tx_desc_s *desc,
4165 				       struct hal_tx_completion_status *ts)
4166 {
4167 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
4168 			     desc, ts->peer_id,
4169 			     WDI_NO_VAL, desc->pdev->pdev_id);
4170 }
4171 #endif
4172 
4173 /**
4174  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4175  * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @txrx_peer: txrx peer handle
4178  *
4179  * Return: none
4180  */
4181 void
4182 dp_tx_comp_process_desc(struct dp_soc *soc,
4183 			struct dp_tx_desc_s *desc,
4184 			struct hal_tx_completion_status *ts,
4185 			struct dp_txrx_peer *txrx_peer)
4186 {
4187 	uint64_t time_latency = 0;
4188 	uint16_t peer_id = DP_INVALID_PEER_ID;
4189 
4190 	/*
4191 	 * m_copy/tx_capture modes are not supported for
4192 	 * scatter gather packets
4193 	 */
4194 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
4195 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
4196 				desc->timestamp);
4197 	}
4198 
4199 	dp_send_completion_to_pkt_capture(soc, desc, ts);
4200 
4201 	if (dp_tx_pkt_tracepoints_enabled())
4202 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
4203 				    desc->msdu_ext_desc ?
4204 				    desc->msdu_ext_desc->tso_desc : NULL,
4205 				    desc->timestamp);
4206 
4207 	if (!(desc->msdu_ext_desc)) {
4208 		dp_tx_enh_unmap(soc, desc);
4209 		if (txrx_peer)
4210 			peer_id = txrx_peer->peer_id;
4211 
4212 		if (QDF_STATUS_SUCCESS ==
4213 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
4214 			return;
4215 		}
4216 
4217 		if (QDF_STATUS_SUCCESS ==
4218 		    dp_get_completion_indication_for_stack(soc,
4219 							   desc->pdev,
4220 							   txrx_peer, ts,
4221 							   desc->nbuf,
4222 							   time_latency)) {
4223 			dp_send_completion_to_stack(soc,
4224 						    desc->pdev,
4225 						    ts->peer_id,
4226 						    ts->ppdu_id,
4227 						    desc->nbuf);
4228 			return;
4229 		}
4230 	}
4231 
4232 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
4233 	dp_tx_comp_free_buf(soc, desc);
4234 }
4235 
4236 #ifdef DISABLE_DP_STATS
4237 /**
4238  * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @status: tx status
4242  *
4243  * Return: none
4244  */
4245 static inline
4246 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4247 				     struct dp_vdev *vdev,
4248 				     struct dp_tx_desc_s *tx_desc,
4249 				     uint8_t status)
4250 {
4251 }
4252 #else
4253 static inline
4254 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
4255 				     struct dp_vdev *vdev,
4256 				     struct dp_tx_desc_s *tx_desc,
4257 				     uint8_t status)
4258 {
4259 	void *osif_dev;
4260 	ol_txrx_stats_rx_fp stats_cbk;
4261 	uint8_t pkt_type;
4262 
4263 	qdf_assert(tx_desc);
4264 
4265 	if (!vdev ||
4266 	    !vdev->osif_vdev ||
4267 	    !vdev->stats_cb)
4268 		return;
4269 
4270 	osif_dev = vdev->osif_vdev;
4271 	stats_cbk = vdev->stats_cb;
4272 
4273 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
4274 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
4275 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
4276 			  &pkt_type);
4277 }
4278 #endif
4279 
4280 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4281 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4282 		      uint32_t delta_tsf)
4283 {
4284 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4285 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4286 						     DP_MOD_ID_CDP);
4287 
4288 	if (!vdev) {
4289 		dp_err_rl("vdev %d does not exist", vdev_id);
4290 		return;
4291 	}
4292 
4293 	vdev->delta_tsf = delta_tsf;
4294 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4295 
4296 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4297 }
4298 
4299 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4300 				      uint8_t vdev_id, bool enable)
4301 {
4302 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4303 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4304 						     DP_MOD_ID_CDP);
4305 
4306 	if (!vdev) {
4307 		dp_err_rl("vdev %d does not exist", vdev_id);
4308 		return QDF_STATUS_E_FAILURE;
4309 	}
4310 
4311 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4312 
4313 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4314 
4315 	return QDF_STATUS_SUCCESS;
4316 }
4317 
4318 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4319 			       uint32_t *val)
4320 {
4321 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4322 	struct dp_vdev *vdev;
4323 	uint32_t delay_accum;
4324 	uint32_t pkts_accum;
4325 
4326 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4327 	if (!vdev) {
4328 		dp_err_rl("vdev %d does not exist", vdev_id);
4329 		return QDF_STATUS_E_FAILURE;
4330 	}
4331 
4332 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4333 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4334 		return QDF_STATUS_E_FAILURE;
4335 	}
4336 
4337 	/* Average uplink delay based on current accumulated values */
4338 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4339 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4340 
	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
4342 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4343 		 delay_accum, pkts_accum);
4344 
4345 	/* Reset accumulated values to 0 */
4346 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4347 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4348 
4349 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4350 
4351 	return QDF_STATUS_SUCCESS;
4352 }
4353 
4354 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4355 				      struct hal_tx_completion_status *ts)
4356 {
4357 	uint32_t buffer_ts;
4358 	uint32_t delta_tsf;
4359 	uint32_t ul_delay;
4360 
	/* If tx_rate_stats_info_valid is 0, the tsf in the completion is invalid */
4362 	if (!ts->valid)
4363 		return;
4364 
4365 	if (qdf_unlikely(!vdev)) {
		dp_info_rl("vdev is null or delete in progress");
4367 		return;
4368 	}
4369 
4370 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4371 		return;
4372 
4373 	delta_tsf = vdev->delta_tsf;
4374 
4375 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4376 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4377 	 * valid up to 29 bits.
4378 	 */
4379 	buffer_ts = ts->buffer_timestamp << 10;
4380 
4381 	ul_delay = ts->tsf - buffer_ts - delta_tsf;
4382 	ul_delay &= 0x1FFFFFFF; /* mask 29 BITS */
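	/*
	 * Worked example with illustrative values: buffer_timestamp = 0x48
	 * gives buffer_ts = 0x12000 us; with ts->tsf = 0x12C00 and
	 * delta_tsf = 0x400, ul_delay = 0x800 us, i.e. 2 ms after the
	 * divide below.
	 */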
4383 	if (ul_delay > 0x1000000) {
4384 		dp_info_rl("----------------------\n"
4385 			   "Tx completion status:\n"
4386 			   "----------------------\n"
4387 			   "release_src = %d\n"
4388 			   "ppdu_id = 0x%x\n"
4389 			   "release_reason = %d\n"
4390 			   "tsf = %u (0x%x)\n"
4391 			   "buffer_timestamp = %u (0x%x)\n"
4392 			   "delta_tsf = %u (0x%x)\n",
4393 			   ts->release_src, ts->ppdu_id, ts->status,
4394 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4395 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4396 		return;
4397 	}
4398 
4399 	ul_delay /= 1000; /* in unit of ms */
4400 
4401 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4402 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4403 }
4404 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4405 static inline
4406 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4407 			       struct hal_tx_completion_status *ts)
4408 {
4409 }
4410 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4411 
4412 /**
4413  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4414  * @soc: DP soc handle
4415  * @tx_desc: software descriptor head pointer
4416  * @ts: Tx completion status
4417  * @txrx_peer: txrx peer handle
4418  * @ring_id: ring number
4419  *
4420  * Return: none
4421  */
4422 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4423 				  struct dp_tx_desc_s *tx_desc,
4424 				  struct hal_tx_completion_status *ts,
4425 				  struct dp_txrx_peer *txrx_peer,
4426 				  uint8_t ring_id)
4427 {
4428 	uint32_t length;
4429 	qdf_ether_header_t *eh;
4430 	struct dp_vdev *vdev = NULL;
4431 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4432 	enum qdf_dp_tx_rx_status dp_status;
4433 
4434 	if (!nbuf) {
4435 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4436 		goto out;
4437 	}
4438 
4439 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4440 	length = qdf_nbuf_len(nbuf);
4441 
4442 	dp_status = dp_tx_hw_to_qdf(ts->status);
4443 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4444 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4445 				 QDF_TRACE_DEFAULT_PDEV_ID,
4446 				 qdf_nbuf_data_addr(nbuf),
4447 				 sizeof(qdf_nbuf_data(nbuf)),
4448 				 tx_desc->id, ts->status, dp_status));
4449 
4450 	dp_tx_comp_debug("-------------------- \n"
4451 			 "Tx Completion Stats: \n"
4452 			 "-------------------- \n"
4453 			 "ack_frame_rssi = %d \n"
4454 			 "first_msdu = %d \n"
4455 			 "last_msdu = %d \n"
4456 			 "msdu_part_of_amsdu = %d \n"
4457 			 "rate_stats valid = %d \n"
4458 			 "bw = %d \n"
4459 			 "pkt_type = %d \n"
4460 			 "stbc = %d \n"
4461 			 "ldpc = %d \n"
4462 			 "sgi = %d \n"
4463 			 "mcs = %d \n"
4464 			 "ofdma = %d \n"
4465 			 "tones_in_ru = %d \n"
4466 			 "tsf = %d \n"
4467 			 "ppdu_id = %d \n"
4468 			 "transmit_cnt = %d \n"
4469 			 "tid = %d \n"
4470 			 "peer_id = %d\n"
4471 			 "tx_status = %d\n",
4472 			 ts->ack_frame_rssi, ts->first_msdu,
4473 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4474 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4475 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4476 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4477 			 ts->transmit_cnt, ts->tid, ts->peer_id,
4478 			 ts->status);
4479 
4480 	/* Update SoC level stats */
4481 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4482 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4483 
4484 	if (!txrx_peer) {
4485 		dp_info_rl("peer is null or deletion in progress");
4486 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4487 		goto out;
4488 	}
4489 	vdev = txrx_peer->vdev;
4490 
4491 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4492 	dp_tx_update_uplink_delay(soc, vdev, ts);
4493 
4494 	/* Update per-packet stats for mesh mode */
4495 	if (qdf_unlikely(vdev->mesh_vdev) &&
4496 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4497 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4498 
4499 	/* Update peer level stats */
4500 	if (qdf_unlikely(txrx_peer->bss_peer &&
4501 			 vdev->opmode == wlan_op_mode_ap)) {
4502 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4503 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
4504 						      length);
4505 
4506 			if (txrx_peer->vdev->tx_encap_type ==
4507 				htt_cmn_pkt_type_ethernet &&
4508 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4509 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4510 							      tx.bcast, 1,
4511 							      length);
4512 			}
4513 		}
4514 	} else {
4515 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
4516 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4517 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
4518 						      1, length);
4519 			if (qdf_unlikely(txrx_peer->in_twt)) {
4520 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
4521 							      tx.tx_success_twt,
4522 							      1, length);
4523 			}
4524 		}
4525 	}
4526 
4527 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
4528 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
4529 
4530 #ifdef QCA_SUPPORT_RDK_STATS
4531 	if (soc->rdkstats_enabled)
4532 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
4533 					    tx_desc->timestamp,
4534 					    ts->ppdu_id);
4535 #endif
4536 
4537 out:
4538 	return;
4539 }
4540 
4541 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
4542 	defined(QCA_ENHANCED_STATS_SUPPORT)
/**
 * dp_tx_update_peer_basic_stats() - Update peer basic stats
4545  * @txrx_peer: Datapath txrx_peer handle
4546  * @length: Length of the packet
4547  * @tx_status: Tx status from TQM/FW
4548  * @update: enhanced flag value present in dp_pdev
4549  *
4550  * Return: none
4551  */
4552 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4553 				   uint32_t length, uint8_t tx_status,
4554 				   bool update)
4555 {
4556 	if ((!txrx_peer->hw_txrx_stats_en) || update) {
4557 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4558 
4559 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4560 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4561 	}
4562 }
4563 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
4564 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4565 				   uint32_t length, uint8_t tx_status,
4566 				   bool update)
4567 {
	if (!txrx_peer->hw_txrx_stats_en) {
4569 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4570 
4571 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4572 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4573 	}
4574 }
4575 
4576 #else
4577 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
4578 				   uint32_t length, uint8_t tx_status,
4579 				   bool update)
4580 {
4581 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
4582 
4583 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
4584 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
4585 }
4586 #endif
4587 
/**
 * dp_tx_prefetch_next_nbuf_data() - Prefetch the next descriptor's nbuf
 *				     and nbuf data
 * @next: pointer to the next Tx software descriptor
 *
 * Return: none
 */
4594 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
4595 static inline
4596 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
4597 {
4598 	qdf_nbuf_t nbuf = NULL;
4599 
4600 	if (next)
4601 		nbuf = next->nbuf;
4602 	if (nbuf) {
4603 		/* prefetch skb->next and first few bytes of skb->cb */
4604 		qdf_prefetch(nbuf);
4605 		/* prefetch skb fields present in different cachelines */
4606 		qdf_prefetch(&nbuf->len);
4607 		qdf_prefetch(&nbuf->users);
4608 	}
4609 }
4610 #else
4611 static inline
4612 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
4613 {
4614 }
4615 #endif
4616 
4617 /**
4618  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4619  * @soc: core txrx main context
4620  * @comp_head: software descriptor head pointer
4621  * @ring_id: ring number
4622  *
4623  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4624  * and release the software descriptors after processing is complete
4625  *
4626  * Return: none
4627  */
4628 static void
4629 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4630 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4631 {
4632 	struct dp_tx_desc_s *desc;
4633 	struct dp_tx_desc_s *next;
4634 	struct hal_tx_completion_status ts;
4635 	struct dp_txrx_peer *txrx_peer = NULL;
4636 	uint16_t peer_id = DP_INVALID_PEER;
4637 	qdf_nbuf_t netbuf;
4638 	dp_txrx_ref_handle txrx_ref_handle;
4639 
4640 	desc = comp_head;
4641 
4642 	while (desc) {
4643 		next = desc->next;
4644 		dp_tx_prefetch_next_nbuf_data(next);
4645 
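		/* Cache the txrx_peer reference across consecutive
		 * descriptors belonging to the same peer to avoid
		 * repeated peer-id lookups.
		 */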
4646 		if (peer_id != desc->peer_id) {
4647 			if (txrx_peer)
4648 				dp_txrx_peer_unref_delete(txrx_ref_handle,
4649 							  DP_MOD_ID_TX_COMP);
4650 			peer_id = desc->peer_id;
4651 			txrx_peer =
4652 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
4653 							   &txrx_ref_handle,
4654 							   DP_MOD_ID_TX_COMP);
4655 		}
4656 
4657 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4658 			struct dp_pdev *pdev = desc->pdev;
4659 
4660 			if (qdf_likely(txrx_peer))
4661 				dp_tx_update_peer_basic_stats(txrx_peer,
4662 							      desc->length,
4663 							      desc->tx_status,
4664 							      false);
4665 			qdf_assert(pdev);
4666 			dp_tx_outstanding_dec(pdev);
4667 
4668 			/*
			 * Calling the QDF wrapper here creates a significant
			 * performance impact, so the wrapper call is avoided here
4671 			 */
4672 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
4673 					       desc->id, DP_TX_COMP_UNMAP);
4674 			dp_tx_nbuf_unmap(soc, desc);
4675 			qdf_nbuf_free(desc->nbuf);
4676 			dp_tx_desc_free(soc, desc, desc->pool_id);
4677 			desc = next;
4678 			continue;
4679 		}
4680 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4681 
4682 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
4683 					     ring_id);
4684 
4685 		netbuf = desc->nbuf;
4686 		/* check tx complete notification */
4687 		if (txrx_peer && qdf_nbuf_tx_notify_comp_get(netbuf))
4688 			dp_tx_notify_completion(soc, txrx_peer->vdev, desc,
4689 						netbuf, ts.status);
4690 
4691 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
4692 
4693 		dp_tx_desc_release(desc, desc->pool_id);
4694 		desc = next;
4695 	}
4696 	if (txrx_peer)
4697 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
4698 }
4699 
4700 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4701 static inline
4702 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4703 				   int max_reap_limit)
4704 {
4705 	bool limit_hit = false;
4706 
4707 	limit_hit =
4708 		(num_reaped >= max_reap_limit) ? true : false;
4709 
4710 	if (limit_hit)
4711 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4712 
4713 	return limit_hit;
4714 }
4715 
4716 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4717 {
4718 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4719 }
4720 
4721 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4722 {
4723 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
4724 
4725 	return cfg->tx_comp_loop_pkt_limit;
4726 }
4727 #else
4728 static inline
4729 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4730 				   int max_reap_limit)
4731 {
4732 	return false;
4733 }
4734 
4735 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4736 {
4737 	return false;
4738 }
4739 
4740 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4741 {
4742 	return 0;
4743 }
4744 #endif
4745 
4746 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
4747 static inline int
4748 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4749 				  int *max_reap_limit)
4750 {
4751 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
4752 							       max_reap_limit);
4753 }
4754 #else
4755 static inline int
4756 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4757 				  int *max_reap_limit)
4758 {
4759 	return 0;
4760 }
4761 #endif
4762 
4763 #ifdef DP_TX_TRACKING
4764 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
4765 {
4766 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
4767 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
4768 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
4769 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
4770 	}
4771 }
4772 #endif
4773 
4774 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
4775 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
4776 			    uint32_t quota)
4777 {
4778 	void *tx_comp_hal_desc;
4779 	void *last_prefetched_hw_desc = NULL;
4780 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
4781 	hal_soc_handle_t hal_soc;
4782 	uint8_t buffer_src;
4783 	struct dp_tx_desc_s *tx_desc = NULL;
4784 	struct dp_tx_desc_s *head_desc = NULL;
4785 	struct dp_tx_desc_s *tail_desc = NULL;
4786 	uint32_t num_processed = 0;
4787 	uint32_t count;
4788 	uint32_t num_avail_for_reap = 0;
4789 	bool force_break = false;
4790 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
4791 	int max_reap_limit, ring_near_full;
4792 
4793 	DP_HIST_INIT();
4794 
4795 more_data:
4796 
4797 	hal_soc = soc->hal_soc;
4798 	/* Re-initialize local variables to be re-used */
4799 	head_desc = NULL;
4800 	tail_desc = NULL;
4801 	count = 0;
4802 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
4803 
4804 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
4805 							   &max_reap_limit);
4806 
4807 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
4808 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
4809 		return 0;
4810 	}
4811 
4812 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
4813 
4814 	if (num_avail_for_reap >= quota)
4815 		num_avail_for_reap = quota;
4816 
4817 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
4818 	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
4819 						       num_avail_for_reap);
4820 
4821 	/* Find head descriptor from completion ring */
4822 	while (qdf_likely(num_avail_for_reap--)) {
4823 
4824 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
4825 		if (qdf_unlikely(!tx_comp_hal_desc))
4826 			break;
4827 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
4828 							   tx_comp_hal_desc);
4829 
		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; log the error and skip it.
		 */
4832 		if (qdf_unlikely(buffer_src !=
4833 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
4834 				 (qdf_unlikely(buffer_src !=
4835 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
4836 			uint8_t wbm_internal_error;
4837 
4838 			dp_err_rl(
4839 				"Tx comp release_src != TQM | FW but from %d",
4840 				buffer_src);
4841 			hal_dump_comp_desc(tx_comp_hal_desc);
4842 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
4843 
4844 			/* When WBM sees NULL buffer_addr_info in any of
4845 			 * ingress rings it sends an error indication,
4846 			 * with wbm_internal_error=1, to a specific ring.
4847 			 * The WBM2SW ring used to indicate these errors is
4848 			 * fixed in HW, and that ring is being used as Tx
4849 			 * completion ring. These errors are not related to
4850 			 * Tx completions, and should just be ignored
4851 			 */
4852 			wbm_internal_error = hal_get_wbm_internal_error(
4853 							hal_soc,
4854 							tx_comp_hal_desc);
4855 
4856 			if (wbm_internal_error) {
4857 				dp_err_rl("Tx comp wbm_internal_error!!");
4858 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
4859 
4860 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
4861 								buffer_src)
4862 					dp_handle_wbm_internal_error(
4863 						soc,
4864 						tx_comp_hal_desc,
4865 						hal_tx_comp_get_buffer_type(
4866 							tx_comp_hal_desc));
4867 
4868 			} else {
4869 				dp_err_rl("Tx comp wbm_internal_error false");
4870 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
4871 			}
4872 			continue;
4873 		}
4874 
4875 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
4876 							       tx_comp_hal_desc,
4877 							       &tx_desc);
4878 		if (!tx_desc) {
4879 			dp_err("unable to retrieve tx_desc!");
4880 			QDF_BUG(0);
4881 			continue;
4882 		}
4883 		tx_desc->buffer_src = buffer_src;
4884 		/*
4885 		 * If the release source is FW, process the HTT status
4886 		 */
4887 		if (qdf_unlikely(buffer_src ==
4888 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
4889 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
4890 
4891 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
4892 					htt_tx_status);
4893 			/* Collect hw completion contents */
4894 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4895 					      &tx_desc->comp, 1);
4896 			soc->arch_ops.dp_tx_process_htt_completion(
4897 							soc,
4898 							tx_desc,
4899 							htt_tx_status,
4900 							ring_id);
4901 		} else {
4902 			tx_desc->tx_status =
4903 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4904 			tx_desc->buffer_src = buffer_src;
4905 			/*
4906 			 * If the fast completion mode is enabled extended
4907 			 * metadata from descriptor is not copied
4908 			 */
4909 			if (qdf_likely(tx_desc->flags &
4910 						DP_TX_DESC_FLAG_SIMPLE))
4911 				goto add_to_pool;
4912 
4913 			/*
4914 			 * If the descriptor is already freed in vdev_detach,
4915 			 * continue to next descriptor
4916 			 */
4917 			if (qdf_unlikely
4918 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
4919 				 !tx_desc->flags)) {
4920 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
4921 						   tx_desc->id);
4922 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
4923 				dp_tx_desc_check_corruption(tx_desc);
4924 				continue;
4925 			}
4926 
4927 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4928 				dp_tx_comp_info_rl("pdev in down state %d",
4929 						   tx_desc->id);
4930 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
4931 				dp_tx_comp_free_buf(soc, tx_desc);
4932 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4933 				goto next_desc;
4934 			}
4935 
4936 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4937 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flags = %x, id = %d",
4939 						 tx_desc->flags, tx_desc->id);
4940 				qdf_assert_always(0);
4941 			}
4942 
4943 			/* Collect hw completion contents */
4944 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4945 					      &tx_desc->comp, 1);
4946 add_to_pool:
4947 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4948 
4949 			/* First ring descriptor on the cycle */
4950 			if (!head_desc) {
4951 				head_desc = tx_desc;
4952 				tail_desc = tx_desc;
4953 			}
4954 
4955 			tail_desc->next = tx_desc;
4956 			tx_desc->next = NULL;
4957 			tail_desc = tx_desc;
4958 		}
4959 next_desc:
4960 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4961 
		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing.
		 */
4966 
4967 		count++;
4968 
4969 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
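		/* Prefetch the next HW descriptor and its SW Tx descriptor
		 * to hide memory latency on the following iteration.
		 */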
4970 					       num_avail_for_reap,
4971 					       hal_ring_hdl,
4972 					       &last_prefetched_hw_desc,
4973 					       &last_prefetched_sw_desc);
4974 
4975 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
4976 			break;
4977 	}
4978 
4979 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4980 
4981 	/* Process the reaped descriptors */
4982 	if (head_desc)
4983 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4984 
4985 	/*
	 * If we are processing in near-full condition, there are 3 scenarios:
4987 	 * 1) Ring entries has reached critical state
4988 	 * 2) Ring entries are still near high threshold
4989 	 * 3) Ring entries are below the safe level
4990 	 *
	 * One more loop will move the state to normal processing and yield
4992 	 */
4993 	if (ring_near_full)
4994 		goto more_data;
4995 
4996 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4997 
4998 		if (num_processed >= quota)
4999 			force_break = true;
5000 
5001 		if (!force_break &&
5002 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
5003 						  hal_ring_hdl)) {
5004 			DP_STATS_INC(soc, tx.hp_oos2, 1);
5005 			if (!hif_exec_should_yield(soc->hif_handle,
5006 						   int_ctx->dp_intr_id))
5007 				goto more_data;
5008 		}
5009 	}
5010 	DP_TX_HIST_STATS_PER_PDEV();
5011 
5012 	return num_processed;
5013 }
5014 
5015 #ifdef FEATURE_WLAN_TDLS
5016 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5017 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
5018 {
5019 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5020 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5021 						     DP_MOD_ID_TDLS);
5022 
5023 	if (!vdev) {
5024 		dp_err("vdev handle for id %d is NULL", vdev_id);
5025 		return NULL;
5026 	}
5027 
5028 	if (tx_spec & OL_TX_SPEC_NO_FREE)
5029 		vdev->is_tdls_frame = true;
5030 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
5031 
5032 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
5033 }
5034 #endif
5035 
5036 /**
5037  * dp_tx_vdev_attach() - attach vdev to dp tx
5038  * @vdev: virtual device instance
5039  *
5040  * Return: QDF_STATUS_SUCCESS: success
5041  *         QDF_STATUS_E_RESOURCES: Error return
5042  */
5043 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
5044 {
5045 	int pdev_id;
5046 	/*
5047 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
5048 	 */
5049 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
5050 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
5051 
5052 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
5053 				       vdev->vdev_id);
5054 
5055 	pdev_id =
5056 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
5057 						       vdev->pdev->pdev_id);
5058 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
5059 
5060 	/*
5061 	 * Set HTT Extension Valid bit to 0 by default
5062 	 */
5063 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
5064 
5065 	dp_tx_vdev_update_search_flags(vdev);
5066 
5067 	return QDF_STATUS_SUCCESS;
5068 }
5069 
5070 #ifndef FEATURE_WDS
5071 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
5072 {
5073 	return false;
5074 }
5075 #endif
5076 
5077 /**
5078  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
5079  * @vdev: virtual device instance
5080  *
5081  * Return: void
5082  *
5083  */
5084 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
5085 {
5086 	struct dp_soc *soc = vdev->pdev->soc;
5087 
5088 	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
5090 	 * for TDLS link
5091 	 *
5092 	 * Enable AddrY (SA based search) only for non-WDS STA and
5093 	 * ProxySTA VAP (in HKv1) modes.
5094 	 *
5095 	 * In all other VAP modes, only DA based search should be
5096 	 * enabled
5097 	 */
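	/*
	 * Resulting policy (summary of the cases below):
	 *   STA with TDLS link        -> ADDRX | ADDRY
	 *   STA without DA override   -> ADDRY
	 *   all other modes           -> ADDRX
	 */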
5098 	if (vdev->opmode == wlan_op_mode_sta &&
5099 	    vdev->tdls_link_connected)
5100 		vdev->hal_desc_addr_search_flags =
5101 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
5102 	else if ((vdev->opmode == wlan_op_mode_sta) &&
5103 		 !dp_tx_da_search_override(vdev))
5104 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
5105 	else
5106 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
5107 
5108 	if (vdev->opmode == wlan_op_mode_sta)
5109 		vdev->search_type = soc->sta_mode_search_policy;
5110 	else
5111 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
5112 }
5113 
5114 static inline bool
5115 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
5116 			  struct dp_vdev *vdev,
5117 			  struct dp_tx_desc_s *tx_desc)
5118 {
5119 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
5120 		return false;
5121 
	/*
	 * If vdev is given, then only check whether the desc
	 * vdev matches. If vdev is NULL, then check whether the
	 * desc pdev matches.
	 */
5127 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
5128 		(tx_desc->pdev == pdev);
5129 }
5130 
5131 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5132 /**
5133  * dp_tx_desc_flush() - release resources associated
5134  *                      to TX Desc
5135  *
5136  * @dp_pdev: Handle to DP pdev structure
5137  * @vdev: virtual device instance
 * NULL: no specific vdev is required; check all allocated TX desc
5139  * on this pdev.
5140  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
5141  *
5142  * @force_free:
5143  * true: flush the TX desc.
5144  * false: only reset the Vdev in each allocated TX desc
5145  * that associated to current Vdev.
5146  *
5147  * This function will go through the TX desc pool to flush
5148  * the outstanding TX data or reset Vdev to NULL in associated TX
5149  * Desc.
5150  */
5151 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5152 		      bool force_free)
5153 {
5154 	uint8_t i;
5155 	uint32_t j;
5156 	uint32_t num_desc, page_id, offset;
5157 	uint16_t num_desc_per_page;
5158 	struct dp_soc *soc = pdev->soc;
5159 	struct dp_tx_desc_s *tx_desc = NULL;
5160 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5161 
5162 	if (!vdev && !force_free) {
5163 		dp_err("Reset TX desc vdev, Vdev param is required!");
5164 		return;
5165 	}
5166 
5167 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
5168 		tx_desc_pool = &soc->tx_desc[i];
5169 		if (!(tx_desc_pool->pool_size) ||
5170 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
5171 		    !(tx_desc_pool->desc_pages.cacheable_pages))
5172 			continue;
5173 
5174 		/*
		 * Add flow pool lock protection in case the pool is freed
		 * because all tx_desc are recycled while handling TX completion.
		 * This is not necessary for a force flush because:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. the dp interrupt has been disabled before the force TX
		 *    desc flush in dp_pdev_deinit().
5182 		 */
5183 		if (!force_free)
5184 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
5185 		num_desc = tx_desc_pool->pool_size;
5186 		num_desc_per_page =
5187 			tx_desc_pool->desc_pages.num_element_per_page;
5188 		for (j = 0; j < num_desc; j++) {
5189 			page_id = j / num_desc_per_page;
5190 			offset = j % num_desc_per_page;
5191 
5192 			if (qdf_unlikely(!(tx_desc_pool->
5193 					 desc_pages.cacheable_pages)))
5194 				break;
5195 
5196 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5197 
5198 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5199 				/*
5200 				 * Free TX desc if force free is
5201 				 * required, otherwise only reset vdev
5202 				 * in this TX desc.
5203 				 */
5204 				if (force_free) {
5205 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5206 					dp_tx_comp_free_buf(soc, tx_desc);
5207 					dp_tx_desc_release(tx_desc, i);
5208 				} else {
5209 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5210 				}
5211 			}
5212 		}
5213 		if (!force_free)
5214 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5215 	}
5216 }
5217 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5218 /**
5219  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5220  *
5221  * @soc: Handle to DP soc structure
5222  * @tx_desc: pointer of one TX desc
5223  * @desc_pool_id: TX Desc pool id
5224  */
5225 static inline void
5226 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5227 		      uint8_t desc_pool_id)
5228 {
5229 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5230 
5231 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5232 
5233 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5234 }
5235 
5236 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5237 		      bool force_free)
5238 {
5239 	uint8_t i, num_pool;
5240 	uint32_t j;
5241 	uint32_t num_desc, page_id, offset;
5242 	uint16_t num_desc_per_page;
5243 	struct dp_soc *soc = pdev->soc;
5244 	struct dp_tx_desc_s *tx_desc = NULL;
5245 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5246 
5247 	if (!vdev && !force_free) {
5248 		dp_err("Reset TX desc vdev, Vdev param is required!");
5249 		return;
5250 	}
5251 
5252 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5253 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5254 
5255 	for (i = 0; i < num_pool; i++) {
5256 		tx_desc_pool = &soc->tx_desc[i];
5257 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5258 			continue;
5259 
5260 		num_desc_per_page =
5261 			tx_desc_pool->desc_pages.num_element_per_page;
5262 		for (j = 0; j < num_desc; j++) {
5263 			page_id = j / num_desc_per_page;
5264 			offset = j % num_desc_per_page;
5265 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5266 
5267 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5268 				if (force_free) {
5269 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5270 					dp_tx_comp_free_buf(soc, tx_desc);
5271 					dp_tx_desc_release(tx_desc, i);
5272 				} else {
5273 					dp_tx_desc_reset_vdev(soc, tx_desc,
5274 							      i);
5275 				}
5276 			}
5277 		}
5278 	}
5279 }
5280 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5281 
5282 /**
5283  * dp_tx_vdev_detach() - detach vdev from dp tx
5284  * @vdev: virtual device instance
5285  *
5286  * Return: QDF_STATUS_SUCCESS: success
5287  *         QDF_STATUS_E_RESOURCES: Error return
5288  */
5289 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5290 {
5291 	struct dp_pdev *pdev = vdev->pdev;
5292 
5293 	/* Reset TX desc associated to this Vdev as NULL */
5294 	dp_tx_desc_flush(pdev, vdev, false);
5295 
5296 	return QDF_STATUS_SUCCESS;
5297 }
5298 
5299 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5300 /* Pools will be allocated dynamically */
5301 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5302 					   int num_desc)
5303 {
5304 	uint8_t i;
5305 
5306 	for (i = 0; i < num_pool; i++) {
5307 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5308 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5309 	}
5310 
5311 	return QDF_STATUS_SUCCESS;
5312 }
5313 
5314 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5315 					  int num_desc)
5316 {
5317 	return QDF_STATUS_SUCCESS;
5318 }
5319 
5320 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5321 {
5322 }
5323 
5324 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5325 {
5326 	uint8_t i;
5327 
5328 	for (i = 0; i < num_pool; i++)
5329 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5330 }
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5332 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5333 					   int num_desc)
5334 {
5335 	uint8_t i, count;
5336 
5337 	/* Allocate software Tx descriptor pools */
5338 	for (i = 0; i < num_pool; i++) {
5339 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5340 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5341 				  FL("Tx Desc Pool alloc %d failed %pK"),
5342 				  i, soc);
5343 			goto fail;
5344 		}
5345 	}
5346 	return QDF_STATUS_SUCCESS;
5347 
5348 fail:
5349 	for (count = 0; count < i; count++)
5350 		dp_tx_desc_pool_free(soc, count);
5351 
5352 	return QDF_STATUS_E_NOMEM;
5353 }
5354 
5355 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5356 					  int num_desc)
5357 {
	uint8_t i;

5359 	for (i = 0; i < num_pool; i++) {
5360 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5361 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5362 				  FL("Tx Desc Pool init %d failed %pK"),
5363 				  i, soc);
5364 			return QDF_STATUS_E_NOMEM;
5365 		}
5366 	}
5367 	return QDF_STATUS_SUCCESS;
5368 }
5369 
5370 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5371 {
5372 	uint8_t i;
5373 
5374 	for (i = 0; i < num_pool; i++)
5375 		dp_tx_desc_pool_deinit(soc, i);
5376 }
5377 
5378 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5379 {
5380 	uint8_t i;
5381 
5382 	for (i = 0; i < num_pool; i++)
5383 		dp_tx_desc_pool_free(soc, i);
5384 }
5385 
5386 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5387 
5388 /**
5389  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5390  * @soc: core txrx main context
5391  * @num_pool: number of pools
 * Return: None
5393  */
5394 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5395 {
5396 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5397 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5398 }
5399 
5400 /**
5401  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5402  * @soc: core txrx main context
5403  * @num_pool: number of pools
 * Return: None
5405  */
5406 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5407 {
5408 	dp_tx_tso_desc_pool_free(soc, num_pool);
5409 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5410 }
5411 
5412 /**
5413  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5414  * @soc: core txrx main context
5415  *
5416  * This function frees all tx related descriptors as below
5417  * 1. Regular TX descriptors (static pools)
5418  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5419  * 3. TSO descriptors
 * Return: None
5421  */
5422 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5423 {
5424 	uint8_t num_pool;
5425 
5426 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5427 
5428 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5429 	dp_tx_ext_desc_pool_free(soc, num_pool);
5430 	dp_tx_delete_static_pools(soc, num_pool);
5431 }
5432 
5433 /**
5434  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5435  * @soc: core txrx main context
5436  *
5437  * This function de-initializes all tx related descriptors as below
5438  * 1. Regular TX descriptors (static pools)
5439  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5440  * 3. TSO descriptors
 * Return: None
5442  */
5443 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5444 {
5445 	uint8_t num_pool;
5446 
5447 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5448 
5449 	dp_tx_flow_control_deinit(soc);
5450 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5451 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5452 	dp_tx_deinit_static_pools(soc, num_pool);
5453 }
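
/*
 * Illustrative teardown ordering (editorial sketch): the deinit step above
 * undoes dp_soc_tx_desc_sw_pools_init() and is expected to run before
 * dp_soc_tx_desc_sw_pools_free(), which releases the memory reserved by
 * dp_soc_tx_desc_sw_pools_alloc().  A hypothetical soc teardown helper:
 *
 *	static void example_soc_tx_desc_teardown(struct dp_soc *soc)
 *	{
 *		dp_soc_tx_desc_sw_pools_deinit(soc);
 *		dp_soc_tx_desc_sw_pools_free(soc);
 *	}
 */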
5454 
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor and num-seg pools
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
5464 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5465 					 uint8_t num_pool,
5466 					 uint16_t num_desc)
5467 {
5468 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5469 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5470 		return QDF_STATUS_E_FAILURE;
5471 	}
5472 
5473 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5474 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5475 		       num_pool, soc);
5476 		return QDF_STATUS_E_FAILURE;
5477 	}
5478 	return QDF_STATUS_SUCCESS;
5479 }
5480 
5481 /**
5482  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5483  * @soc: DP soc handle
5484  * @num_pool: Number of pools
5485  * @num_desc: Number of descriptors
5486  *
5487  * Initialize TSO descriptor pools
5488  *
5489  * Return: QDF_STATUS_E_FAILURE on failure or
5490  * QDF_STATUS_SUCCESS on success
5491  */
5493 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5494 					uint8_t num_pool,
5495 					uint16_t num_desc)
5496 {
5497 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5499 		return QDF_STATUS_E_FAILURE;
5500 	}
5501 
5502 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
5504 		       num_pool, soc);
5505 		return QDF_STATUS_E_FAILURE;
5506 	}
5507 	return QDF_STATUS_SUCCESS;
5508 }
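
/*
 * Illustrative lifecycle sketch (editorial addition): the TSO common pools
 * follow the same alloc -> init -> deinit -> free pattern used by the other
 * pools in this file; dp_tso_soc_attach()/dp_tso_soc_detach() further below
 * wrap exactly this sequence for the deferred-attach case.  num_pool and
 * num_ext_desc are assumed to come from wlan_cfg, as in the callers below.
 *
 *	if (QDF_IS_STATUS_SUCCESS(dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool,
 *								num_ext_desc)))
 *		dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc);
 *	...
 *	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
 *	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
 */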
5509 
5510 /**
5511  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5512  * @soc: core txrx main context
5513  *
5514  * This function allocates memory for following descriptor pools
5515  * 1. regular sw tx descriptor pools (static pools)
5516  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5517  * 3. TSO descriptor pools
5518  *
5519  * Return: QDF_STATUS_SUCCESS: success
5520  *         QDF_STATUS_E_RESOURCES: Error return
5521  */
5522 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5523 {
5524 	uint8_t num_pool;
5525 	uint32_t num_desc;
5526 	uint32_t num_ext_desc;
5527 
5528 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5529 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5530 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5531 
5532 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5533 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5534 		  __func__, num_pool, num_desc);
5535 
5536 	if ((num_pool > MAX_TXDESC_POOLS) ||
5537 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5538 		goto fail1;
5539 
5540 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5541 		goto fail1;
5542 
5543 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5544 		goto fail2;
5545 
5546 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5547 		return QDF_STATUS_SUCCESS;
5548 
5549 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5550 		goto fail3;
5551 
5552 	return QDF_STATUS_SUCCESS;
5553 
5554 fail3:
5555 	dp_tx_ext_desc_pool_free(soc, num_pool);
5556 fail2:
5557 	dp_tx_delete_static_pools(soc, num_pool);
5558 fail1:
5559 	return QDF_STATUS_E_RESOURCES;
5560 }
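
/*
 * Illustrative bring-up sketch (editorial addition): this function only
 * reserves memory; dp_soc_tx_desc_sw_pools_init() below is still expected
 * to run before any descriptor can be handed out.  A hypothetical caller
 * would pair the two and unwind on failure:
 *
 *	if (dp_soc_tx_desc_sw_pools_alloc(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	if (dp_soc_tx_desc_sw_pools_init(soc) != QDF_STATUS_SUCCESS) {
 *		dp_soc_tx_desc_sw_pools_free(soc);
 *		return QDF_STATUS_E_RESOURCES;
 *	}
 */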
5561 
5562 /**
5563  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5564  * @soc: core txrx main context
5565  *
5566  * This function initializes the following TX descriptor pools
5567  * 1. regular sw tx descriptor pools (static pools)
5568  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5569  * 3. TSO descriptor pools
5570  *
5571  * Return: QDF_STATUS_SUCCESS: success
5572  *	   QDF_STATUS_E_RESOURCES: Error return
5573  */
5574 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5575 {
5576 	uint8_t num_pool;
5577 	uint32_t num_desc;
5578 	uint32_t num_ext_desc;
5579 
5580 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5581 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5582 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5583 
5584 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5585 		goto fail1;
5586 
5587 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5588 		goto fail2;
5589 
5590 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5591 		return QDF_STATUS_SUCCESS;
5592 
5593 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5594 		goto fail3;
5595 
5596 	dp_tx_flow_control_init(soc);
5597 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5598 	return QDF_STATUS_SUCCESS;
5599 
5600 fail3:
5601 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5602 fail2:
5603 	dp_tx_deinit_static_pools(soc, num_pool);
5604 fail1:
5605 	return QDF_STATUS_E_RESOURCES;
5606 }
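
/*
 * Illustrative note on the deferred TSO path (editorial addition): when
 * wlan_cfg_is_tso_desc_attach_defer() returns true, both the alloc and the
 * init routine above skip the TSO pools, and dp_tso_soc_attach() below is
 * expected to be called later to do that deferred work.  txrx_soc here is
 * assumed to be the cdp handle corresponding to this dp_soc:
 *
 *	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
 *		dp_tso_soc_attach(txrx_soc);
 */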
5607 
5608 /**
5609  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5610  * @txrx_soc: dp soc handle
5611  *
 * Return: QDF_STATUS_SUCCESS on success or
 *	   QDF_STATUS_E_FAILURE on failure
5614  */
5615 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5616 {
5617 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5618 	uint8_t num_pool;
5619 	uint32_t num_desc;
5620 	uint32_t num_ext_desc;
5621 
5622 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5623 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5624 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5625 
5626 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5627 		return QDF_STATUS_E_FAILURE;
5628 
5629 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5630 		return QDF_STATUS_E_FAILURE;
5631 
5632 	return QDF_STATUS_SUCCESS;
5633 }
5634 
5635 /**
5636  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5637  * @txrx_soc: dp soc handle
5638  *
 * Return: QDF_STATUS_SUCCESS always
5640  */
5641 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5642 {
5643 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5644 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5645 
5646 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5647 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5648 
5649 	return QDF_STATUS_SUCCESS;
5650 }
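
/*
 * Illustrative pairing (editorial addition): dp_tso_soc_attach() and
 * dp_tso_soc_detach() are symmetric, so a caller that attached the deferred
 * TSO descriptors is expected to detach them on the mirror teardown path:
 *
 *	if (dp_tso_soc_attach(txrx_soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_tso_soc_detach(txrx_soc);
 */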
5651 
5652