xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 5611ef508114526caa3c58ffe2e188650c7b53d1)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef ATH_SUPPORT_IQUE
38 #include "dp_txrx_me.h"
39 #endif
40 #include "dp_hist.h"
41 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
42 #include <dp_swlm.h>
43 #endif
44 #ifdef WIFI_MONITOR_SUPPORT
45 #include <dp_mon.h>
46 #endif
47 #ifdef FEATURE_WDS
48 #include "dp_txrx_wds.h"
49 #endif
50 
51 /* Flag to skip CCE classify when mesh or tid override enabled */
52 #define DP_TX_SKIP_CCE_CLASSIFY \
53 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
54 
55 /* TODO Add support in TSO */
56 #define DP_DESC_NUM_FRAG(x) 0
57 
58 /* disable TQM_BYPASS */
59 #define TQM_BYPASS_WAR 0
60 
61 /* invalid peer id for reinject */
62 #define DP_INVALID_PEER 0XFFFE
63 
64 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
65 /**
66  * dp_update_tx_desc_stats - Track the peak outstanding tx desc count
67  * on the pdev and report the outstanding and peak counts to the
68  * sysfs memory stats infrastructure
69  * @pdev: DP pdev handle
70  *
71  * Return: void
72  */
73 static inline void
74 dp_update_tx_desc_stats(struct dp_pdev *pdev)
75 {
76 	int32_t tx_descs_cnt =
77 		qdf_atomic_read(&pdev->num_tx_outstanding);
78 	if (pdev->tx_descs_max < tx_descs_cnt)
79 		pdev->tx_descs_max = tx_descs_cnt;
80 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
81 				   pdev->tx_descs_max);
82 }
83 
84 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
85 
86 static inline void
87 dp_update_tx_desc_stats(struct dp_pdev *pdev)
88 {
89 }
90 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
91 
92 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
93 static inline
94 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
95 {
96 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
97 				     QDF_DMA_TO_DEVICE,
98 				     desc->nbuf->len);
99 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
100 }
101 
102 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
103 {
104 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
105 		qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
106 					     QDF_DMA_TO_DEVICE,
107 					     desc->nbuf->len);
108 }
109 #else
110 static inline
111 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
112 {
113 }
114 
115 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
116 {
117 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
118 				     QDF_DMA_TO_DEVICE, desc->nbuf->len);
119 }
120 #endif
121 
122 #ifdef QCA_TX_LIMIT_CHECK
123 /**
124  * dp_tx_limit_check - Check if allocated tx descriptors reached
125  * soc max limit and pdev max limit
126  * @vdev: DP vdev handle
127  *
128  * Return: true if allocated tx descriptors reached max configured value, else
129  * false
130  */
131 static inline bool
132 dp_tx_limit_check(struct dp_vdev *vdev)
133 {
134 	struct dp_pdev *pdev = vdev->pdev;
135 	struct dp_soc *soc = pdev->soc;
136 
137 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
138 			soc->num_tx_allowed) {
139 		dp_tx_info("queued packets are more than max tx, drop the frame");
140 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
141 		return true;
142 	}
143 
144 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
145 			pdev->num_tx_allowed) {
146 		dp_tx_info("queued packets are more than max tx, drop the frame");
147 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
148 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
149 		return true;
150 	}
151 	return false;
152 }
153 
154 /**
155  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
156  * reached soc max limit
157  * @vdev: DP vdev handle
158  *
159  * Return: true if allocated tx exception descriptors reached max configured
160  *         value, else false
161  */
162 static inline bool
163 dp_tx_exception_limit_check(struct dp_vdev *vdev)
164 {
165 	struct dp_pdev *pdev = vdev->pdev;
166 	struct dp_soc *soc = pdev->soc;
167 
168 	if (qdf_atomic_read(&soc->num_tx_exception) >=
169 			soc->num_msdu_exception_desc) {
170 		dp_info("exc packets are more than max drop the exc pkt");
171 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
172 		return true;
173 	}
174 
175 	return false;
176 }
177 
178 /**
179  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
180  * @pdev: DP pdev handle
181  *
182  * Return: void
183  */
184 static inline void
185 dp_tx_outstanding_inc(struct dp_pdev *pdev)
186 {
187 	struct dp_soc *soc = pdev->soc;
188 
189 	qdf_atomic_inc(&pdev->num_tx_outstanding);
190 	qdf_atomic_inc(&soc->num_tx_outstanding);
191 	dp_update_tx_desc_stats(pdev);
192 }
193 
194 /**
195  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
196  * @pdev: DP pdev handle
197  *
198  * Return: void
199  */
200 static inline void
201 dp_tx_outstanding_dec(struct dp_pdev *pdev)
202 {
203 	struct dp_soc *soc = pdev->soc;
204 
205 	qdf_atomic_dec(&pdev->num_tx_outstanding);
206 	qdf_atomic_dec(&soc->num_tx_outstanding);
207 	dp_update_tx_desc_stats(pdev);
208 }
209 
210 #else //QCA_TX_LIMIT_CHECK
211 static inline bool
212 dp_tx_limit_check(struct dp_vdev *vdev)
213 {
214 	return false;
215 }
216 
217 static inline bool
218 dp_tx_exception_limit_check(struct dp_vdev *vdev)
219 {
220 	return false;
221 }
222 
223 static inline void
224 dp_tx_outstanding_inc(struct dp_pdev *pdev)
225 {
226 	qdf_atomic_inc(&pdev->num_tx_outstanding);
227 	dp_update_tx_desc_stats(pdev);
228 }
229 
230 static inline void
231 dp_tx_outstanding_dec(struct dp_pdev *pdev)
232 {
233 	qdf_atomic_dec(&pdev->num_tx_outstanding);
234 	dp_update_tx_desc_stats(pdev);
235 }
236 #endif //QCA_TX_LIMIT_CHECK
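
/*
 * Illustrative sketch only (not code from this revision): a transmit path
 * is expected to pair the limit check with the outstanding-count update,
 * and the completion path undoes it, roughly as:
 *
 *	if (dp_tx_limit_check(vdev))
 *		return nbuf;			// caller drops/returns the skb
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	...
 *	dp_tx_outstanding_inc(pdev);		// after a successful alloc
 *	...
 *	dp_tx_outstanding_dec(pdev);		// in dp_tx_desc_release()
 *
 * dp_tx_desc_alloc()/dp_tx_desc_release() are the real helpers used later
 * in this file; the control flow above is a simplified outline.
 */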
237 
238 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
239 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
240 {
241 	enum dp_tx_event_type type;
242 
243 	if (flags & DP_TX_DESC_FLAG_FLUSH)
244 		type = DP_TX_DESC_FLUSH;
245 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
246 		type = DP_TX_COMP_UNMAP_ERR;
247 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
248 		type = DP_TX_COMP_UNMAP;
249 	else
250 		type = DP_TX_DESC_UNMAP;
251 
252 	return type;
253 }
254 
255 static inline void
256 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
257 		       qdf_nbuf_t skb, uint32_t sw_cookie,
258 		       enum dp_tx_event_type type)
259 {
260 	struct dp_tx_desc_event *entry;
261 	uint32_t idx;
262 
263 	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
264 		return;
265 
266 	switch (type) {
267 	case DP_TX_COMP_UNMAP:
268 	case DP_TX_COMP_UNMAP_ERR:
269 	case DP_TX_COMP_MSDU_EXT:
270 		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
271 						DP_TX_COMP_HISTORY_SIZE);
272 		entry = &soc->tx_comp_history->entry[idx];
273 		break;
274 	case DP_TX_DESC_MAP:
275 	case DP_TX_DESC_UNMAP:
276 	case DP_TX_DESC_COOKIE:
277 	case DP_TX_DESC_FLUSH:
278 		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
279 						DP_TX_TCL_HISTORY_SIZE);
280 		entry = &soc->tx_tcl_history->entry[idx];
281 		break;
282 	default:
283 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
284 		return;
285 	}
286 
287 	entry->skb = skb;
288 	entry->paddr = paddr;
289 	entry->sw_cookie = sw_cookie;
290 	entry->type = type;
291 	entry->ts = qdf_get_log_timestamp();
292 }
293 
294 static inline void
295 dp_tx_tso_seg_history_add(struct dp_soc *soc,
296 			  struct qdf_tso_seg_elem_t *tso_seg,
297 			  qdf_nbuf_t skb, uint32_t sw_cookie,
298 			  enum dp_tx_event_type type)
299 {
300 	int i;
301 
302 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
303 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
304 				       skb, sw_cookie, type);
305 	}
306 
307 	if (!tso_seg->next)
308 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
309 				       skb, 0xFFFFFFFF, type);
310 }
311 
312 static inline void
313 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
314 		      qdf_nbuf_t skb, uint32_t sw_cookie,
315 		      enum dp_tx_event_type type)
316 {
317 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
318 	uint32_t num_segs = tso_info.num_segs;
319 
320 	while (num_segs) {
321 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
322 		curr_seg = curr_seg->next;
323 		num_segs--;
324 	}
325 }
326 
327 #else
328 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
329 {
330 	return DP_TX_DESC_INVAL_EVT;
331 }
332 
333 static inline void
334 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
335 		       qdf_nbuf_t skb, uint32_t sw_cookie,
336 		       enum dp_tx_event_type type)
337 {
338 }
339 
340 static inline void
341 dp_tx_tso_seg_history_add(struct dp_soc *soc,
342 			  struct qdf_tso_seg_elem_t *tso_seg,
343 			  qdf_nbuf_t skb, uint32_t sw_cookie,
344 			  enum dp_tx_event_type type)
345 {
346 }
347 
348 static inline void
349 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
350 		      qdf_nbuf_t skb, uint32_t sw_cookie,
351 		      enum dp_tx_event_type type)
352 {
353 }
354 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
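
/*
 * Illustrative sketch only (assumed caller, not code from this revision):
 * a caller recording a descriptor event typically derives the event type
 * from the descriptor flags and logs the buffer/cookie pair, e.g.:
 *
 *	enum dp_tx_event_type type = dp_tx_get_event_type(tx_desc->flags);
 *
 *	dp_tx_desc_history_add(soc, tx_desc->dma_addr, tx_desc->nbuf,
 *			       tx_desc->id, type);
 *
 * Completion events land in soc->tx_comp_history and all other events in
 * soc->tx_tcl_history, as selected by the switch above.
 */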
355 
356 #if defined(FEATURE_TSO)
357 /**
358  * dp_tx_tso_unmap_segment() - Unmap TSO segment
359  *
360  * @soc - core txrx main context
361  * @seg_desc - tso segment descriptor
362  * @num_seg_desc - tso number segment descriptor
363  */
364 static void dp_tx_tso_unmap_segment(
365 		struct dp_soc *soc,
366 		struct qdf_tso_seg_elem_t *seg_desc,
367 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
368 {
369 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
370 	if (qdf_unlikely(!seg_desc)) {
371 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
372 			 __func__, __LINE__);
373 		qdf_assert(0);
374 	} else if (qdf_unlikely(!num_seg_desc)) {
375 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
376 			 __func__, __LINE__);
377 		qdf_assert(0);
378 	} else {
379 		bool is_last_seg;
380 		/* no tso segment left to do dma unmap */
381 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
382 			return;
383 
384 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
385 					true : false;
386 		qdf_nbuf_unmap_tso_segment(soc->osdev,
387 					   seg_desc, is_last_seg);
388 		num_seg_desc->num_seg.tso_cmn_num_seg--;
389 	}
390 }
391 
392 /**
393  * dp_tx_tso_desc_release() - Release the tso segment and tso num-seg
394  *                            descriptors back to the freelist
395  *
396  * @soc - soc device handle
397  * @tx_desc - Tx software descriptor
398  */
399 static void dp_tx_tso_desc_release(struct dp_soc *soc,
400 				   struct dp_tx_desc_s *tx_desc)
401 {
402 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
403 	if (qdf_unlikely(!tx_desc->tso_desc)) {
404 		dp_tx_err("TSO desc is NULL!");
405 		qdf_assert(0);
406 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
407 		dp_tx_err("TSO num desc is NULL!");
408 		qdf_assert(0);
409 	} else {
410 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
411 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
412 
413 		/* Add the tso num segment into the free list */
414 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
415 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
416 					    tx_desc->tso_num_desc);
417 			tx_desc->tso_num_desc = NULL;
418 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
419 		}
420 
421 		/* Add the tso segment into the free list*/
422 		dp_tx_tso_desc_free(soc,
423 				    tx_desc->pool_id, tx_desc->tso_desc);
424 		tx_desc->tso_desc = NULL;
425 	}
426 }
427 #else
428 static void dp_tx_tso_unmap_segment(
429 		struct dp_soc *soc,
430 		struct qdf_tso_seg_elem_t *seg_desc,
431 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
432 
433 {
434 }
435 
436 static void dp_tx_tso_desc_release(struct dp_soc *soc,
437 				   struct dp_tx_desc_s *tx_desc)
438 {
439 }
440 #endif
441 
442 /**
443  * dp_tx_desc_release() - Release Tx Descriptor
444  * @tx_desc: Tx Descriptor
445  * @desc_pool_id: Descriptor Pool ID
446  *
447  * Deallocate all resources attached to Tx descriptor and free the Tx
448  * descriptor.
449  *
450  * Return: void
451  */
452 static void
453 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
454 {
455 	struct dp_pdev *pdev = tx_desc->pdev;
456 	struct dp_soc *soc;
457 	uint8_t comp_status = 0;
458 
459 	qdf_assert(pdev);
460 
461 	soc = pdev->soc;
462 
463 	dp_tx_outstanding_dec(pdev);
464 
465 	if (tx_desc->frm_type == dp_tx_frm_tso)
466 		dp_tx_tso_desc_release(soc, tx_desc);
467 
468 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
469 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
470 
471 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
472 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
473 
474 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
475 		qdf_atomic_dec(&soc->num_tx_exception);
476 
477 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
478 				tx_desc->buffer_src)
479 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
480 							     soc->hal_soc);
481 	else
482 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
483 
484 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
485 		    tx_desc->id, comp_status,
486 		    qdf_atomic_read(&pdev->num_tx_outstanding));
487 
488 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
489 	return;
490 }
491 
492 /**
493  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
494  * @vdev: DP vdev Handle
495  * @nbuf: skb
496  * @msdu_info: msdu_info required to create HTT metadata
497  *
498  * Prepares and fills HTT metadata in the frame pre-header for special frames
499  * that should be transmitted using varying transmit parameters.
500  * There are 2 VDEV modes that currently need this special metadata -
501  *  1) Mesh Mode
502  *  2) DSRC Mode
503  *
504  * Return: HTT metadata size
505  *
506  */
507 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
508 					  struct dp_tx_msdu_info_s *msdu_info)
509 {
510 	uint32_t *meta_data = msdu_info->meta_data;
511 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
512 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
513 
514 	uint8_t htt_desc_size;
515 
516 	/* Size rounded up to a multiple of 8 bytes */
517 	uint8_t htt_desc_size_aligned;
518 
519 	uint8_t *hdr = NULL;
520 
521 	/*
522 	 * Metadata - HTT MSDU Extension header
523 	 */
524 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
525 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
526 
527 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
528 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
529 							   meta_data[0])) {
530 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
531 				 htt_desc_size_aligned)) {
532 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
533 							 htt_desc_size_aligned);
534 			if (!nbuf) {
535 				/*
536 				 * qdf_nbuf_realloc_headroom won't do skb_clone
537 				 * as skb_realloc_headroom does. so, no free is
538 				 * needed here.
539 				 */
540 				DP_STATS_INC(vdev,
541 					     tx_i.dropped.headroom_insufficient,
542 					     1);
543 				qdf_print(" %s[%d] skb_realloc_headroom failed",
544 					  __func__, __LINE__);
545 				return 0;
546 			}
547 		}
548 		/* Fill and add HTT metaheader */
549 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
550 		if (!hdr) {
551 			dp_tx_err("Error in filling HTT metadata");
552 
553 			return 0;
554 		}
555 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
556 
557 	} else if (vdev->opmode == wlan_op_mode_ocb) {
558 		/* Todo - Add support for DSRC */
559 	}
560 
561 	return htt_desc_size_aligned;
562 }
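
/*
 * Worked example (illustrative, the size is an assumed stand-in value):
 * if sizeof(struct htt_tx_msdu_desc_ext2_t) were 20, the rounding above
 * gives (20 + 7) & ~0x7 = 24, so 24 bytes of headroom are pushed while
 * only the first 20 bytes are filled with the descriptor. The caller adds
 * the returned aligned size (plus any align_pad) to tx_desc->pkt_offset so
 * HW can skip past the metadata to the actual frame.
 */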
563 
564 /**
565  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
566  * @tso_seg: TSO segment to process
567  * @ext_desc: Pointer to MSDU extension descriptor
568  *
569  * Return: void
570  */
571 #if defined(FEATURE_TSO)
572 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
573 		void *ext_desc)
574 {
575 	uint8_t num_frag;
576 	uint32_t tso_flags;
577 
578 	/*
579 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
580 	 * tcp_flag_mask
581 	 *
582 	 * Checksum enable flags are set in TCL descriptor and not in Extension
583 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
584 	 */
585 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
586 
587 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
588 
589 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
590 		tso_seg->tso_flags.ip_len);
591 
592 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
593 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
594 
595 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
596 		uint32_t lo = 0;
597 		uint32_t hi = 0;
598 
599 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
600 				  (tso_seg->tso_frags[num_frag].length));
601 
602 		qdf_dmaaddr_to_32s(
603 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
604 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
605 			tso_seg->tso_frags[num_frag].length);
606 	}
607 
608 	return;
609 }
610 #else
611 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
612 		void *ext_desc)
613 {
614 	return;
615 }
616 #endif
617 
618 #if defined(FEATURE_TSO)
619 /**
620  * dp_tx_free_tso_seg_list() - Loop through the tso segments
621  *                             allocated and free them
622  *
623  * @soc: soc handle
624  * @free_seg: list of tso segments
625  * @msdu_info: msdu descriptor
626  *
627  * Return - void
628  */
629 static void dp_tx_free_tso_seg_list(
630 		struct dp_soc *soc,
631 		struct qdf_tso_seg_elem_t *free_seg,
632 		struct dp_tx_msdu_info_s *msdu_info)
633 {
634 	struct qdf_tso_seg_elem_t *next_seg;
635 
636 	while (free_seg) {
637 		next_seg = free_seg->next;
638 		dp_tx_tso_desc_free(soc,
639 				    msdu_info->tx_queue.desc_pool_id,
640 				    free_seg);
641 		free_seg = next_seg;
642 	}
643 }
644 
645 /**
646  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
647  *                                 allocated and free them
648  *
649  * @soc:  soc handle
650  * @free_num_seg: list of tso number segments
651  * @msdu_info: msdu descriptor
652  * Return - void
653  */
654 static void dp_tx_free_tso_num_seg_list(
655 		struct dp_soc *soc,
656 		struct qdf_tso_num_seg_elem_t *free_num_seg,
657 		struct dp_tx_msdu_info_s *msdu_info)
658 {
659 	struct qdf_tso_num_seg_elem_t *next_num_seg;
660 
661 	while (free_num_seg) {
662 		next_num_seg = free_num_seg->next;
663 		dp_tso_num_seg_free(soc,
664 				    msdu_info->tx_queue.desc_pool_id,
665 				    free_num_seg);
666 		free_num_seg = next_num_seg;
667 	}
668 }
669 
670 /**
671  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
672  *                              do dma unmap for each segment
673  *
674  * @soc: soc handle
675  * @free_seg: list of tso segments
676  * @num_seg_desc: tso number segment descriptor
677  *
678  * Return - void
679  */
680 static void dp_tx_unmap_tso_seg_list(
681 		struct dp_soc *soc,
682 		struct qdf_tso_seg_elem_t *free_seg,
683 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
684 {
685 	struct qdf_tso_seg_elem_t *next_seg;
686 
687 	if (qdf_unlikely(!num_seg_desc)) {
688 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
689 		return;
690 	}
691 
692 	while (free_seg) {
693 		next_seg = free_seg->next;
694 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
695 		free_seg = next_seg;
696 	}
697 }
698 
699 #ifdef FEATURE_TSO_STATS
700 /**
701  * dp_tso_get_stats_idx() - Retrieve the tso packet id
702  * @pdev: pdev handle
703  *
704  * Return: id
705  */
706 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
707 {
708 	uint32_t stats_idx;
709 
710 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
711 						% CDP_MAX_TSO_PACKETS);
712 	return stats_idx;
713 }
714 #else
715 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
716 {
717 	return 0;
718 }
719 #endif /* FEATURE_TSO_STATS */
720 
721 /**
722  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
723  *				     free the tso segments descriptor and
724  *				     tso num segments descriptor
725  *
726  * @soc:  soc handle
727  * @msdu_info: msdu descriptor
728  * @tso_seg_unmap: flag to show if dma unmap is necessary
729  *
730  * Return - void
731  */
732 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
733 					  struct dp_tx_msdu_info_s *msdu_info,
734 					  bool tso_seg_unmap)
735 {
736 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
737 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
738 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
739 					tso_info->tso_num_seg_list;
740 
741 	/* do dma unmap for each segment */
742 	if (tso_seg_unmap)
743 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
744 
745 	/* free all tso num-seg descriptors (typically there is only one) */
746 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
747 
748 	/* free all tso segment descriptor */
749 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
750 }
751 
752 /**
753  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
754  * @vdev: virtual device handle
755  * @msdu: network buffer
756  * @msdu_info: meta data associated with the msdu
757  *
758  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
759  */
760 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
761 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
762 {
763 	struct qdf_tso_seg_elem_t *tso_seg;
764 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
765 	struct dp_soc *soc = vdev->pdev->soc;
766 	struct dp_pdev *pdev = vdev->pdev;
767 	struct qdf_tso_info_t *tso_info;
768 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
769 	tso_info = &msdu_info->u.tso_info;
770 	tso_info->curr_seg = NULL;
771 	tso_info->tso_seg_list = NULL;
772 	tso_info->num_segs = num_seg;
773 	msdu_info->frm_type = dp_tx_frm_tso;
774 	tso_info->tso_num_seg_list = NULL;
775 
776 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
777 
778 	while (num_seg) {
779 		tso_seg = dp_tx_tso_desc_alloc(
780 				soc, msdu_info->tx_queue.desc_pool_id);
781 		if (tso_seg) {
782 			tso_seg->next = tso_info->tso_seg_list;
783 			tso_info->tso_seg_list = tso_seg;
784 			num_seg--;
785 		} else {
786 			dp_err_rl("Failed to alloc tso seg desc");
787 			DP_STATS_INC_PKT(vdev->pdev,
788 					 tso_stats.tso_no_mem_dropped, 1,
789 					 qdf_nbuf_len(msdu));
790 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
791 
792 			return QDF_STATUS_E_NOMEM;
793 		}
794 	}
795 
796 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
797 
798 	tso_num_seg = dp_tso_num_seg_alloc(soc,
799 			msdu_info->tx_queue.desc_pool_id);
800 
801 	if (tso_num_seg) {
802 		tso_num_seg->next = tso_info->tso_num_seg_list;
803 		tso_info->tso_num_seg_list = tso_num_seg;
804 	} else {
805 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
806 			 __func__);
807 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
808 
809 		return QDF_STATUS_E_NOMEM;
810 	}
811 
812 	msdu_info->num_seg =
813 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
814 
815 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
816 			msdu_info->num_seg);
817 
818 	if (!(msdu_info->num_seg)) {
819 		/*
820 		 * Free the allocated TSO seg and num-seg descriptors and
821 		 * unmap the segments if DMA mapping was already done.
822 		 */
823 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
824 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
825 
826 		return QDF_STATUS_E_INVAL;
827 	}
828 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
829 			      msdu, 0, DP_TX_DESC_MAP);
830 
831 	tso_info->curr_seg = tso_info->tso_seg_list;
832 
833 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
834 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
835 			     msdu, msdu_info->num_seg);
836 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
837 				    tso_info->msdu_stats_idx);
838 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
839 	return QDF_STATUS_SUCCESS;
840 }
841 #else
842 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
843 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
844 {
845 	return QDF_STATUS_E_NOMEM;
846 }
847 #endif
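
/*
 * Illustrative outline (not code from this revision) of the lists built by
 * dp_tx_prepare_tso() on success, assuming a 3-segment MSDU:
 *
 *	tso_info->tso_seg_list     -> seg2 -> seg1 -> seg0 -> NULL
 *	tso_info->curr_seg         == tso_info->tso_seg_list
 *	tso_info->tso_num_seg_list -> num_seg -> NULL   (single element)
 *
 * Segment elements are prepended as they are allocated, and
 * qdf_nbuf_get_tso_info() then fills each element and performs the DMA
 * mapping of the fragments.
 */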
848 
849 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
850 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
851 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
852 
853 /**
854  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
855  * @vdev: DP Vdev handle
856  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
857  * @desc_pool_id: Descriptor Pool ID
858  *
859  * Return:
860  */
861 static
862 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
863 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
864 {
865 	uint8_t i;
866 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
867 	struct dp_tx_seg_info_s *seg_info;
868 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
869 	struct dp_soc *soc = vdev->pdev->soc;
870 
871 	/* Allocate an extension descriptor */
872 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
873 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
874 
875 	if (!msdu_ext_desc) {
876 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
877 		return NULL;
878 	}
879 
880 	if (msdu_info->exception_fw &&
881 			qdf_unlikely(vdev->mesh_vdev)) {
882 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
883 				&msdu_info->meta_data[0],
884 				sizeof(struct htt_tx_msdu_desc_ext2_t));
885 		qdf_atomic_inc(&soc->num_tx_exception);
886 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
887 	}
888 
889 	switch (msdu_info->frm_type) {
890 	case dp_tx_frm_sg:
891 	case dp_tx_frm_me:
892 	case dp_tx_frm_raw:
893 		seg_info = msdu_info->u.sg_info.curr_seg;
894 		/* Update the buffer pointers in MSDU Extension Descriptor */
895 		for (i = 0; i < seg_info->frag_cnt; i++) {
896 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
897 				seg_info->frags[i].paddr_lo,
898 				seg_info->frags[i].paddr_hi,
899 				seg_info->frags[i].len);
900 		}
901 
902 		break;
903 
904 	case dp_tx_frm_tso:
905 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
906 				&cached_ext_desc[0]);
907 		break;
908 
909 
910 	default:
911 		break;
912 	}
913 
914 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
915 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
916 
917 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
918 			msdu_ext_desc->vaddr);
919 
920 	return msdu_ext_desc;
921 }
922 
923 /**
924  * dp_tx_trace_pkt() - Trace TX packet at DP layer
925  *
926  * @skb: skb to be traced
927  * @msdu_id: msdu_id of the packet
928  * @vdev_id: vdev_id of the packet
929  *
930  * Return: None
931  */
932 #ifdef DP_DISABLE_TX_PKT_TRACE
933 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
934 			    uint8_t vdev_id)
935 {
936 }
937 #else
938 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
939 			    uint8_t vdev_id)
940 {
941 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
942 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
943 	DPTRACE(qdf_dp_trace_ptr(skb,
944 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
945 				 QDF_TRACE_DEFAULT_PDEV_ID,
946 				 qdf_nbuf_data_addr(skb),
947 				 sizeof(qdf_nbuf_data(skb)),
948 				 msdu_id, vdev_id, 0));
949 
950 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
951 
952 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
953 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
954 				      msdu_id, QDF_TX));
955 }
956 #endif
957 
958 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
959 /**
960  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
961  *				      exception by the upper layer (OS_IF)
962  * @soc: DP soc handle
963  * @nbuf: packet to be transmitted
964  *
965  * Returns: 1 if the packet is marked as exception,
966  *	    0 if the packet is not marked as exception.
967  */
968 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
969 						 qdf_nbuf_t nbuf)
970 {
971 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
972 }
973 #else
974 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
975 						 qdf_nbuf_t nbuf)
976 {
977 	return 0;
978 }
979 #endif
980 
981 /**
982  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
983  * @vdev: DP vdev handle
984  * @nbuf: skb
985  * @desc_pool_id: Descriptor pool ID
986  * @msdu_info: MSDU info carrying HTT metadata for the fw
987  * @tx_exc_metadata: Handle that holds exception path metadata
988  * Allocate and prepare Tx descriptor with msdu information.
989  *
990  * Return: Pointer to Tx Descriptor on success,
991  *         NULL on failure
992  */
993 static
994 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
995 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
996 		struct dp_tx_msdu_info_s *msdu_info,
997 		struct cdp_tx_exception_metadata *tx_exc_metadata)
998 {
999 	uint8_t align_pad;
1000 	uint8_t is_exception = 0;
1001 	uint8_t htt_hdr_size;
1002 	struct dp_tx_desc_s *tx_desc;
1003 	struct dp_pdev *pdev = vdev->pdev;
1004 	struct dp_soc *soc = pdev->soc;
1005 
1006 	if (dp_tx_limit_check(vdev))
1007 		return NULL;
1008 
1009 	/* Allocate software Tx descriptor */
1010 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1011 
1012 	if (qdf_unlikely(!tx_desc)) {
1013 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1014 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1015 		return NULL;
1016 	}
1017 
1018 	dp_tx_outstanding_inc(pdev);
1019 
1020 	/* Initialize the SW tx descriptor */
1021 	tx_desc->nbuf = nbuf;
1022 	tx_desc->frm_type = dp_tx_frm_std;
1023 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1024 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1025 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1026 	tx_desc->vdev_id = vdev->vdev_id;
1027 	tx_desc->pdev = pdev;
1028 	tx_desc->msdu_ext_desc = NULL;
1029 	tx_desc->pkt_offset = 0;
1030 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1031 
1032 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1033 
1034 	if (qdf_unlikely(vdev->multipass_en)) {
1035 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1036 			goto failure;
1037 	}
1038 
1039 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1040 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1041 		is_exception = 1;
1042 	/*
1043 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1044 	 * transmitted using varying transmit parameters (tx spec) which include
1045 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
1046 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1047 	 * These frames are sent as exception packets to firmware.
1048 	 *
1049 	 * HW requirement is that metadata should always point to an
1050 	 * 8-byte aligned address, so an alignment pad is added to the start
1051 	 * of the buffer. The HTT metadata itself is a multiple of 8 bytes,
1052 	 * so the start address stays 8-byte aligned once align_pad is added.
1053 	 *
1054 	 *  |-----------------------------|
1055 	 *  |                             |
1056 	 *  |-----------------------------| <-----Buffer Pointer Address given
1057 	 *  |                             |  ^    in HW descriptor (aligned)
1058 	 *  |       HTT Metadata          |  |
1059 	 *  |                             |  |
1060 	 *  |                             |  | Packet Offset given in descriptor
1061 	 *  |                             |  |
1062 	 *  |-----------------------------|  |
1063 	 *  |       Alignment Pad         |  v
1064 	 *  |-----------------------------| <----- Actual buffer start address
1065 	 *  |        SKB Data             |           (Unaligned)
1066 	 *  |                             |
1067 	 *  |                             |
1068 	 *  |                             |
1069 	 *  |                             |
1070 	 *  |                             |
1071 	 *  |-----------------------------|
1072 	 */
1073 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1074 				(vdev->opmode == wlan_op_mode_ocb) ||
1075 				(tx_exc_metadata &&
1076 				tx_exc_metadata->is_tx_sniffer)) {
1077 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1078 
1079 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1080 			DP_STATS_INC(vdev,
1081 				     tx_i.dropped.headroom_insufficient, 1);
1082 			goto failure;
1083 		}
1084 
1085 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1086 			dp_tx_err("qdf_nbuf_push_head failed");
1087 			goto failure;
1088 		}
1089 
1090 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1091 				msdu_info);
1092 		if (htt_hdr_size == 0)
1093 			goto failure;
1094 
1095 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1096 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1097 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1098 		is_exception = 1;
1099 		tx_desc->length -= tx_desc->pkt_offset;
1100 	}
1101 
1102 #if !TQM_BYPASS_WAR
1103 	if (is_exception || tx_exc_metadata)
1104 #endif
1105 	{
1106 		/* Temporary WAR due to TQM VP issues */
1107 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1108 		qdf_atomic_inc(&soc->num_tx_exception);
1109 	}
1110 
1111 	return tx_desc;
1112 
1113 failure:
1114 	dp_tx_desc_release(tx_desc, desc_pool_id);
1115 	return NULL;
1116 }
1117 
1118 /**
1119  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1120  * @vdev: DP vdev handle
1121  * @nbuf: skb
1122  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1123  * @desc_pool_id : Descriptor Pool ID
1124  *
1125  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1126  * information. For frames with fragments, allocate and prepare
1127  * an MSDU extension descriptor
1128  *
1129  * Return: Pointer to Tx Descriptor on success,
1130  *         NULL on failure
1131  */
1132 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1133 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1134 		uint8_t desc_pool_id)
1135 {
1136 	struct dp_tx_desc_s *tx_desc;
1137 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1138 	struct dp_pdev *pdev = vdev->pdev;
1139 	struct dp_soc *soc = pdev->soc;
1140 
1141 	if (dp_tx_limit_check(vdev))
1142 		return NULL;
1143 
1144 	/* Allocate software Tx descriptor */
1145 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1146 	if (!tx_desc) {
1147 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1148 		return NULL;
1149 	}
1150 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1151 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1152 
1153 	dp_tx_outstanding_inc(pdev);
1154 
1155 	/* Initialize the SW tx descriptor */
1156 	tx_desc->nbuf = nbuf;
1157 	tx_desc->frm_type = msdu_info->frm_type;
1158 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1159 	tx_desc->vdev_id = vdev->vdev_id;
1160 	tx_desc->pdev = pdev;
1161 	tx_desc->pkt_offset = 0;
1162 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1163 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1164 
1165 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1166 
1167 	/* Handle scattered frames - TSO/SG/ME */
1168 	/* Allocate and prepare an extension descriptor for scattered frames */
1169 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1170 	if (!msdu_ext_desc) {
1171 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1172 		goto failure;
1173 	}
1174 
1175 #if TQM_BYPASS_WAR
1176 	/* Temporary WAR due to TQM VP issues */
1177 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1178 	qdf_atomic_inc(&soc->num_tx_exception);
1179 #endif
1180 	if (qdf_unlikely(msdu_info->exception_fw))
1181 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1182 
1183 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1184 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1185 
1186 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1187 
1188 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1189 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1190 	else
1191 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1192 
1193 	return tx_desc;
1194 failure:
1195 	dp_tx_desc_release(tx_desc, desc_pool_id);
1196 	return NULL;
1197 }
1198 
1199 /**
1200  * dp_tx_prepare_raw() - Prepare RAW packet TX
1201  * @vdev: DP vdev handle
1202  * @nbuf: buffer pointer
1203  * @seg_info: Pointer to Segment info Descriptor to be prepared
1204  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1205  *     descriptor
1206  *
1207  * Return: nbuf on success, NULL on failure
1208  */
1209 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1210 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1211 {
1212 	qdf_nbuf_t curr_nbuf = NULL;
1213 	uint16_t total_len = 0;
1214 	qdf_dma_addr_t paddr;
1215 	int32_t i;
1216 	int32_t mapped_buf_num = 0;
1217 
1218 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1219 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1220 
1221 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1222 
1223 	/* Continue only if frames are of DATA type */
1224 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1225 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1226 		dp_tx_debug("Pkt recd is not of data type");
1227 		goto error;
1228 	}
1229 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1230 	if (vdev->raw_mode_war &&
1231 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1232 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1233 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1234 
1235 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1236 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1237 		/*
1238 		 * Number of nbuf's must not exceed the size of the frags
1239 		 * array in seg_info.
1240 		 */
1241 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1242 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1243 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1244 			goto error;
1245 		}
1246 		if (QDF_STATUS_SUCCESS !=
1247 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1248 						   curr_nbuf,
1249 						   QDF_DMA_TO_DEVICE,
1250 						   curr_nbuf->len)) {
1251 			dp_tx_err("%s dma map error ", __func__);
1252 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1253 			goto error;
1254 		}
1255 		/* Update the count of mapped nbuf's */
1256 		mapped_buf_num++;
1257 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1258 		seg_info->frags[i].paddr_lo = paddr;
1259 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1260 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1261 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1262 		total_len += qdf_nbuf_len(curr_nbuf);
1263 	}
1264 
1265 	seg_info->frag_cnt = i;
1266 	seg_info->total_len = total_len;
1267 	seg_info->next = NULL;
1268 
1269 	sg_info->curr_seg = seg_info;
1270 
1271 	msdu_info->frm_type = dp_tx_frm_raw;
1272 	msdu_info->num_seg = 1;
1273 
1274 	return nbuf;
1275 
1276 error:
1277 	i = 0;
1278 	while (nbuf) {
1279 		curr_nbuf = nbuf;
1280 		if (i < mapped_buf_num) {
1281 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1282 						     QDF_DMA_TO_DEVICE,
1283 						     curr_nbuf->len);
1284 			i++;
1285 		}
1286 		nbuf = qdf_nbuf_next(nbuf);
1287 		qdf_nbuf_free(curr_nbuf);
1288 	}
1289 	return NULL;
1290 
1291 }
1292 
1293 /**
1294  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1295  * @soc: DP soc handle
1296  * @nbuf: Buffer pointer
1297  *
1298  * unmap the chain of nbufs that belong to this RAW frame.
1299  *
1300  * Return: None
1301  */
1302 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1303 				    qdf_nbuf_t nbuf)
1304 {
1305 	qdf_nbuf_t cur_nbuf = nbuf;
1306 
1307 	do {
1308 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1309 					     QDF_DMA_TO_DEVICE,
1310 					     cur_nbuf->len);
1311 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1312 	} while (cur_nbuf);
1313 }
1314 
1315 #ifdef VDEV_PEER_PROTOCOL_COUNT
1316 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1317 					       qdf_nbuf_t nbuf)
1318 {
1319 	qdf_nbuf_t nbuf_local;
1320 	struct dp_vdev *vdev_local = vdev_hdl;
1321 
1322 	do {
1323 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1324 			break;
1325 		nbuf_local = nbuf;
1326 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1327 			 htt_cmn_pkt_type_raw))
1328 			break;
1329 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1330 			break;
1331 		else if (qdf_nbuf_is_tso((nbuf_local)))
1332 			break;
1333 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1334 						       (nbuf_local),
1335 						       NULL, 1, 0);
1336 	} while (0);
1337 }
1338 #endif
1339 
1340 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1341 /**
1342  * dp_tx_update_stats() - Update soc level tx stats
1343  * @soc: DP soc handle
1344  * @nbuf: packet being transmitted
1345  *
1346  * Returns: none
1347  */
1348 void dp_tx_update_stats(struct dp_soc *soc,
1349 			qdf_nbuf_t nbuf)
1350 {
1351 	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
1352 }
1353 
1354 int
1355 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1356 			 struct dp_tx_desc_s *tx_desc,
1357 			 uint8_t tid)
1358 {
1359 	struct dp_swlm *swlm = &soc->swlm;
1360 	union swlm_data swlm_query_data;
1361 	struct dp_swlm_tcl_data tcl_data;
1362 	QDF_STATUS status;
1363 	int ret;
1364 
1365 	if (qdf_unlikely(!swlm->is_enabled))
1366 		return 0;
1367 
1368 	tcl_data.nbuf = tx_desc->nbuf;
1369 	tcl_data.tid = tid;
1370 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1371 	swlm_query_data.tcl_data = &tcl_data;
1372 
1373 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1374 	if (QDF_IS_STATUS_ERROR(status)) {
1375 		dp_swlm_tcl_reset_session_data(soc);
1376 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1377 		return 0;
1378 	}
1379 
1380 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1381 	if (ret) {
1382 		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
1383 	} else {
1384 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1385 	}
1386 
1387 	return ret;
1388 }
1389 
1390 void
1391 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1392 		      int coalesce)
1393 {
1394 	if (coalesce)
1395 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1396 	else
1397 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1398 }
1399 
1400 #endif
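
/*
 * Illustrative sketch only (simplified, assumed caller): the enqueue path
 * is expected to use the coalescing decision to defer the TCL head-pointer
 * update, roughly as:
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
 *	...program the TCL descriptor...
 *	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
 *
 * When coalesce is non-zero the HP write is skipped (reap-only access end)
 * and is expected to be flushed later by the SWLM flush logic.
 */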
1401 
1402 #ifdef FEATURE_RUNTIME_PM
1403 /**
1404  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1405  * @soc: Datapath soc handle
1406  * @hal_ring_hdl: HAL ring handle
1407  * @coalesce: Coalesce the current write or not
1408  *
1409  * Wrapper for HAL ring access end for data transmission for
1410  * FEATURE_RUNTIME_PM
1411  *
1412  * Returns: none
1413  */
1414 void
1415 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1416 			      hal_ring_handle_t hal_ring_hdl,
1417 			      int coalesce)
1418 {
1419 	int ret;
1420 
1421 	ret = hif_pm_runtime_get(soc->hif_handle,
1422 				 RTPM_ID_DW_TX_HW_ENQUEUE, true);
1423 	switch (ret) {
1424 	case 0:
1425 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1426 		hif_pm_runtime_put(soc->hif_handle,
1427 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1428 		break;
1429 	/*
1430 	 * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
1431 	 * take the dp runtime refcount using dp_runtime_get,
1432 	 * check the link state; if up, write the TX ring HP, else just set the
1433 	 * flush event. In dp_runtime_resume, wait until the dp runtime refcount
1434 	 * becomes zero or the wait times out, then flush the pending tx.
1435 	 */
1436 	case -EBUSY:
1437 	case -EINPROGRESS:
1438 		dp_runtime_get(soc);
1439 		if (hif_pm_get_link_state(soc->hif_handle) ==
1440 		    HIF_PM_LINK_STATE_UP) {
1441 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1442 		} else {
1443 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1444 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1445 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1446 		}
1447 		dp_runtime_put(soc);
1448 		break;
1449 	default:
1450 		dp_runtime_get(soc);
1451 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1452 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1453 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1454 		dp_runtime_put(soc);
1455 	}
1456 }
1457 #endif
1458 
1459 /**
1460  * dp_cce_classify() - Classify the frame based on CCE rules
1461  * @vdev: DP vdev handle
1462  * @nbuf: skb
1463  *
1464  * Classify frames based on CCE rules
1465  * Return: bool (true if classified,
1466  *               else false)
1467  */
1468 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1469 {
1470 	qdf_ether_header_t *eh = NULL;
1471 	uint16_t   ether_type;
1472 	qdf_llc_t *llcHdr;
1473 	qdf_nbuf_t nbuf_clone = NULL;
1474 	qdf_dot3_qosframe_t *qos_wh = NULL;
1475 
1476 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1477 		/*
1478 		 * In case of mesh packets or hlos tid override enabled,
1479 		 * don't do any classification
1480 		 */
1481 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1482 					& DP_TX_SKIP_CCE_CLASSIFY))
1483 			return false;
1484 	}
1485 
1486 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1487 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1488 		ether_type = eh->ether_type;
1489 		llcHdr = (qdf_llc_t *)(nbuf->data +
1490 					sizeof(qdf_ether_header_t));
1491 	} else {
1492 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1493 		/* For encrypted packets don't do any classification */
1494 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1495 			return false;
1496 
1497 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1498 			if (qdf_unlikely(
1499 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1500 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1501 
1502 				ether_type = *(uint16_t *)(nbuf->data
1503 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1504 						+ sizeof(qdf_llc_t)
1505 						- sizeof(ether_type));
1506 				llcHdr = (qdf_llc_t *)(nbuf->data +
1507 						QDF_IEEE80211_4ADDR_HDR_LEN);
1508 			} else {
1509 				ether_type = *(uint16_t *)(nbuf->data
1510 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1511 						+ sizeof(qdf_llc_t)
1512 						- sizeof(ether_type));
1513 				llcHdr = (qdf_llc_t *)(nbuf->data +
1514 					QDF_IEEE80211_3ADDR_HDR_LEN);
1515 			}
1516 
1517 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1518 				&& (ether_type ==
1519 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1520 
1521 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1522 				return true;
1523 			}
1524 		}
1525 
1526 		return false;
1527 	}
1528 
1529 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1530 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1531 				sizeof(*llcHdr));
1532 		nbuf_clone = qdf_nbuf_clone(nbuf);
1533 		if (qdf_unlikely(nbuf_clone)) {
1534 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1535 
1536 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1537 				qdf_nbuf_pull_head(nbuf_clone,
1538 						sizeof(qdf_net_vlanhdr_t));
1539 			}
1540 		}
1541 	} else {
1542 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1543 			nbuf_clone = qdf_nbuf_clone(nbuf);
1544 			if (qdf_unlikely(nbuf_clone)) {
1545 				qdf_nbuf_pull_head(nbuf_clone,
1546 					sizeof(qdf_net_vlanhdr_t));
1547 			}
1548 		}
1549 	}
1550 
1551 	if (qdf_unlikely(nbuf_clone))
1552 		nbuf = nbuf_clone;
1553 
1554 
1555 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1556 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1557 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1558 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1559 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1560 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1561 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1562 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1563 		if (qdf_unlikely(nbuf_clone))
1564 			qdf_nbuf_free(nbuf_clone);
1565 		return true;
1566 	}
1567 
1568 	if (qdf_unlikely(nbuf_clone))
1569 		qdf_nbuf_free(nbuf_clone);
1570 
1571 	return false;
1572 }
1573 
1574 /**
1575  * dp_tx_get_tid() - Obtain TID to be used for this frame
1576  * @vdev: DP vdev handle
1577  * @nbuf: skb
1578  * @msdu_info: msdu_info structure to store the obtained TID
1579  * Extract the DSCP or PCP information from the frame and map it to a TID.
1580  *
1581  * Return: void
1582  */
1583 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1584 			  struct dp_tx_msdu_info_s *msdu_info)
1585 {
1586 	uint8_t tos = 0, dscp_tid_override = 0;
1587 	uint8_t *hdr_ptr, *L3datap;
1588 	uint8_t is_mcast = 0;
1589 	qdf_ether_header_t *eh = NULL;
1590 	qdf_ethervlan_header_t *evh = NULL;
1591 	uint16_t   ether_type;
1592 	qdf_llc_t *llcHdr;
1593 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1594 
1595 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1596 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1597 		eh = (qdf_ether_header_t *)nbuf->data;
1598 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1599 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1600 	} else {
1601 		qdf_dot3_qosframe_t *qos_wh =
1602 			(qdf_dot3_qosframe_t *) nbuf->data;
1603 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1604 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1605 		return;
1606 	}
1607 
1608 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1609 	ether_type = eh->ether_type;
1610 
1611 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1612 	/*
1613 	 * Check if packet is dot3 or eth2 type.
1614 	 */
1615 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1616 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1617 				sizeof(*llcHdr));
1618 
1619 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1620 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1621 				sizeof(*llcHdr);
1622 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1623 					+ sizeof(*llcHdr) +
1624 					sizeof(qdf_net_vlanhdr_t));
1625 		} else {
1626 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1627 				sizeof(*llcHdr);
1628 		}
1629 	} else {
1630 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1631 			evh = (qdf_ethervlan_header_t *) eh;
1632 			ether_type = evh->ether_type;
1633 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1634 		}
1635 	}
1636 
1637 	/*
1638 	 * Find priority from IP TOS DSCP field
1639 	 */
1640 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1641 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1642 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1643 			/* Only for unicast frames */
1644 			if (!is_mcast) {
1645 				/* send it on VO queue */
1646 				msdu_info->tid = DP_VO_TID;
1647 			}
1648 		} else {
1649 			/*
1650 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1651 			 * from TOS byte.
1652 			 */
1653 			tos = ip->ip_tos;
1654 			dscp_tid_override = 1;
1655 
1656 		}
1657 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1658 		/* TODO
1659 		 * use flowlabel
1660 		 * IGMP/MLD cases to be handled in phase 2
1661 		 */
1662 		unsigned long ver_pri_flowlabel;
1663 		unsigned long pri;
1664 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1665 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1666 			DP_IPV6_PRIORITY_SHIFT;
1667 		tos = pri;
1668 		dscp_tid_override = 1;
1669 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1670 		msdu_info->tid = DP_VO_TID;
1671 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1672 		/* Only for unicast frames */
1673 		if (!is_mcast) {
1674 			/* send ucast arp on VO queue */
1675 			msdu_info->tid = DP_VO_TID;
1676 		}
1677 	}
1678 
1679 	/*
1680 	 * Assign all MCAST packets to BE
1681 	 */
1682 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1683 		if (is_mcast) {
1684 			tos = 0;
1685 			dscp_tid_override = 1;
1686 		}
1687 	}
1688 
1689 	if (dscp_tid_override == 1) {
1690 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1691 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1692 	}
1693 
1694 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1695 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1696 
1697 	return;
1698 }
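
/*
 * Worked example (illustrative, assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3F): for an IPv4 frame with TOS 0xB8 (DSCP 46, EF),
 * the code above computes
 *
 *	tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK = 46
 *
 * and the TID becomes pdev->dscp_tid_map[vdev->dscp_tid_map_id][46],
 * typically a voice TID in the default DSCP-TID map.
 */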
1699 
1700 /**
1701  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1702  * @vdev: DP vdev handle
1703  * @nbuf: skb
1704  * @msdu_info: msdu_info structure to store the obtained TID
1705  * Software based TID classification is required when more than 2 DSCP-TID
1706  * mapping tables are needed.
1707  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1708  *
1709  * Return: void
1710  */
1711 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1712 				      struct dp_tx_msdu_info_s *msdu_info)
1713 {
1714 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1715 
1716 	/*
1717 	 * The skip_sw_tid_classification flag will be set in the below cases:
1718 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1719 	 * 2. hlos_tid_override enabled for vdev
1720 	 * 3. mesh mode enabled for vdev
1721 	 */
1722 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1723 		/* Update tid in msdu_info from skb priority */
1724 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1725 			    & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1726 			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1727 			return;
1728 		}
1729 		return;
1730 	}
1731 
1732 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1733 }
1734 
1735 #ifdef FEATURE_WLAN_TDLS
1736 /**
1737  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1738  * @soc: datapath SOC
1739  * @vdev: datapath vdev
1740  * @tx_desc: TX descriptor
1741  *
1742  * Return: None
1743  */
1744 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1745 				    struct dp_vdev *vdev,
1746 				    struct dp_tx_desc_s *tx_desc)
1747 {
1748 	if (vdev) {
1749 		if (vdev->is_tdls_frame) {
1750 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1751 			vdev->is_tdls_frame = false;
1752 		}
1753 	}
1754 }
1755 
1756 /**
1757  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1758  * @soc: dp_soc handle
1759  * @tx_desc: TX descriptor
1761  *
1762  * Return: None
1763  */
1764 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1765 					 struct dp_tx_desc_s *tx_desc)
1766 {
1767 	struct hal_tx_completion_status ts = {0};
1768 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1769 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1770 						     DP_MOD_ID_TDLS);
1771 
1772 	if (qdf_unlikely(!vdev)) {
1773 		dp_err_rl("vdev is null!");
1774 		goto error;
1775 	}
1776 
1777 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1778 	if (vdev->tx_non_std_data_callback.func) {
1779 		qdf_nbuf_set_next(nbuf, NULL);
1780 		vdev->tx_non_std_data_callback.func(
1781 				vdev->tx_non_std_data_callback.ctxt,
1782 				nbuf, ts.status);
1783 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1784 		return;
1785 	} else {
1786 		dp_err_rl("callback func is null");
1787 	}
1788 
1789 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1790 error:
1791 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1792 	qdf_nbuf_free(nbuf);
1793 }
1794 
1795 /**
1796  * dp_tx_msdu_single_map() - do nbuf map
1797  * @vdev: DP vdev handle
1798  * @tx_desc: DP TX descriptor pointer
1799  * @nbuf: skb pointer
1800  *
1801  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1802  * operation done in other component.
1803  *
1804  * Return: QDF_STATUS
1805  */
1806 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1807 					       struct dp_tx_desc_s *tx_desc,
1808 					       qdf_nbuf_t nbuf)
1809 {
1810 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1811 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1812 						  nbuf,
1813 						  QDF_DMA_TO_DEVICE,
1814 						  nbuf->len);
1815 	else
1816 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1817 					   QDF_DMA_TO_DEVICE);
1818 }
1819 #else
1820 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1821 					   struct dp_vdev *vdev,
1822 					   struct dp_tx_desc_s *tx_desc)
1823 {
1824 }
1825 
1826 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1827 						struct dp_tx_desc_s *tx_desc)
1828 {
1829 }
1830 
1831 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1832 					       struct dp_tx_desc_s *tx_desc,
1833 					       qdf_nbuf_t nbuf)
1834 {
1835 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1836 					  nbuf,
1837 					  QDF_DMA_TO_DEVICE,
1838 					  nbuf->len);
1839 }
1840 #endif
1841 
1842 #ifdef MESH_MODE_SUPPORT
1843 /**
1844  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1845  * @soc: datapath SOC
1846  * @vdev: datapath vdev
1847  * @tx_desc: TX descriptor
1848  *
1849  * Return: None
1850  */
1851 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1852 					   struct dp_vdev *vdev,
1853 					   struct dp_tx_desc_s *tx_desc)
1854 {
1855 	if (qdf_unlikely(vdev->mesh_vdev))
1856 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1857 }
1858 
1859 /**
1860  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
1861  * @soc: dp_soc handle
1862  * @tx_desc: TX descriptor
1864  *
1865  * Return: None
1866  */
1867 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1868 					     struct dp_tx_desc_s *tx_desc)
1869 {
1870 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1871 	struct dp_vdev *vdev = NULL;
1872 
1873 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1874 		qdf_nbuf_free(nbuf);
1875 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1876 	} else {
1877 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1878 					     DP_MOD_ID_MESH);
1879 		if (vdev && vdev->osif_tx_free_ext)
1880 			vdev->osif_tx_free_ext((nbuf));
1881 		else
1882 			qdf_nbuf_free(nbuf);
1883 
1884 		if (vdev)
1885 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1886 	}
1887 }
1888 #else
1889 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1890 					   struct dp_vdev *vdev,
1891 					   struct dp_tx_desc_s *tx_desc)
1892 {
1893 }
1894 
1895 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1896 					     struct dp_tx_desc_s *tx_desc)
1897 {
1898 }
1899 #endif
1900 
1901 /**
1902  * dp_tx_frame_is_drop() - checks if the packet is loopback
1903  * @vdev: DP vdev handle
1904  * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
1905  *
1906  * Return: 1 if frame needs to be dropped else 0
1907  */
1908 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1909 {
1910 	struct dp_pdev *pdev = NULL;
1911 	struct dp_ast_entry *src_ast_entry = NULL;
1912 	struct dp_ast_entry *dst_ast_entry = NULL;
1913 	struct dp_soc *soc = NULL;
1914 
1915 	qdf_assert(vdev);
1916 	pdev = vdev->pdev;
1917 	qdf_assert(pdev);
1918 	soc = pdev->soc;
1919 
1920 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1921 				(soc, dstmac, vdev->pdev->pdev_id);
1922 
1923 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1924 				(soc, srcmac, vdev->pdev->pdev_id);
1925 	if (dst_ast_entry && src_ast_entry) {
1926 		if (dst_ast_entry->peer_id ==
1927 				src_ast_entry->peer_id)
1928 			return 1;
1929 	}
1930 
1931 	return 0;
1932 }
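
/*
 * Example for illustration only: if both srcmac and dstmac resolve to AST
 * entries that belong to the same peer (e.g. a client addressing a frame
 * to its own MAC through the AP), the peer_ids match and the function
 * returns 1, telling the caller to drop the loopback frame.
 */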
1933 
1934 /**
1935  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1936  * @vdev: DP vdev handle
1937  * @nbuf: skb
1938  * @msdu_info: MSDU info holding the TID, fw metadata and the Tx queue
 *             to be used for this frame
1941  * @peer_id: peer_id of the peer in case of NAWDS frames
1942  * @tx_exc_metadata: Handle that holds exception path metadata
1943  *
1944  * Return: NULL on success,
1945  *         nbuf when it fails to send
1946  */
1947 qdf_nbuf_t
1948 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1949 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1950 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1951 {
1952 	struct dp_pdev *pdev = vdev->pdev;
1953 	struct dp_soc *soc = pdev->soc;
1954 	struct dp_tx_desc_s *tx_desc;
1955 	QDF_STATUS status;
1956 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1957 	uint16_t htt_tcl_metadata = 0;
1958 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1959 	uint8_t tid = msdu_info->tid;
1960 	struct cdp_tid_tx_stats *tid_stats = NULL;
1961 
1962 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1963 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1964 			msdu_info, tx_exc_metadata);
1965 	if (!tx_desc) {
1966 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1967 			  vdev, tx_q->desc_pool_id);
1968 		drop_code = TX_DESC_ERR;
1969 		goto fail_return;
1970 	}
1971 
1972 	if (qdf_unlikely(soc->cce_disable)) {
1973 		if (dp_cce_classify(vdev, nbuf) == true) {
1974 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1975 			tid = DP_VO_TID;
1976 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1977 		}
1978 	}
1979 
1980 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
1981 
1982 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1983 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1984 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1985 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1986 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1987 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1988 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1989 				peer_id);
1990 	} else
1991 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1992 
1993 	if (msdu_info->exception_fw)
1994 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1995 
1996 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
1997 					 !monitor_is_enable_enhanced_stats(pdev));
1998 
1999 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2000 
2001 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
2002 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
2003 		/* Handle failure */
2004 		dp_err("qdf_nbuf_map failed");
2005 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2006 		drop_code = TX_DMA_MAP_ERR;
2007 		goto release_desc;
2008 	}
2009 
2010 	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
2011 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2012 			       tx_desc->id, DP_TX_DESC_MAP);
2013 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2014 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2015 					     htt_tcl_metadata,
2016 					     tx_exc_metadata, msdu_info);
2017 
2018 	if (status != QDF_STATUS_SUCCESS) {
2019 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2020 			     tx_desc, tx_q->ring_id);
2021 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2022 				       tx_desc->id, DP_TX_DESC_UNMAP);
2023 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2024 					     QDF_DMA_TO_DEVICE,
2025 					     nbuf->len);
2026 		drop_code = TX_HW_ENQUEUE;
2027 		goto release_desc;
2028 	}
2029 
2030 	return NULL;
2031 
2032 release_desc:
2033 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2034 
2035 fail_return:
2036 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2037 	tid_stats = &pdev->stats.tid_stats.
2038 		    tid_tx_stats[tx_q->ring_id][tid];
2039 	tid_stats->swdrop_cnt[drop_code]++;
2040 	return nbuf;
2041 }
2042 
2043 /**
2044  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2045  * @soc: Soc handle
2046  * @desc: software Tx descriptor to be processed
2047  *
2048  * Return: none
2049  */
2050 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2051 				       struct dp_tx_desc_s *desc)
2052 {
2053 	qdf_nbuf_t nbuf = desc->nbuf;
2054 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2055 
2056 	/* nbuf already freed in vdev detach path */
2057 	if (!nbuf)
2058 		return;
2059 
2060 	/* If it is TDLS mgmt, don't unmap or free the frame */
2061 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2062 		return dp_non_std_tx_comp_free_buff(soc, desc);
2063 
2064 	/* 0 : MSDU buffer, 1 : MLE */
2065 	if (desc->msdu_ext_desc) {
2066 		/* TSO free */
2067 		if (hal_tx_ext_desc_get_tso_enable(
2068 					desc->msdu_ext_desc->vaddr)) {
2069 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2070 					       desc->id, DP_TX_COMP_MSDU_EXT);
2071 			dp_tx_tso_seg_history_add(soc, desc->tso_desc,
2072 						  desc->nbuf, desc->id, type);
2073 			/* unmap each TSO seg before freeing the nbuf */
2074 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2075 						desc->tso_num_desc);
2076 			qdf_nbuf_free(nbuf);
2077 			return;
2078 		}
2079 	}
2080 	/* If it's an ME frame, don't unmap the cloned nbufs */
2081 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
2082 		goto nbuf_free;
2083 
2084 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2085 	dp_tx_unmap(soc, desc);
2086 
2087 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2088 		return dp_mesh_tx_comp_free_buff(soc, desc);
2089 nbuf_free:
2090 	qdf_nbuf_free(nbuf);
2091 }
2092 
2093 /**
2094  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2095  * @vdev: DP vdev handle
2096  * @nbuf: skb
2097  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2098  *
2099  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2100  *
2101  * Return: NULL on success,
2102  *         nbuf when it fails to send
2103  */
2104 #if QDF_LOCK_STATS
2105 noinline
2106 #else
2107 #endif
2108 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2109 				    struct dp_tx_msdu_info_s *msdu_info)
2110 {
2111 	uint32_t i;
2112 	struct dp_pdev *pdev = vdev->pdev;
2113 	struct dp_soc *soc = pdev->soc;
2114 	struct dp_tx_desc_s *tx_desc;
2115 	bool is_cce_classified = false;
2116 	QDF_STATUS status;
2117 	uint16_t htt_tcl_metadata = 0;
2118 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2119 	struct cdp_tid_tx_stats *tid_stats = NULL;
2120 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2121 
2122 	if (qdf_unlikely(soc->cce_disable)) {
2123 		is_cce_classified = dp_cce_classify(vdev, nbuf);
2124 		if (is_cce_classified) {
2125 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
2126 			msdu_info->tid = DP_VO_TID;
2127 		}
2128 	}
2129 
2130 	if (msdu_info->frm_type == dp_tx_frm_me)
2131 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2132 
2133 	i = 0;
2134 	/* Print statement to track i and num_seg */
2135 	/*
2136 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
2137 	 * descriptors using information in msdu_info
2138 	 */
2139 	while (i < msdu_info->num_seg) {
2140 		/*
2141 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2142 		 * descriptor
2143 		 */
2144 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2145 				tx_q->desc_pool_id);
2146 
2147 		if (!tx_desc) {
2148 			if (msdu_info->frm_type == dp_tx_frm_me) {
2149 				prep_desc_fail++;
2150 				dp_tx_me_free_buf(pdev,
2151 					(void *)(msdu_info->u.sg_info
2152 						.curr_seg->frags[0].vaddr));
2153 				if (prep_desc_fail == msdu_info->num_seg) {
2154 					/*
2155 					 * Unmap is needed only if descriptor
2156 					 * preparation failed for all segments.
2157 					 */
2158 					qdf_nbuf_unmap(soc->osdev,
2159 						       msdu_info->u.sg_info.
2160 						       curr_seg->nbuf,
2161 						       QDF_DMA_TO_DEVICE);
2162 				}
2163 				/*
2164 				 * Free the nbuf for the current segment
2165 				 * and make it point to the next in the list.
2166 				 * For ME, there are as many segments as
2167 				 * there are clients.
2168 				 */
2169 				qdf_nbuf_free(msdu_info->u.sg_info
2170 					      .curr_seg->nbuf);
2171 				if (msdu_info->u.sg_info.curr_seg->next) {
2172 					msdu_info->u.sg_info.curr_seg =
2173 						msdu_info->u.sg_info
2174 						.curr_seg->next;
2175 					nbuf = msdu_info->u.sg_info
2176 					       .curr_seg->nbuf;
2177 				}
2178 				i++;
2179 				continue;
2180 			}
2181 
2182 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2183 				dp_tx_tso_seg_history_add(
2184 						soc,
2185 						msdu_info->u.tso_info.curr_seg,
2186 						nbuf, 0, DP_TX_DESC_UNMAP);
2187 				dp_tx_tso_unmap_segment(soc,
2188 							msdu_info->u.tso_info.
2189 							curr_seg,
2190 							msdu_info->u.tso_info.
2191 							tso_num_seg_list);
2192 
2193 				if (msdu_info->u.tso_info.curr_seg->next) {
2194 					msdu_info->u.tso_info.curr_seg =
2195 					msdu_info->u.tso_info.curr_seg->next;
2196 					i++;
2197 					continue;
2198 				}
2199 			}
2200 
2201 			goto done;
2202 		}
2203 
2204 		if (msdu_info->frm_type == dp_tx_frm_me) {
2205 			tx_desc->me_buffer =
2206 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
2207 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2208 		}
2209 
2210 		if (is_cce_classified)
2211 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2212 
2213 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2214 		if (msdu_info->exception_fw) {
2215 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2216 		}
2217 
2218 		/*
2219 		 * For frames with multiple segments (TSO, ME), jump to next
2220 		 * segment.
2221 		 */
2222 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2223 			if (msdu_info->u.tso_info.curr_seg->next) {
2224 				msdu_info->u.tso_info.curr_seg =
2225 					msdu_info->u.tso_info.curr_seg->next;
2226 
2227 				/*
2228 				 * If this is a jumbo nbuf, then increment the
2229 				 * number of nbuf users for each additional
2230 				 * segment of the msdu. This will ensure that
2231 				 * the skb is freed only after receiving tx
2232 				 * completion for all segments of an nbuf
2233 				 */
2234 				qdf_nbuf_inc_users(nbuf);
2235 
2236 				/* Check with MCL if this is needed */
2237 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2238 				 */
2239 			}
2240 		}
2241 
2242 		/*
2243 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2244 		 */
2245 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2246 						     htt_tcl_metadata,
2247 						     NULL, msdu_info);
2248 
2249 		if (status != QDF_STATUS_SUCCESS) {
2250 			dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2251 				tx_desc, tx_q->ring_id);
2252 
2253 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2254 			tid_stats = &pdev->stats.tid_stats.
2255 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2256 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2257 
2258 			if (msdu_info->frm_type == dp_tx_frm_me) {
2259 				hw_enq_fail++;
2260 				if (hw_enq_fail == msdu_info->num_seg) {
2261 					/*
2262 					 * Unmap is needed only if enqueue
2263 					 * failed for all segments.
2264 					 */
2265 					qdf_nbuf_unmap(soc->osdev,
2266 						       msdu_info->u.sg_info.
2267 						       curr_seg->nbuf,
2268 						       QDF_DMA_TO_DEVICE);
2269 				}
2270 				/*
2271 				 * Free the nbuf for the current segment
2272 				 * and make it point to the next in the list.
2273 				 * For ME, there are as many segments as
2274 				 * there are clients.
2275 				 */
2276 				qdf_nbuf_free(msdu_info->u.sg_info
2277 					      .curr_seg->nbuf);
2278 				if (msdu_info->u.sg_info.curr_seg->next) {
2279 					msdu_info->u.sg_info.curr_seg =
2280 						msdu_info->u.sg_info
2281 						.curr_seg->next;
2282 					nbuf = msdu_info->u.sg_info
2283 					       .curr_seg->nbuf;
2284 				} else
2285 					break;
2286 				i++;
2287 				continue;
2288 			}
2289 
2290 			/*
2291 			 * For TSO frames, the nbuf users increment done for
2292 			 * the current segment has to be reverted, since the
2293 			 * hw enqueue for this segment failed
2294 			 */
2295 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2296 			    msdu_info->u.tso_info.curr_seg) {
2297 				/*
2298 				 * unmap and free current,
2299 				 * retransmit remaining segments
2300 				 */
2301 				dp_tx_comp_free_buf(soc, tx_desc);
2302 				i++;
2303 				continue;
2304 			}
2305 
2306 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2307 			goto done;
2308 		}
2309 
2310 		/*
2311 		 * TODO
2312 		 * if tso_info structure can be modified to have curr_seg
2313 		 * as first element, following 2 blocks of code (for TSO and SG)
2314 		 * can be combined into 1
2315 		 */
2316 
2317 		/*
2318 		 * For Multicast-Unicast converted packets,
2319 		 * each converted frame (for a client) is represented as
2320 		 * 1 segment
2321 		 */
2322 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2323 				(msdu_info->frm_type == dp_tx_frm_me)) {
2324 			if (msdu_info->u.sg_info.curr_seg->next) {
2325 				msdu_info->u.sg_info.curr_seg =
2326 					msdu_info->u.sg_info.curr_seg->next;
2327 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2328 			} else
2329 				break;
2330 		}
2331 		i++;
2332 	}
2333 
2334 	nbuf = NULL;
2335 
2336 done:
2337 	return nbuf;
2338 }
2339 
2340 /**
2341  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2342  *                     for SG frames
2343  * @vdev: DP vdev handle
2344  * @nbuf: skb
2345  * @seg_info: Pointer to Segment info Descriptor to be prepared
2346  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2347  *
2348  * Return: NULL on success,
2349  *         nbuf when it fails to send
2350  */
2351 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2352 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2353 {
2354 	uint32_t cur_frag, nr_frags, i;
2355 	qdf_dma_addr_t paddr;
2356 	struct dp_tx_sg_info_s *sg_info;
2357 
2358 	sg_info = &msdu_info->u.sg_info;
2359 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2360 
2361 	if (QDF_STATUS_SUCCESS !=
2362 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2363 					   QDF_DMA_TO_DEVICE,
2364 					   qdf_nbuf_headlen(nbuf))) {
2365 		dp_tx_err("dma map error");
2366 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2367 
2368 		qdf_nbuf_free(nbuf);
2369 		return NULL;
2370 	}
2371 
2372 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2373 	seg_info->frags[0].paddr_lo = paddr;
2374 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2375 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2376 	seg_info->frags[0].vaddr = (void *) nbuf;
2377 
2378 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2379 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
2380 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
2381 			dp_tx_err("frag dma map error");
2382 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2383 			goto map_err;
2384 		}
2385 
2386 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2387 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2388 		seg_info->frags[cur_frag + 1].paddr_hi =
2389 			((uint64_t) paddr) >> 32;
2390 		seg_info->frags[cur_frag + 1].len =
2391 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2392 	}
2393 
2394 	seg_info->frag_cnt = (cur_frag + 1);
2395 	seg_info->total_len = qdf_nbuf_len(nbuf);
2396 	seg_info->next = NULL;
2397 
2398 	sg_info->curr_seg = seg_info;
2399 
2400 	msdu_info->frm_type = dp_tx_frm_sg;
2401 	msdu_info->num_seg = 1;
2402 
2403 	return nbuf;
2404 map_err:
2405 	/* restore paddr into nbuf before calling unmap */
2406 	qdf_nbuf_mapped_paddr_set(nbuf,
2407 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2408 				  ((uint64_t)
2409 				  seg_info->frags[0].paddr_hi) << 32));
2410 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2411 				     QDF_DMA_TO_DEVICE,
2412 				     seg_info->frags[0].len);
2413 	for (i = 1; i <= cur_frag; i++) {
2414 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2415 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2416 				   seg_info->frags[i].paddr_hi) << 32),
2417 				   seg_info->frags[i].len,
2418 				   QDF_DMA_TO_DEVICE);
2419 	}
2420 	qdf_nbuf_free(nbuf);
2421 	return NULL;
2422 }
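
/*
 * Worked example (illustrative numbers) of the paddr split used above for
 * each SG fragment: a DMA address of 0x1_2345_6780 is stored as
 *	paddr_lo = 0x23456780   (lower 32 bits)
 *	paddr_hi = 0x1          (paddr >> 32)
 * and is reassembled on the error path as
 *	paddr = paddr_lo | ((uint64_t)paddr_hi << 32)
 */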
2423 
2424 /**
2425  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2426  * @vdev: DP vdev handle
2427  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2428  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2429  *
2430  * Return: None
2432  */
2433 static
2434 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2435 				    struct dp_tx_msdu_info_s *msdu_info,
2436 				    uint16_t ppdu_cookie)
2437 {
2438 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2439 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2440 
2441 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2442 
2443 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2444 				(msdu_info->meta_data[5], 1);
2445 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2446 				(msdu_info->meta_data[5], 1);
2447 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2448 				(msdu_info->meta_data[6], ppdu_cookie);
2449 
2450 	msdu_info->exception_fw = 1;
2451 	msdu_info->is_tx_sniffer = 1;
2452 }
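
/*
 * Usage note (illustration, not a normative example): a caller that wants
 * to correlate a sniffed frame with its PPDU completion could pass an
 * opaque 16-bit cookie, e.g.
 *
 *	dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info, 0x1234);
 *
 * The frame is then marked send-as-standalone and routed through the FW
 * exception path, and the same cookie is expected back as the host opaque
 * cookie in the PPDU completion.
 */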
2453 
2454 #ifdef MESH_MODE_SUPPORT
2455 
2456 /**
2457  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2458  *				      and prepare msdu_info for mesh frames.
2459  * @vdev: DP vdev handle
2460  * @nbuf: skb
2461  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2462  *
2463  * Return: NULL on failure,
2464  *         nbuf when extracted successfully
2465  */
2466 static
2467 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2468 				struct dp_tx_msdu_info_s *msdu_info)
2469 {
2470 	struct meta_hdr_s *mhdr;
2471 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2472 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2473 
2474 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2475 
2476 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2477 		msdu_info->exception_fw = 0;
2478 		goto remove_meta_hdr;
2479 	}
2480 
2481 	msdu_info->exception_fw = 1;
2482 
2483 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2484 
2485 	meta_data->host_tx_desc_pool = 1;
2486 	meta_data->update_peer_cache = 1;
2487 	meta_data->learning_frame = 1;
2488 
2489 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2490 		meta_data->power = mhdr->power;
2491 
2492 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2493 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2494 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2495 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2496 
2497 		meta_data->dyn_bw = 1;
2498 
2499 		meta_data->valid_pwr = 1;
2500 		meta_data->valid_mcs_mask = 1;
2501 		meta_data->valid_nss_mask = 1;
2502 		meta_data->valid_preamble_type  = 1;
2503 		meta_data->valid_retries = 1;
2504 		meta_data->valid_bw_info = 1;
2505 	}
2506 
2507 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2508 		meta_data->encrypt_type = 0;
2509 		meta_data->valid_encrypt_type = 1;
2510 		meta_data->learning_frame = 0;
2511 	}
2512 
2513 	meta_data->valid_key_flags = 1;
2514 	meta_data->key_flags = (mhdr->keyix & 0x3);
2515 
2516 remove_meta_hdr:
2517 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2518 		dp_tx_err("qdf_nbuf_pull_head failed");
2519 		qdf_nbuf_free(nbuf);
2520 		return NULL;
2521 	}
2522 
2523 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2524 
2525 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2526 		   " tid %d to_fw %d",
2527 		   msdu_info->meta_data[0],
2528 		   msdu_info->meta_data[1],
2529 		   msdu_info->meta_data[2],
2530 		   msdu_info->meta_data[3],
2531 		   msdu_info->meta_data[4],
2532 		   msdu_info->meta_data[5],
2533 		   msdu_info->tid, msdu_info->exception_fw);
2534 
2535 	return nbuf;
2536 }
2537 #else
2538 static
2539 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2540 				struct dp_tx_msdu_info_s *msdu_info)
2541 {
2542 	return nbuf;
2543 }
2544 
2545 #endif
2546 
2547 /**
2548  * dp_check_exc_metadata() - Checks if parameters are valid
2549  * @tx_exc: holds all exception path parameters
2550  *
2551  * Return: true when all the parameters are valid else false
2552  *
2553  */
2554 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2555 {
2556 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2557 			    HTT_INVALID_TID);
2558 	bool invalid_encap_type =
2559 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2560 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2561 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2562 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2563 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2564 			       tx_exc->ppdu_cookie == 0);
2565 
2566 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2567 	    invalid_cookie) {
2568 		return false;
2569 	}
2570 
2571 	return true;
2572 }
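
/*
 * Example (illustration only) of exception metadata that passes the checks
 * above when the caller does not need TID, encap type or security
 * overrides:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.tid = HTT_INVALID_TID;
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
 *	tx_exc.peer_id = HTT_INVALID_PEER;
 *	(is_tx_sniffer left as 0, so no ppdu_cookie is required)
 */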
2573 
2574 #ifdef ATH_SUPPORT_IQUE
2575 /**
2576  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2577  * @vdev: vdev handle
2578  * @nbuf: skb
2579  *
2580  * Return: true on success,
2581  *         false on failure
2582  */
2583 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2584 {
2585 	qdf_ether_header_t *eh;
2586 
2587 	/* Mcast to Ucast Conversion*/
2588 	if (qdf_likely(!vdev->mcast_enhancement_en))
2589 		return true;
2590 
2591 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2592 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2593 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2594 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2595 		qdf_nbuf_set_next(nbuf, NULL);
2596 
2597 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2598 				 qdf_nbuf_len(nbuf));
2599 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2600 				QDF_STATUS_SUCCESS) {
2601 			return false;
2602 		}
2603 
2604 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2605 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2606 					QDF_STATUS_SUCCESS) {
2607 				return false;
2608 			}
2609 		}
2610 	}
2611 
2612 	return true;
2613 }
2614 #else
2615 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2616 {
2617 	return true;
2618 }
2619 #endif
2620 
2621 /**
2622  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2623  * @nbuf: qdf_nbuf_t
2624  * @vdev: struct dp_vdev *
2625  *
2626  * Allow packet for processing only if it is for peer client which is
2627  * connected with same vap. Drop packet if client is connected to
2628  * different vap.
2629  *
2630  * Return: QDF_STATUS
2631  */
2632 static inline QDF_STATUS
2633 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2634 {
2635 	struct dp_ast_entry *dst_ast_entry = NULL;
2636 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2637 
2638 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2639 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2640 		return QDF_STATUS_SUCCESS;
2641 
2642 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2643 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2644 							eh->ether_dhost,
2645 							vdev->vdev_id);
2646 
2647 	/* If there is no ast entry, return failure */
2648 	if (qdf_unlikely(!dst_ast_entry)) {
2649 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2650 		return QDF_STATUS_E_FAILURE;
2651 	}
2652 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2653 
2654 	return QDF_STATUS_SUCCESS;
2655 }
2656 
2657 /**
2658  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2659  * @soc: DP soc handle
2660  * @vdev_id: id of DP vdev handle
2661  * @nbuf: skb
2662  * @tx_exc_metadata: Handle that holds exception path meta data
2663  *
2664  * Entry point for Core Tx layer (DP_TX) invoked from
2665  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2666  *
2667  * Return: NULL on success,
2668  *         nbuf when it fails to send
2669  */
2670 qdf_nbuf_t
2671 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2672 		     qdf_nbuf_t nbuf,
2673 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2674 {
2675 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2676 	qdf_ether_header_t *eh = NULL;
2677 	struct dp_tx_msdu_info_s msdu_info;
2678 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2679 						     DP_MOD_ID_TX_EXCEPTION);
2680 
2681 	if (qdf_unlikely(!vdev))
2682 		goto fail;
2683 
2684 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2685 
2686 	if (!tx_exc_metadata)
2687 		goto fail;
2688 
2689 	msdu_info.tid = tx_exc_metadata->tid;
2690 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2691 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2692 			 QDF_MAC_ADDR_REF(nbuf->data));
2693 
2694 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2695 
2696 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2697 		dp_tx_err("Invalid parameters in exception path");
2698 		goto fail;
2699 	}
2700 
2701 	/* Basic sanity checks for unsupported packets */
2702 
2703 	/* MESH mode */
2704 	if (qdf_unlikely(vdev->mesh_vdev)) {
2705 		dp_tx_err("Mesh mode is not supported in exception path");
2706 		goto fail;
2707 	}
2708 
2709 	/*
2710 	 * Classify the frame and call corresponding
2711 	 * "prepare" function which extracts the segment (TSO)
2712 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2713 	 * into MSDU_INFO structure which is later used to fill
2714 	 * SW and HW descriptors.
2715 	 */
2716 	if (qdf_nbuf_is_tso(nbuf)) {
2717 		dp_verbose_debug("TSO frame %pK", vdev);
2718 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2719 				 qdf_nbuf_len(nbuf));
2720 
2721 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2722 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2723 					 qdf_nbuf_len(nbuf));
2724 			goto fail;
2725 		}
2726 
2727 		goto send_multiple;
2728 	}
2729 
2730 	/* SG */
2731 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2732 		struct dp_tx_seg_info_s seg_info = {0};
2733 
2734 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2735 		if (!nbuf)
2736 			goto fail;
2737 
2738 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2739 
2740 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2741 				 qdf_nbuf_len(nbuf));
2742 
2743 		goto send_multiple;
2744 	}
2745 
2746 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2747 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2748 				 qdf_nbuf_len(nbuf));
2749 
2750 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2751 					       tx_exc_metadata->ppdu_cookie);
2752 	}
2753 
2754 	/*
2755 	 * Get HW Queue to use for this frame.
2756 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2757 	 * dedicated for data and 1 for command.
2758 	 * "queue_id" maps to one hardware ring.
2759 	 *  With each ring, we also associate a unique Tx descriptor pool
2760 	 *  to minimize lock contention for these resources.
2761 	 */
2762 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2763 
2764 	/*
2765 	 * Check exception descriptors
2766 	 */
2767 	if (dp_tx_exception_limit_check(vdev))
2768 		goto fail;
2769 
2770 	/*  Single linear frame */
2771 	/*
2772 	 * If nbuf is a simple linear frame, use send_single function to
2773 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2774 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2775 	 */
2776 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2777 			tx_exc_metadata->peer_id, tx_exc_metadata);
2778 
2779 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2780 	return nbuf;
2781 
2782 send_multiple:
2783 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2784 
2785 fail:
2786 	if (vdev)
2787 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2788 	dp_verbose_debug("pkt send failed");
2789 	return nbuf;
2790 }
2791 
2792 /**
2793  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2794  *      in exception path in a special case to avoid the regular exception path check.
2795  * @soc: DP soc handle
2796  * @vdev_id: id of DP vdev handle
2797  * @nbuf: skb
2798  * @tx_exc_metadata: Handle that holds exception path meta data
2799  *
2800  * Entry point for Core Tx layer (DP_TX) invoked from
2801  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2802  *
2803  * Return: NULL on success,
2804  *         nbuf when it fails to send
2805  */
2806 qdf_nbuf_t
2807 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2808 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
2809 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2810 {
2811 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2812 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2813 						     DP_MOD_ID_TX_EXCEPTION);
2814 
2815 	if (qdf_unlikely(!vdev))
2816 		goto fail;
2817 
2818 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2819 			== QDF_STATUS_E_FAILURE)) {
2820 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2821 		goto fail;
2822 	}
2823 
2824 	/* Release the reference as it will again be taken inside dp_tx_send_exception() */
2825 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2826 
2827 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2828 
2829 fail:
2830 	if (vdev)
2831 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2832 	dp_verbose_debug("pkt send failed");
2833 	return nbuf;
2834 }
2835 
2836 /**
2837  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2838  * @soc: DP soc handle
2839  * @vdev_id: DP vdev handle
2840  * @nbuf: skb
2841  *
2842  * Entry point for Core Tx layer (DP_TX) invoked from
2843  * hard_start_xmit in OSIF/HDD
2844  *
2845  * Return: NULL on success,
2846  *         nbuf when it fails to send
2847  */
2848 #ifdef MESH_MODE_SUPPORT
2849 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2850 			   qdf_nbuf_t nbuf)
2851 {
2852 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2853 	struct meta_hdr_s *mhdr;
2854 	qdf_nbuf_t nbuf_mesh = NULL;
2855 	qdf_nbuf_t nbuf_clone = NULL;
2856 	struct dp_vdev *vdev;
2857 	uint8_t no_enc_frame = 0;
2858 
2859 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2860 	if (!nbuf_mesh) {
2861 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2862 				"qdf_nbuf_unshare failed");
2863 		return nbuf;
2864 	}
2865 
2866 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
2867 	if (!vdev) {
2868 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2869 				"vdev is NULL for vdev_id %d", vdev_id);
2870 		return nbuf;
2871 	}
2872 
2873 	nbuf = nbuf_mesh;
2874 
2875 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2876 
2877 	if ((vdev->sec_type != cdp_sec_type_none) &&
2878 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2879 		no_enc_frame = 1;
2880 
2881 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2882 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2883 
2884 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2885 		       !no_enc_frame) {
2886 		nbuf_clone = qdf_nbuf_clone(nbuf);
2887 		if (!nbuf_clone) {
2888 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2889 				"qdf_nbuf_clone failed");
2890 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2891 			return nbuf;
2892 		}
2893 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2894 	}
2895 
2896 	if (nbuf_clone) {
2897 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
2898 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2899 		} else {
2900 			qdf_nbuf_free(nbuf_clone);
2901 		}
2902 	}
2903 
2904 	if (no_enc_frame)
2905 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2906 	else
2907 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2908 
2909 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
2910 	if ((!nbuf) && no_enc_frame) {
2911 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2912 	}
2913 
2914 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2915 	return nbuf;
2916 }
2917 
2918 #else
2919 
2920 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2921 			   qdf_nbuf_t nbuf)
2922 {
2923 	return dp_tx_send(soc, vdev_id, nbuf);
2924 }
2925 
2926 #endif
2927 
2928 /**
2929  * dp_tx_nawds_handler() - NAWDS handler
2930  *
2931  * @soc: DP soc handle
2932  * @vdev: DP vdev handle
2933  * @msdu_info: msdu_info required to create HTT metadata
2934  * @nbuf: skb
2935  *
2936  * This API transmits multicast frames individually to each
2937  * NAWDS-enabled peer, with the corresponding peer id.
2938  *
2939  * Return: none
2940  */
2941 
2942 static inline
2943 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2944 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2945 {
2946 	struct dp_peer *peer = NULL;
2947 	qdf_nbuf_t nbuf_clone = NULL;
2948 	uint16_t peer_id = DP_INVALID_PEER;
2949 	uint16_t sa_peer_id = DP_INVALID_PEER;
2950 	struct dp_ast_entry *ast_entry = NULL;
2951 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2952 
2953 	qdf_spin_lock_bh(&soc->ast_lock);
2954 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2955 				(soc,
2956 				 (uint8_t *)(eh->ether_shost),
2957 				 vdev->pdev->pdev_id);
2958 
2959 	if (ast_entry)
2960 		sa_peer_id = ast_entry->peer_id;
2961 	qdf_spin_unlock_bh(&soc->ast_lock);
2962 
2963 	qdf_spin_lock_bh(&vdev->peer_list_lock);
2964 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2965 		if (!peer->bss_peer && peer->nawds_enabled) {
2966 			peer_id = peer->peer_id;
2967 			/* Multicast packets need to be
2968 			 * dropped in case of intra bss forwarding
2969 			 */
2970 			if (sa_peer_id == peer->peer_id) {
2971 				dp_tx_debug("multicast packet");
2972 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2973 				continue;
2974 			}
2975 			nbuf_clone = qdf_nbuf_clone(nbuf);
2976 
2977 			if (!nbuf_clone) {
2978 				QDF_TRACE(QDF_MODULE_ID_DP,
2979 					  QDF_TRACE_LEVEL_ERROR,
2980 					  FL("nbuf clone failed"));
2981 				break;
2982 			}
2983 
2984 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2985 							    msdu_info, peer_id,
2986 							    NULL);
2987 
2988 			if (nbuf_clone) {
2989 				dp_tx_debug("pkt send failed");
2990 				qdf_nbuf_free(nbuf_clone);
2991 			} else {
2992 				if (peer_id != DP_INVALID_PEER)
2993 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2994 							 1, qdf_nbuf_len(nbuf));
2995 			}
2996 		}
2997 	}
2998 
2999 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3000 }
3001 
3002 /**
3003  * dp_tx_send() - Transmit a frame on a given VAP
3004  * @soc: DP soc handle
3005  * @vdev_id: id of DP vdev handle
3006  * @nbuf: skb
3007  *
3008  * Entry point for Core Tx layer (DP_TX) invoked from
3009  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3010  * cases
3011  *
3012  * Return: NULL on success,
3013  *         nbuf when it fails to send
3014  */
3015 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3016 		      qdf_nbuf_t nbuf)
3017 {
3018 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3019 	uint16_t peer_id = HTT_INVALID_PEER;
3020 	/*
3021 	 * doing a memzero is causing additional function call overhead
3022 	 * so doing static stack clearing
3023 	 */
3024 	struct dp_tx_msdu_info_s msdu_info = {0};
3025 	struct dp_vdev *vdev = NULL;
3026 
3027 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3028 		return nbuf;
3029 
3030 	/*
3031 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid
3032 	 * using it in the per-packet path.
3033 	 *
3034 	 * In this path the vdev memory is already protected by the
3035 	 * netdev tx lock.
3036 	 */
3037 	vdev = soc->vdev_id_map[vdev_id];
3038 	if (qdf_unlikely(!vdev))
3039 		return nbuf;
3040 
3041 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3042 			 QDF_MAC_ADDR_REF(nbuf->data));
3043 
3044 	/*
3045 	 * Set Default Host TID value to invalid TID
3046 	 * (TID override disabled)
3047 	 */
3048 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3049 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3050 
3051 	if (qdf_unlikely(vdev->mesh_vdev)) {
3052 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3053 								&msdu_info);
3054 		if (!nbuf_mesh) {
3055 			dp_verbose_debug("Extracting mesh metadata failed");
3056 			return nbuf;
3057 		}
3058 		nbuf = nbuf_mesh;
3059 	}
3060 
3061 	/*
3062 	 * Get HW Queue to use for this frame.
3063 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3064 	 * dedicated for data and 1 for command.
3065 	 * "queue_id" maps to one hardware ring.
3066 	 *  With each ring, we also associate a unique Tx descriptor pool
3067 	 *  to minimize lock contention for these resources.
3068 	 */
3069 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3070 
3071 	/*
3072 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3073 	 *  Table 1 - Default DSCP-TID mapping table
3074 	 *  Table 2 - 1 DSCP-TID override table
3075 	 *
3076 	 * If we need a different DSCP-TID mapping for this vap,
3077 	 * call tid_classify to extract DSCP/ToS from frame and
3078 	 * map to a TID and store in msdu_info. This is later used
3079 	 * to fill in TCL Input descriptor (per-packet TID override).
3080 	 */
3081 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3082 
3083 	/*
3084 	 * Classify the frame and call corresponding
3085 	 * "prepare" function which extracts the segment (TSO)
3086 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3087 	 * into MSDU_INFO structure which is later used to fill
3088 	 * SW and HW descriptors.
3089 	 */
3090 	if (qdf_nbuf_is_tso(nbuf)) {
3091 		dp_verbose_debug("TSO frame %pK", vdev);
3092 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3093 				 qdf_nbuf_len(nbuf));
3094 
3095 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3096 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3097 					 qdf_nbuf_len(nbuf));
3098 			return nbuf;
3099 		}
3100 
3101 		goto send_multiple;
3102 	}
3103 
3104 	/* SG */
3105 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3106 		struct dp_tx_seg_info_s seg_info = {0};
3107 
3108 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3109 		if (!nbuf)
3110 			return NULL;
3111 
3112 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3113 
3114 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3115 				qdf_nbuf_len(nbuf));
3116 
3117 		goto send_multiple;
3118 	}
3119 
3120 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3121 		return NULL;
3122 
3123 	/* RAW */
3124 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3125 		struct dp_tx_seg_info_s seg_info = {0};
3126 
3127 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3128 		if (!nbuf)
3129 			return NULL;
3130 
3131 		dp_verbose_debug("Raw frame %pK", vdev);
3132 
3133 		goto send_multiple;
3134 
3135 	}
3136 
3137 	if (qdf_unlikely(vdev->nawds_enabled)) {
3138 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3139 					  qdf_nbuf_data(nbuf);
3140 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
3141 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
3142 
3143 		peer_id = DP_INVALID_PEER;
3144 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3145 				 1, qdf_nbuf_len(nbuf));
3146 	}
3147 
3148 	/*  Single linear frame */
3149 	/*
3150 	 * If nbuf is a simple linear frame, use send_single function to
3151 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3152 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3153 	 */
3154 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3155 
3156 	return nbuf;
3157 
3158 send_multiple:
3159 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3160 
3161 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3162 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3163 
3164 	return nbuf;
3165 }
3166 
3167 /**
3168  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3169  *      case to avoid the vdev id check in the per-packet path.
3170  * @soc: DP soc handle
3171  * @vdev_id: id of DP vdev handle
3172  * @nbuf: skb
3173  *
3174  * Entry point for Core Tx layer (DP_TX) invoked from
3175  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3176  * with special condition to avoid per pkt check in dp_tx_send
3177  *
3178  * Return: NULL on success,
3179  *         nbuf when it fails to send
3180  */
3181 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3182 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3183 {
3184 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3185 	struct dp_vdev *vdev = NULL;
3186 
3187 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3188 		return nbuf;
3189 
3190 	/*
3191 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid
3192 	 * using it in the per-packet path.
3193 	 *
3194 	 * In this path the vdev memory is already protected by the
3195 	 * netdev tx lock.
3196 	 */
3197 	vdev = soc->vdev_id_map[vdev_id];
3198 	if (qdf_unlikely(!vdev))
3199 		return nbuf;
3200 
3201 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3202 			== QDF_STATUS_E_FAILURE)) {
3203 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3204 		return nbuf;
3205 	}
3206 
3207 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3208 }
3209 
3210 #ifdef UMAC_SUPPORT_PROXY_ARP
3211 /**
3212  * dp_tx_proxy_arp() - Tx proxy arp handler
3213  * @vdev: datapath vdev handle
3214  * @buf: sk buffer
3215  *
3216  * Return: status
3217  */
3218 static inline
3219 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3220 {
3221 	if (vdev->osif_proxy_arp)
3222 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3223 
3224 	/*
3225 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3226 	 * osif_proxy_arp to have a valid function pointer assigned
3227 	 * to it
3228 	 */
3229 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3230 
3231 	return QDF_STATUS_NOT_INITIALIZED;
3232 }
3233 #else
3234 /**
3235  * dp_tx_proxy_arp() - Tx proxy arp handler
3236  * @vdev: datapath vdev handle
3237  * @buf: sk buffer
3238  *
3239  * This function always returns QDF_STATUS_SUCCESS when
3240  * UMAC_SUPPORT_PROXY_ARP is not defined.
3241  *
3242  * Return: status
3243  */
3244 static inline
3245 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3246 {
3247 	return QDF_STATUS_SUCCESS;
3248 }
3249 #endif
3250 
3251 /**
3252  * dp_tx_reinject_handler() - Tx Reinject Handler
3253  * @soc: datapath soc handle
3254  * @vdev: datapath vdev handle
3255  * @tx_desc: software descriptor head pointer
3256  * @status : Tx completion status from HTT descriptor
3257  *
3258  * This function reinjects frames back to Target.
3259  * Todo - Host queue needs to be added
3260  *
3261  * Return: none
3262  */
3263 static
3264 void dp_tx_reinject_handler(struct dp_soc *soc,
3265 			    struct dp_vdev *vdev,
3266 			    struct dp_tx_desc_s *tx_desc,
3267 			    uint8_t *status)
3268 {
3269 	struct dp_peer *peer = NULL;
3270 	uint32_t peer_id = HTT_INVALID_PEER;
3271 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3272 	qdf_nbuf_t nbuf_copy = NULL;
3273 	struct dp_tx_msdu_info_s msdu_info;
3274 #ifdef WDS_VENDOR_EXTENSION
3275 	int is_mcast = 0, is_ucast = 0;
3276 	int num_peers_3addr = 0;
3277 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3278 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3279 #endif
3280 
3281 	qdf_assert(vdev);
3282 
3283 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3284 
3285 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3286 
3287 	dp_tx_debug("Tx reinject path");
3288 
3289 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3290 			qdf_nbuf_len(tx_desc->nbuf));
3291 
3292 #ifdef WDS_VENDOR_EXTENSION
3293 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3294 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3295 	} else {
3296 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3297 	}
3298 	is_ucast = !is_mcast;
3299 
3300 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3301 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3302 		if (peer->bss_peer)
3303 			continue;
3304 
3305 		/* Detect wds peers that use 3-addr framing for mcast.
3306 		 * If there are any, the bss_peer is used to send the
3307 		 * mcast frame using 3-addr format. All wds enabled
3308 		 * peers that use 4-addr framing for mcast frames will
3309 		 * be duplicated and sent as 4-addr frames below.
3310 		 */
3311 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
3312 			num_peers_3addr = 1;
3313 			break;
3314 		}
3315 	}
3316 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3317 #endif
3318 
3319 	if (qdf_unlikely(vdev->mesh_vdev)) {
3320 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3321 	} else {
3322 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3323 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3324 			if ((peer->peer_id != HTT_INVALID_PEER) &&
3325 #ifdef WDS_VENDOR_EXTENSION
3326 			/*
3327 			 * . if 3-addr STA, then send on BSS Peer
3328 			 * . if Peer WDS enabled and accept 4-addr mcast,
3329 			 * send mcast on that peer only
3330 			 * . if Peer WDS enabled and accept 4-addr ucast,
3331 			 * send ucast on that peer only
3332 			 */
3333 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
3334 			 (peer->wds_enabled &&
3335 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
3336 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
3337 #else
3338 			(peer->bss_peer &&
3339 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3340 #endif
3341 				peer_id = DP_INVALID_PEER;
3342 
3343 				nbuf_copy = qdf_nbuf_copy(nbuf);
3344 
3345 				if (!nbuf_copy) {
3346 					dp_tx_debug("nbuf copy failed");
3347 					break;
3348 				}
3349 
3350 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3351 						nbuf_copy,
3352 						&msdu_info,
3353 						peer_id,
3354 						NULL);
3355 
3356 				if (nbuf_copy) {
3357 					dp_tx_debug("pkt send failed");
3358 					qdf_nbuf_free(nbuf_copy);
3359 				}
3360 			}
3361 		}
3362 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3363 	}
3364 
3365 	qdf_nbuf_free(nbuf);
3366 
3367 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3368 }
3369 
3370 /**
3371  * dp_tx_inspect_handler() - Tx Inspect Handler
3372  * @soc: datapath soc handle
3373  * @vdev: datapath vdev handle
3374  * @tx_desc: software descriptor head pointer
3375  * @status : Tx completion status from HTT descriptor
3376  *
3377  * Handles Tx frames sent back to Host for inspection
3378  * (ProxyARP)
3379  *
3380  * Return: none
3381  */
3382 static void dp_tx_inspect_handler(struct dp_soc *soc,
3383 				  struct dp_vdev *vdev,
3384 				  struct dp_tx_desc_s *tx_desc,
3385 				  uint8_t *status)
3386 {
3387 
3388 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3389 			"%s Tx inspect path",
3390 			__func__);
3391 
3392 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3393 			 qdf_nbuf_len(tx_desc->nbuf));
3394 
3395 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3396 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3397 }
3398 
3399 #ifdef MESH_MODE_SUPPORT
3400 /**
3401  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3402  *                                         in mesh meta header
3403  * @tx_desc: software descriptor head pointer
3404  * @ts: pointer to tx completion stats
3405  * Return: none
3406  */
3407 static
3408 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3409 		struct hal_tx_completion_status *ts)
3410 {
3411 	struct meta_hdr_s *mhdr;
3412 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3413 
3414 	if (!tx_desc->msdu_ext_desc) {
3415 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3416 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3417 				"netbuf %pK offset %d",
3418 				netbuf, tx_desc->pkt_offset);
3419 			return;
3420 		}
3421 	}
3422 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
3423 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3424 			"netbuf %pK offset %zu", netbuf,
3425 			sizeof(struct meta_hdr_s));
3426 		return;
3427 	}
3428 
3429 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
3430 	mhdr->rssi = ts->ack_frame_rssi;
3431 	mhdr->band = tx_desc->pdev->operating_channel.band;
3432 	mhdr->channel = tx_desc->pdev->operating_channel.num;
3433 }
3434 
3435 #else
3436 static
3437 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3438 		struct hal_tx_completion_status *ts)
3439 {
3440 }
3441 
3442 #endif
3443 
3444 #ifdef QCA_PEER_EXT_STATS
3445 /*
3446  * dp_tx_compute_tid_delay() - Compute per TID delay
3447  * @stats: Per TID delay stats
3448  * @tx_desc: Software Tx descriptor
3449  *
3450  * Compute the software enqueue and hw enqueue delays and
3451  * update the respective histograms
3452  *
3453  * Return: void
3454  */
3455 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3456 				    struct dp_tx_desc_s *tx_desc)
3457 {
3458 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3459 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3460 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3461 
3462 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3463 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3464 	timestamp_hw_enqueue = tx_desc->timestamp;
3465 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3466 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3467 					 timestamp_hw_enqueue);
3468 
3469 	/*
3470 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
3471 	 */
3472 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3473 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3474 }
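
/*
 * Worked example (numbers are illustrative only): if the nbuf was
 * timestamped at ingress at t = 1000 ms, handed to HW at t = 1003 ms
 * (tx_desc->timestamp) and the completion is processed at t = 1010 ms,
 * then
 *	sw_enqueue_delay    = 1003 - 1000 = 3 ms  -> tx_swq_delay histogram
 *	fwhw_transmit_delay = 1010 - 1003 = 7 ms  -> hwtx_delay histogram
 */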
3475 
3476 /*
3477  * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3478  * @peer: DP peer context
3479  * @tx_desc: Tx software descriptor
3480  * @tid: Transmission ID
3481  * @ring_id: Rx CPU context ID/CPU_ID
3482  *
3483  * Update the peer extended stats. These are enhanced
3484  * per-msdu-level delay stats.
3485  *
3486  * Return: void
3487  */
3488 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3489 					struct dp_tx_desc_s *tx_desc,
3490 					uint8_t tid, uint8_t ring_id)
3491 {
3492 	struct dp_pdev *pdev = peer->vdev->pdev;
3493 	struct dp_soc *soc = NULL;
3494 	struct cdp_peer_ext_stats *pext_stats = NULL;
3495 
3496 	soc = pdev->soc;
3497 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3498 		return;
3499 
3500 	pext_stats = peer->pext_stats;
3501 
3502 	qdf_assert(pext_stats);
3503 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3504 
3505 	/*
3506 	 * For non-TID packets use the TID 9
3507 	 */
3508 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3509 		tid = CDP_MAX_DATA_TIDS - 1;
3510 
3511 	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3512 				tx_desc);
3513 }
3514 #else
3515 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3516 					       struct dp_tx_desc_s *tx_desc,
3517 					       uint8_t tid, uint8_t ring_id)
3518 {
3519 }
3520 #endif
3521 
3522 /**
3523  * dp_tx_compute_delay() - Compute the Tx path delays from the recorded
3524  *				timestamps and update the delay stats
3525  *
3526  * @vdev: vdev handle
3527  * @tx_desc: tx descriptor
3528  * @tid: tid value
3529  * @ring_id: TCL or WBM ring number for transmit path
3530  * Return: none
3531  */
3532 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3533 				struct dp_tx_desc_s *tx_desc,
3534 				uint8_t tid, uint8_t ring_id)
3535 {
3536 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3537 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3538 
3539 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3540 		return;
3541 
3542 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3543 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3544 	timestamp_hw_enqueue = tx_desc->timestamp;
3545 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3546 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3547 					 timestamp_hw_enqueue);
3548 	interframe_delay = (uint32_t)(timestamp_ingress -
3549 				      vdev->prev_tx_enq_tstamp);
3550 
3551 	/*
3552 	 * Delay in software enqueue
3553 	 */
3554 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3555 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3556 	/*
3557 	 * Delay between packet enqueued to HW and Tx completion
3558 	 */
3559 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3560 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3561 
3562 	/*
3563 	 * Update interframe delay stats calculated at hardstart receive point.
3564 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3565 	 * interframe delay will not be calculated correctly for the 1st frame.
3566 	 * On the other hand, this helps in avoiding an extra per packet check
3567 	 * of !vdev->prev_tx_enq_tstamp.
3568 	 */
3569 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3570 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3571 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3572 }
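
/*
 * Continuing the example above with illustrative numbers: if the previous
 * frame on this vdev was enqueued at t = 995 ms and the current frame hits
 * hardstart at t = 1000 ms, then
 *	interframe_delay = 1000 - 995 = 5 ms -> CDP_DELAY_STATS_TX_INTERFRAME
 * The very first frame sees prev_tx_enq_tstamp == 0, so that one sample is
 * skewed, as noted in the comment above.
 */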
3573 
3574 #ifdef DISABLE_DP_STATS
3575 static
3576 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3577 {
3578 }
3579 #else
3580 static
3581 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3582 {
3583 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3584 
3585 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3586 	if (subtype != QDF_PROTO_INVALID)
3587 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3588 }
3589 #endif
3590 
3591 /**
3592  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3593  *				per wbm ring
3594  *
3595  * @tx_desc: software descriptor head pointer
3596  * @ts: Tx completion status
3597  * @peer: peer handle
3598  * @ring_id: ring number
3599  *
3600  * Return: None
3601  */
3602 static inline void
3603 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3604 			struct hal_tx_completion_status *ts,
3605 			struct dp_peer *peer, uint8_t ring_id)
3606 {
3607 	struct dp_pdev *pdev = peer->vdev->pdev;
3608 	struct dp_soc *soc = NULL;
3609 	uint8_t mcs, pkt_type;
3610 	uint8_t tid = ts->tid;
3611 	uint32_t length;
3612 	struct cdp_tid_tx_stats *tid_stats;
3613 
3614 	if (!pdev)
3615 		return;
3616 
3617 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3618 		tid = CDP_MAX_DATA_TIDS - 1;
3619 
3620 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3621 	soc = pdev->soc;
3622 
3623 	mcs = ts->mcs;
3624 	pkt_type = ts->pkt_type;
3625 
3626 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3627 		dp_err("Release source is not from TQM");
3628 		return;
3629 	}
3630 
3631 	length = qdf_nbuf_len(tx_desc->nbuf);
3632 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3633 
3634 	if (qdf_unlikely(pdev->delay_stats_flag))
3635 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3636 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3637 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3638 
3639 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3640 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3641 
3642 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3643 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3644 
3645 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3646 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3647 
3648 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3649 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3650 
3651 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3652 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3653 
3654 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3655 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3656 
3657 	/*
3658 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3659 	 * stats. But in IPQ807X/IPQ6018 chipsets, owing to a hw limitation, there
3660 	 * are no completions for failed cases. Hence tx_failed is updated from
3661 	 * the data path. Note that if tx_failed is later derived from ppdu
3662 	 * stats, this has to be removed.
3663 	 */
3664 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3665 				peer->stats.tx.dropped.fw_rem_notx +
3666 				peer->stats.tx.dropped.fw_rem_tx +
3667 				peer->stats.tx.dropped.age_out +
3668 				peer->stats.tx.dropped.fw_reason1 +
3669 				peer->stats.tx.dropped.fw_reason2 +
3670 				peer->stats.tx.dropped.fw_reason3;
3671 
3672 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3673 		tid_stats->tqm_status_cnt[ts->status]++;
3674 	}
3675 
3676 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3677 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3678 		return;
3679 	}
3680 
3681 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3682 
3683 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3684 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3685 
3686 	/*
3687 	 * The following rate statistics are updated from HTT PPDU events from FW.
3688 	 * Return from here if HTT PPDU events are enabled.
3689 	 */
3690 	if (!(soc->process_tx_status))
3691 		return;
3692 
3693 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3694 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3695 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3696 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3697 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3698 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3700 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3701 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3702 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3703 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3704 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3705 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3706 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3707 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3708 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3709 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3710 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3711 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3712 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3713 
3714 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3715 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3716 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3717 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3718 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3719 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3720 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3721 
3722 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3723 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3724 			     &peer->stats, ts->peer_id,
3725 			     UPDATE_PEER_STATS, pdev->pdev_id);
3726 #endif
3727 }
3728 
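/*
 * Illustrative sketch (not part of the driver): the per-preamble MCS
 * bucketing pattern used in the DP_STATS_INCC ladder above. An MCS at or
 * above the per-preamble maximum is counted in the overflow bucket
 * (MAX_MCS - 1); otherwise it is counted in its own bucket. The helper
 * name and the max_mcs_for_pkt_type parameter are hypothetical; MAX_MCS is
 * the same macro used above.
 */
static inline uint8_t
dp_tx_mcs_bucket_example(uint8_t mcs, uint8_t max_mcs_for_pkt_type)
{
	return (mcs >= max_mcs_for_pkt_type) ? (MAX_MCS - 1) : mcs;
}
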
3729 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3730 /**
3731  * dp_tx_flow_pool_lock() - take flow pool lock
3732  * @soc: core txrx main context
3733  * @tx_desc: tx desc
3734  *
3735  * Return: None
3736  */
3737 static inline
3738 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3739 			  struct dp_tx_desc_s *tx_desc)
3740 {
3741 	struct dp_tx_desc_pool_s *pool;
3742 	uint8_t desc_pool_id;
3743 
3744 	desc_pool_id = tx_desc->pool_id;
3745 	pool = &soc->tx_desc[desc_pool_id];
3746 
3747 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3748 }
3749 
3750 /**
3751  * dp_tx_flow_pool_unlock() - release flow pool lock
3752  * @soc: core txrx main context
3753  * @tx_desc: tx desc
3754  *
3755  * Return: None
3756  */
3757 static inline
3758 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3759 			    struct dp_tx_desc_s *tx_desc)
3760 {
3761 	struct dp_tx_desc_pool_s *pool;
3762 	uint8_t desc_pool_id;
3763 
3764 	desc_pool_id = tx_desc->pool_id;
3765 	pool = &soc->tx_desc[desc_pool_id];
3766 
3767 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3768 }
3769 #else
3770 static inline
3771 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3772 {
3773 }
3774 
3775 static inline
3776 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3777 {
3778 }
3779 #endif
3780 
3781 /**
3782  * dp_tx_notify_completion() - Notify tx completion for this desc
3783  * @soc: core txrx main context
3784  * @vdev: datapath vdev handle
3785  * @tx_desc: tx desc
3786  * @netbuf:  buffer
3787  * @status: tx status
3788  *
3789  * Return: none
3790  */
3791 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3792 					   struct dp_vdev *vdev,
3793 					   struct dp_tx_desc_s *tx_desc,
3794 					   qdf_nbuf_t netbuf,
3795 					   uint8_t status)
3796 {
3797 	void *osif_dev;
3798 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3799 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
3800 
3801 	qdf_assert(tx_desc);
3802 
3803 	dp_tx_flow_pool_lock(soc, tx_desc);
3804 
3805 	if (!vdev ||
3806 	    !vdev->osif_vdev) {
3807 		dp_tx_flow_pool_unlock(soc, tx_desc);
3808 		return;
3809 	}
3810 
3811 	osif_dev = vdev->osif_vdev;
3812 	tx_compl_cbk = vdev->tx_comp;
3813 	dp_tx_flow_pool_unlock(soc, tx_desc);
3814 
3815 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3816 		flag |= BIT(QDF_TX_RX_STATUS_OK);
3817 
3818 	if (tx_compl_cbk)
3819 		tx_compl_cbk(netbuf, osif_dev, flag);
3820 }
3821 
3822 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3823  * @pdev: pdev handle
3824  * @tid: tid value
3825  * @txdesc_ts: timestamp from txdesc
3826  * @ppdu_id: ppdu id
3827  *
3828  * Return: none
3829  */
3830 #ifdef FEATURE_PERPKT_INFO
3831 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3832 					       struct dp_peer *peer,
3833 					       uint8_t tid,
3834 					       uint64_t txdesc_ts,
3835 					       uint32_t ppdu_id)
3836 {
3837 	uint64_t delta_ms;
3838 	struct cdp_tx_sojourn_stats *sojourn_stats;
3839 
3840 	if (qdf_unlikely(!monitor_is_enable_enhanced_stats(pdev)))
3841 		return;
3842 
3843 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3844 			 tid >= CDP_DATA_TID_MAX))
3845 		return;
3846 
3847 	if (qdf_unlikely(!pdev->sojourn_buf))
3848 		return;
3849 
3850 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3851 		qdf_nbuf_data(pdev->sojourn_buf);
3852 
3853 	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
3854 
3855 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3856 				txdesc_ts;
3857 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3858 			    delta_ms);
3859 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3860 	sojourn_stats->num_msdus[tid] = 1;
3861 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3862 		peer->avg_sojourn_msdu[tid].internal;
3863 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3864 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3865 			     WDI_NO_VAL, pdev->pdev_id);
3866 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3867 	sojourn_stats->num_msdus[tid] = 0;
3868 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3869 }
3870 #else
3871 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3872 					       struct dp_peer *peer,
3873 					       uint8_t tid,
3874 					       uint64_t txdesc_ts,
3875 					       uint32_t ppdu_id)
3876 {
3877 }
3878 #endif
3879 
3880 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
3881 /**
3882  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
3883  * @soc: dp_soc handle
3884  * @desc: Tx Descriptor
3885  * @ts: HAL Tx completion descriptor contents
3886  *
3887  * This function is used to send tx completion to packet capture
3888  */
3889 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
3890 				       struct dp_tx_desc_s *desc,
3891 				       struct hal_tx_completion_status *ts)
3892 {
3893 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
3894 			     desc, ts->peer_id,
3895 			     WDI_NO_VAL, desc->pdev->pdev_id);
3896 }
3897 #endif
3898 
3899 /**
3900  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3901  * @soc: DP Soc handle
3902  * @desc: software Tx descriptor
3903  * @ts: Tx completion status from HAL/HTT descriptor
3904  *
3905  * Return: none
3906  */
3907 static inline void
3908 dp_tx_comp_process_desc(struct dp_soc *soc,
3909 			struct dp_tx_desc_s *desc,
3910 			struct hal_tx_completion_status *ts,
3911 			struct dp_peer *peer)
3912 {
3913 	uint64_t time_latency = 0;
3914 
3915 	/*
3916 	 * m_copy/tx_capture modes are not supported for
3917 	 * scatter gather packets
3918 	 */
3919 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3920 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
3921 				desc->timestamp);
3922 	}
3923 
3924 	dp_send_completion_to_pkt_capture(soc, desc, ts);
3925 
3926 	if (!(desc->msdu_ext_desc)) {
3927 		dp_tx_enh_unmap(soc, desc);
3928 
3929 		if (QDF_STATUS_SUCCESS ==
3930 		    monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3931 			return;
3932 		}
3933 
3934 		if (QDF_STATUS_SUCCESS ==
3935 		    dp_get_completion_indication_for_stack(soc,
3936 							   desc->pdev,
3937 							   peer, ts,
3938 							   desc->nbuf,
3939 							   time_latency)) {
3940 			dp_send_completion_to_stack(soc,
3941 						    desc->pdev,
3942 						    ts->peer_id,
3943 						    ts->ppdu_id,
3944 						    desc->nbuf);
3945 			return;
3946 		}
3947 	}
3948 
3949 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
3950 	dp_tx_comp_free_buf(soc, desc);
3951 }
3952 
3953 #ifdef DISABLE_DP_STATS
3954 /**
3955  * dp_tx_update_connectivity_stats() - update tx connectivity stats
3956  * @soc: core txrx main context
3957  * @tx_desc: tx desc
3958  * @status: tx status
3959  *
3960  * Return: none
3961  */
3962 static inline
3963 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3964 				     struct dp_vdev *vdev,
3965 				     struct dp_tx_desc_s *tx_desc,
3966 				     uint8_t status)
3967 {
3968 }
3969 #else
3970 static inline
3971 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3972 				     struct dp_vdev *vdev,
3973 				     struct dp_tx_desc_s *tx_desc,
3974 				     uint8_t status)
3975 {
3976 	void *osif_dev;
3977 	ol_txrx_stats_rx_fp stats_cbk;
3978 	uint8_t pkt_type;
3979 
3980 	qdf_assert(tx_desc);
3981 
3982 	if (!vdev ||
3983 	    !vdev->osif_vdev ||
3984 	    !vdev->stats_cb)
3985 		return;
3986 
3987 	osif_dev = vdev->osif_vdev;
3988 	stats_cbk = vdev->stats_cb;
3989 
3990 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
3991 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3992 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
3993 			  &pkt_type);
3994 }
3995 #endif
3996 
3997 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3998 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3999 		      uint32_t delta_tsf)
4000 {
4001 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4002 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4003 						     DP_MOD_ID_CDP);
4004 
4005 	if (!vdev) {
4006 		dp_err_rl("vdev %d does not exist", vdev_id);
4007 		return;
4008 	}
4009 
4010 	vdev->delta_tsf = delta_tsf;
4011 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4012 
4013 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4014 }
4015 
4016 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4017 				      uint8_t vdev_id, bool enable)
4018 {
4019 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4020 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4021 						     DP_MOD_ID_CDP);
4022 
4023 	if (!vdev) {
4024 		dp_err_rl("vdev %d does not exist", vdev_id);
4025 		return QDF_STATUS_E_FAILURE;
4026 	}
4027 
4028 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4029 
4030 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4031 
4032 	return QDF_STATUS_SUCCESS;
4033 }
4034 
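/*
 * Usage sketch (illustrative, not from the driver): the expected CDP call
 * sequence for the uplink delay feature handled in this section. The
 * delay_ms variable is hypothetical.
 *
 *   dp_set_delta_tsf(soc, vdev_id, delta_tsf);        program TSF offset
 *   dp_set_tsf_ul_delay_report(soc, vdev_id, true);   start accumulating
 *   ...
 *   dp_get_uplink_delay(soc, vdev_id, &delay_ms);     read windowed average
 */
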
4035 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4036 			       uint32_t *val)
4037 {
4038 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4039 	struct dp_vdev *vdev;
4040 	uint32_t delay_accum;
4041 	uint32_t pkts_accum;
4042 
4043 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4044 	if (!vdev) {
4045 		dp_err_rl("vdev %d does not exist", vdev_id);
4046 		return QDF_STATUS_E_FAILURE;
4047 	}
4048 
4049 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4050 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4051 		return QDF_STATUS_E_FAILURE;
4052 	}
4053 
4054 	/* Average uplink delay based on current accumulated values */
4055 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4056 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4057 
4058 	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
4059 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4060 		 delay_accum, pkts_accum);
4061 
4062 	/* Reset accumulated values to 0 */
4063 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4064 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4065 
4066 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4067 
4068 	return QDF_STATUS_SUCCESS;
4069 }
4070 
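/*
 * Illustrative sketch (not part of the driver): the read-and-reset pattern
 * used above to report the windowed average uplink delay. The helper name
 * is hypothetical; the guard against an empty window mirrors the check in
 * dp_get_uplink_delay().
 */
static inline uint32_t
dp_tx_ul_delay_avg_example(uint32_t *delay_accum, uint32_t *pkts_accum)
{
	uint32_t avg = *pkts_accum ? (*delay_accum / *pkts_accum) : 0;

	/* Restart accumulation for the next reporting window */
	*delay_accum = 0;
	*pkts_accum = 0;

	return avg;
}
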
4071 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4072 				      struct hal_tx_completion_status *ts)
4073 {
4074 	uint32_t buffer_ts;
4075 	uint32_t delta_tsf;
4076 	uint32_t ul_delay;
4077 
4078 	/* If tx_rate_stats_info_valid is 0, the tsf is invalid; skip */
4079 	if (!ts->valid)
4080 		return;
4081 
4082 	if (qdf_unlikely(!vdev)) {
4083 		dp_info_rl("vdev is null or deletion in progress");
4084 		return;
4085 	}
4086 
4087 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4088 		return;
4089 
4090 	delta_tsf = vdev->delta_tsf;
4091 
4092 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4093 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
4094 	 * valid up to 29 bits.
4095 	 */
4096 	buffer_ts = ts->buffer_timestamp << 10;
4097 
4098 	ul_delay = ts->tsf - buffer_ts - delta_tsf;
4099 	ul_delay &= 0x1FFFFFFF; /* mask 29 BITS */
4100 	if (ul_delay > 0x1000000) {
4101 		dp_info_rl("----------------------\n"
4102 			   "Tx completion status:\n"
4103 			   "----------------------\n"
4104 			   "release_src = %d\n"
4105 			   "ppdu_id = 0x%x\n"
4106 			   "release_reason = %d\n"
4107 			   "tsf = %u (0x%x)\n"
4108 			   "buffer_timestamp = %u (0x%x)\n"
4109 			   "delta_tsf = %u (0x%x)\n",
4110 			   ts->release_src, ts->ppdu_id, ts->status,
4111 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4112 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4113 		return;
4114 	}
4115 
4116 	ul_delay /= 1000; /* in unit of ms */
4117 
4118 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4119 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4120 }
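
/*
 * Illustrative sketch (not part of the driver): standalone version of the
 * uplink delay math above. buffer_timestamp is in units of 1024 us, so it
 * is shifted left by 10 to approximate microseconds; the result is wrapped
 * to 29 bits and converted to ms. The helper name is hypothetical.
 */
static inline uint32_t
dp_tx_ul_delay_example_ms(uint32_t tsf_us, uint32_t buffer_timestamp,
			  uint32_t delta_tsf_us)
{
	uint32_t buffer_ts_us = buffer_timestamp << 10;	/* 1024 us units */
	uint32_t ul_delay_us = (tsf_us - buffer_ts_us - delta_tsf_us) &
			       0x1FFFFFFF;		/* 29-bit wrap */

	return ul_delay_us / 1000;			/* us -> ms */
}
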
4121 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4122 static inline
4123 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4124 			       struct hal_tx_completion_status *ts)
4125 {
4126 }
4127 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4128 
4129 /**
4130  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4131  * @soc: DP soc handle
4132  * @tx_desc: software descriptor head pointer
4133  * @ts: Tx completion status
4134  * @peer: peer handle
4135  * @ring_id: ring number
4136  *
4137  * Return: none
4138  */
4139 static inline
4140 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4141 				  struct dp_tx_desc_s *tx_desc,
4142 				  struct hal_tx_completion_status *ts,
4143 				  struct dp_peer *peer, uint8_t ring_id)
4144 {
4145 	uint32_t length;
4146 	qdf_ether_header_t *eh;
4147 	struct dp_vdev *vdev = NULL;
4148 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4149 	enum qdf_dp_tx_rx_status dp_status;
4150 
4151 	if (!nbuf) {
4152 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4153 		goto out;
4154 	}
4155 
4156 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4157 	length = qdf_nbuf_len(nbuf);
4158 
4159 	dp_status = dp_tx_hw_to_qdf(ts->status);
4160 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4161 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4162 				 QDF_TRACE_DEFAULT_PDEV_ID,
4163 				 qdf_nbuf_data_addr(nbuf),
4164 				 sizeof(qdf_nbuf_data(nbuf)),
4165 				 tx_desc->id, ts->status, dp_status));
4166 
4167 	dp_tx_comp_debug("-------------------- \n"
4168 			 "Tx Completion Stats: \n"
4169 			 "-------------------- \n"
4170 			 "ack_frame_rssi = %d \n"
4171 			 "first_msdu = %d \n"
4172 			 "last_msdu = %d \n"
4173 			 "msdu_part_of_amsdu = %d \n"
4174 			 "rate_stats valid = %d \n"
4175 			 "bw = %d \n"
4176 			 "pkt_type = %d \n"
4177 			 "stbc = %d \n"
4178 			 "ldpc = %d \n"
4179 			 "sgi = %d \n"
4180 			 "mcs = %d \n"
4181 			 "ofdma = %d \n"
4182 			 "tones_in_ru = %d \n"
4183 			 "tsf = %d \n"
4184 			 "ppdu_id = %d \n"
4185 			 "transmit_cnt = %d \n"
4186 			 "tid = %d \n"
4187 			 "peer_id = %d\n",
4188 			 ts->ack_frame_rssi, ts->first_msdu,
4189 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4190 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4191 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4192 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4193 			 ts->transmit_cnt, ts->tid, ts->peer_id);
4194 
4195 	/* Update SoC level stats */
4196 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4197 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4198 
4199 	if (!peer) {
4200 		dp_info_rl("peer is null or deletion in progress");
4201 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4202 		goto out;
4203 	}
4204 	vdev = peer->vdev;
4205 
4206 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4207 	dp_tx_update_uplink_delay(soc, vdev, ts);
4208 
4209 	/* Update per-packet stats for mesh mode */
4210 	if (qdf_unlikely(vdev->mesh_vdev) &&
4211 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4212 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4213 
4214 	/* Update peer level stats */
4215 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
4216 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4217 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
4218 
4219 			if ((peer->vdev->tx_encap_type ==
4220 				htt_cmn_pkt_type_ethernet) &&
4221 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4222 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
4223 			}
4224 		}
4225 	} else {
4226 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
4227 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4228 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
4229 			if (qdf_unlikely(peer->in_twt)) {
4230 				DP_STATS_INC_PKT(peer,
4231 						 tx.tx_success_twt,
4232 						 1, length);
4233 			}
4234 		}
4235 	}
4236 
4237 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
4238 	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
4239 
4240 #ifdef QCA_SUPPORT_RDK_STATS
4241 	if (soc->rdkstats_enabled)
4242 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
4243 					    tx_desc->timestamp,
4244 					    ts->ppdu_id);
4245 #endif
4246 
4247 out:
4248 	return;
4249 }
4250 
4251 /**
4252  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4253  * @soc: core txrx main context
4254  * @comp_head: software descriptor head pointer
4255  * @ring_id: ring number
4256  *
4257  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4258  * and release the software descriptors after processing is complete
4259  *
4260  * Return: none
4261  */
4262 static void
4263 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4264 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4265 {
4266 	struct dp_tx_desc_s *desc;
4267 	struct dp_tx_desc_s *next;
4268 	struct hal_tx_completion_status ts;
4269 	struct dp_peer *peer = NULL;
4270 	uint16_t peer_id = DP_INVALID_PEER;
4271 	qdf_nbuf_t netbuf;
4272 
4273 	desc = comp_head;
4274 
4275 	while (desc) {
4276 		if (peer_id != desc->peer_id) {
4277 			if (peer)
4278 				dp_peer_unref_delete(peer,
4279 						     DP_MOD_ID_TX_COMP);
4280 			peer_id = desc->peer_id;
4281 			peer = dp_peer_get_ref_by_id(soc, peer_id,
4282 						     DP_MOD_ID_TX_COMP);
4283 		}
4284 
4285 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4286 			struct dp_pdev *pdev = desc->pdev;
4287 
4288 			if (qdf_likely(peer)) {
4289 				/*
4290 				 * Increment peer statistics
4291 				 * Minimal statistics update done here
4292 				 */
4293 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
4294 						 desc->length);
4295 
4296 				if (desc->tx_status !=
4297 						HAL_TX_TQM_RR_FRAME_ACKED)
4298 					DP_STATS_INC(peer, tx.tx_failed, 1);
4299 			}
4300 
4301 			qdf_assert(pdev);
4302 			dp_tx_outstanding_dec(pdev);
4303 
4304 			/*
4305 			 * Calling a QDF wrapper here creates a significant
4306 			 * performance impact, so the wrapper call is avoided here
4307 			 */
4308 			next = desc->next;
4309 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
4310 					       desc->id, DP_TX_COMP_UNMAP);
4311 			qdf_mem_unmap_nbytes_single(soc->osdev,
4312 						    desc->dma_addr,
4313 						    QDF_DMA_TO_DEVICE,
4314 						    desc->length);
4315 			qdf_nbuf_free(desc->nbuf);
4316 			dp_tx_desc_free(soc, desc, desc->pool_id);
4317 			desc = next;
4318 			continue;
4319 		}
4320 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4321 
4322 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
4323 
4324 		netbuf = desc->nbuf;
4325 		/* check tx complete notification */
4326 		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
4327 			dp_tx_notify_completion(soc, peer->vdev, desc,
4328 						netbuf, ts.status);
4329 
4330 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
4331 
4332 		next = desc->next;
4333 
4334 		dp_tx_desc_release(desc, desc->pool_id);
4335 		desc = next;
4336 	}
4337 	if (peer)
4338 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
4339 }
4340 
4341 /**
4342  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
4343  * @soc: Handle to DP soc structure
4344  * @tx_desc: software descriptor head pointer
4345  * @status : Tx completion status from HTT descriptor
4346  * @ring_id: ring number
4347  *
4348  * This function will process HTT Tx indication messages from Target
4349  *
4350  * Return: none
4351  */
4352 static
4353 void dp_tx_process_htt_completion(struct dp_soc *soc,
4354 				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
4355 				  uint8_t ring_id)
4356 {
4357 	uint8_t tx_status;
4358 	struct dp_pdev *pdev;
4359 	struct dp_vdev *vdev;
4360 	struct hal_tx_completion_status ts = {0};
4361 	uint32_t *htt_desc = (uint32_t *)status;
4362 	struct dp_peer *peer;
4363 	struct cdp_tid_tx_stats *tid_stats = NULL;
4364 	struct htt_soc *htt_handle;
4365 	uint8_t vdev_id;
4366 
4367 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
4368 	htt_handle = (struct htt_soc *)soc->htt_handle;
4369 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
4370 
4371 	/*
4372 	 * There can be a scenario where WBM consumes the descriptor enqueued
4373 	 * from TQM2WBM first, so the TQM completion can happen before the MEC
4374 	 * notification comes from FW2WBM. Avoid accessing any field of the tx
4375 	 * descriptor in case of a MEC notify.
4376 	 */
4377 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
4378 		/*
4379 		 * Get vdev id from HTT status word in case of MEC
4380 		 * notification
4381 		 */
4382 		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
4383 		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4384 			return;
4385 
4386 		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4387 				DP_MOD_ID_HTT_COMP);
4388 		if (!vdev)
4389 			return;
4390 		dp_tx_mec_handler(vdev, status);
4391 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4392 		return;
4393 	}
4394 
4395 	/*
4396 	 * If the descriptor is already freed in vdev_detach,
4397 	 * continue to next descriptor
4398 	 */
4399 	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
4400 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", tx_desc->id);
4401 		return;
4402 	}
4403 
4404 	pdev = tx_desc->pdev;
4405 
4406 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4407 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
4408 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
4409 		dp_tx_comp_free_buf(soc, tx_desc);
4410 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4411 		return;
4412 	}
4413 
4414 	qdf_assert(tx_desc->pdev);
4415 
4416 	vdev_id = tx_desc->vdev_id;
4417 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4418 			DP_MOD_ID_HTT_COMP);
4419 
4420 	if (!vdev)
4421 		return;
4422 
4423 	switch (tx_status) {
4424 	case HTT_TX_FW2WBM_TX_STATUS_OK:
4425 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
4426 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
4427 	{
4428 		uint8_t tid;
4429 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
4430 			ts.peer_id =
4431 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
4432 						htt_desc[2]);
4433 			ts.tid =
4434 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
4435 						htt_desc[2]);
4436 		} else {
4437 			ts.peer_id = HTT_INVALID_PEER;
4438 			ts.tid = HTT_INVALID_TID;
4439 		}
4440 		ts.ppdu_id =
4441 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
4442 					htt_desc[1]);
4443 		ts.ack_frame_rssi =
4444 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
4445 					htt_desc[1]);
4446 
4447 		ts.tsf = htt_desc[3];
4448 		ts.first_msdu = 1;
4449 		ts.last_msdu = 1;
4450 		tid = ts.tid;
4451 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4452 			tid = CDP_MAX_DATA_TIDS - 1;
4453 
4454 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4455 
4456 		if (qdf_unlikely(pdev->delay_stats_flag))
4457 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
4458 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
4459 			tid_stats->htt_status_cnt[tx_status]++;
4460 		}
4461 
4462 		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
4463 					     DP_MOD_ID_HTT_COMP);
4464 
4465 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
4466 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
4467 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4468 
4469 		if (qdf_likely(peer))
4470 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
4471 
4472 		break;
4473 	}
4474 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
4475 	{
4476 		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
4477 		break;
4478 	}
4479 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
4480 	{
4481 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
4482 		break;
4483 	}
4484 	default:
4485 		dp_tx_comp_debug("Invalid HTT tx_status %d\n",
4486 				 tx_status);
4487 		break;
4488 	}
4489 
4490 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4491 }
4492 
4493 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4494 static inline
4495 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4496 				   int max_reap_limit)
4497 {
4498 	bool limit_hit = false;
4499 
4500 	limit_hit =
4501 		(num_reaped >= max_reap_limit) ? true : false;
4502 
4503 	if (limit_hit)
4504 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4505 
4506 	return limit_hit;
4507 }
4508 
4509 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4510 {
4511 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4512 }
4513 
4514 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4515 {
4516 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
4517 
4518 	return cfg->tx_comp_loop_pkt_limit;
4519 }
4520 #else
4521 static inline
4522 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4523 				   int max_reap_limit)
4524 {
4525 	return false;
4526 }
4527 
4528 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4529 {
4530 	return false;
4531 }
4532 
4533 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4534 {
4535 	return 0;
4536 }
4537 #endif
4538 
4539 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
4540 static inline int
4541 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4542 				  int *max_reap_limit)
4543 {
4544 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
4545 							       max_reap_limit);
4546 }
4547 #else
4548 static inline int
4549 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4550 				  int *max_reap_limit)
4551 {
4552 	return 0;
4553 }
4554 #endif
4555 
4556 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
4557 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
4558 			    uint32_t quota)
4559 {
4560 	void *tx_comp_hal_desc;
4561 	uint8_t buffer_src;
4562 	struct dp_tx_desc_s *tx_desc = NULL;
4563 	struct dp_tx_desc_s *head_desc = NULL;
4564 	struct dp_tx_desc_s *tail_desc = NULL;
4565 	uint32_t num_processed = 0;
4566 	uint32_t count;
4567 	uint32_t num_avail_for_reap = 0;
4568 	bool force_break = false;
4569 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
4570 	int max_reap_limit, ring_near_full;
4571 
4572 	DP_HIST_INIT();
4573 
4574 more_data:
4575 	/* Re-initialize local variables to be re-used */
4576 	head_desc = NULL;
4577 	tail_desc = NULL;
4578 	count = 0;
4579 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
4580 
4581 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
4582 							   &max_reap_limit);
4583 
4584 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
4585 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
4586 		return 0;
4587 	}
4588 
4589 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
4590 
4591 	if (num_avail_for_reap >= quota)
4592 		num_avail_for_reap = quota;
4593 
4594 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
4595 
4596 	/* Find head descriptor from completion ring */
4597 	while (qdf_likely(num_avail_for_reap--)) {
4598 
4599 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
4600 		if (qdf_unlikely(!tx_comp_hal_desc))
4601 			break;
4602 		buffer_src = hal_tx_comp_get_buffer_source(soc->hal_soc,
4603 							   tx_comp_hal_desc);
4604 
4605 		/* If this buffer was not released by TQM or FW, then it is not
4606 		 * a Tx completion indication; assert */
4607 		if (qdf_unlikely(buffer_src !=
4608 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
4609 				 (qdf_unlikely(buffer_src !=
4610 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
4611 			uint8_t wbm_internal_error;
4612 
4613 			dp_err_rl(
4614 				"Tx comp release_src != TQM | FW but from %d",
4615 				buffer_src);
4616 			hal_dump_comp_desc(tx_comp_hal_desc);
4617 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
4618 
4619 			/* When WBM sees NULL buffer_addr_info in any of
4620 			 * ingress rings it sends an error indication,
4621 			 * with wbm_internal_error=1, to a specific ring.
4622 			 * The WBM2SW ring used to indicate these errors is
4623 			 * fixed in HW, and that ring is being used as Tx
4624 			 * completion ring. These errors are not related to
4625 			 * Tx completions, and should just be ignored
4626 			 */
4627 			wbm_internal_error = hal_get_wbm_internal_error(
4628 							soc->hal_soc,
4629 							tx_comp_hal_desc);
4630 
4631 			if (wbm_internal_error) {
4632 				dp_err_rl("Tx comp wbm_internal_error!!");
4633 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
4634 
4635 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
4636 								buffer_src)
4637 					dp_handle_wbm_internal_error(
4638 						soc,
4639 						tx_comp_hal_desc,
4640 						hal_tx_comp_get_buffer_type(
4641 							tx_comp_hal_desc));
4642 
4643 			} else {
4644 				dp_err_rl("Tx comp wbm_internal_error false");
4645 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
4646 			}
4647 			continue;
4648 		}
4649 
4650 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
4651 							       tx_comp_hal_desc,
4652 							       &tx_desc);
4653 		if (!tx_desc) {
4654 			dp_err("unable to retrieve tx_desc!");
4655 			QDF_BUG(0);
4656 			continue;
4657 		}
4658 		tx_desc->buffer_src = buffer_src;
4659 		/*
4660 		 * If the release source is FW, process the HTT status
4661 		 */
4662 		if (qdf_unlikely(buffer_src ==
4663 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
4664 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
4665 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
4666 					htt_tx_status);
4667 			dp_tx_process_htt_completion(soc, tx_desc,
4668 					htt_tx_status, ring_id);
4669 		} else {
4670 			tx_desc->peer_id =
4671 				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
4672 			tx_desc->tx_status =
4673 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4674 			tx_desc->buffer_src = buffer_src;
4675 			/*
4676 			 * If the fast completion mode is enabled, extended
4677 			 * metadata from the descriptor is not copied
4678 			 */
4679 			if (qdf_likely(tx_desc->flags &
4680 						DP_TX_DESC_FLAG_SIMPLE))
4681 				goto add_to_pool;
4682 
4683 			/*
4684 			 * If the descriptor is already freed in vdev_detach,
4685 			 * continue to next descriptor
4686 			 */
4687 			if (qdf_unlikely
4688 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
4689 				 !tx_desc->flags)) {
4690 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
4691 						   tx_desc->id);
4692 				continue;
4693 			}
4694 
4695 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4696 				dp_tx_comp_info_rl("pdev in down state %d",
4697 						   tx_desc->id);
4698 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
4699 				dp_tx_comp_free_buf(soc, tx_desc);
4700 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4701 				goto next_desc;
4702 			}
4703 
4704 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4705 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
4706 				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
4707 						 tx_desc->flags, tx_desc->id);
4708 				qdf_assert_always(0);
4709 			}
4710 
4711 			/* Collect hw completion contents */
4712 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4713 					      &tx_desc->comp, 1);
4714 add_to_pool:
4715 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4716 
4717 			/* First ring descriptor on the cycle */
4718 			if (!head_desc) {
4719 				head_desc = tx_desc;
4720 				tail_desc = tx_desc;
4721 			}
4722 
4723 			tail_desc->next = tx_desc;
4724 			tx_desc->next = NULL;
4725 			tail_desc = tx_desc;
4726 		}
4727 next_desc:
4728 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4729 
4730 		/*
4731 		 * Stop processing once the processed packet count exceeds
4732 		 * the given quota
4733 		 */
4734 
4735 		count++;
4736 
4737 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
4738 			break;
4739 	}
4740 
4741 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4742 
4743 	/* Process the reaped descriptors */
4744 	if (head_desc)
4745 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4746 
4747 	/*
4748 	 * If we are processing in near-full condition, there are 3 scenarios:
4749 	 * 1) Ring entries has reached critical state
4750 	 * 2) Ring entries are still near high threshold
4751 	 * 3) Ring entries are below the safe level
4752 	 *
4753 	 * One more loop will move the state to normal processing and yield
4754 	 */
4755 	if (ring_near_full)
4756 		goto more_data;
4757 
4758 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4759 
4760 		if (num_processed >= quota)
4761 			force_break = true;
4762 
4763 		if (!force_break &&
4764 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
4765 						  hal_ring_hdl)) {
4766 			DP_STATS_INC(soc, tx.hp_oos2, 1);
4767 			if (!hif_exec_should_yield(soc->hif_handle,
4768 						   int_ctx->dp_intr_id))
4769 				goto more_data;
4770 		}
4771 	}
4772 	DP_TX_HIST_STATS_PER_PDEV();
4773 
4774 	return num_processed;
4775 }
4776 
4777 #ifdef FEATURE_WLAN_TDLS
4778 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4779 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4780 {
4781 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4782 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4783 						     DP_MOD_ID_TDLS);
4784 
4785 	if (!vdev) {
4786 		dp_err("vdev handle for id %d is NULL", vdev_id);
4787 		return NULL;
4788 	}
4789 
4790 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4791 		vdev->is_tdls_frame = true;
4792 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
4793 
4794 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4795 }
4796 #endif
4797 
4798 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4799 {
4800 	struct wlan_cfg_dp_soc_ctxt *cfg;
4801 
4802 	struct dp_soc *soc;
4803 
4804 	soc = vdev->pdev->soc;
4805 	if (!soc)
4806 		return;
4807 
4808 	cfg = soc->wlan_cfg_ctx;
4809 	if (!cfg)
4810 		return;
4811 
4812 	if (vdev->opmode == wlan_op_mode_ndi)
4813 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4814 	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
4815 		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
4816 		 (vdev->subtype == wlan_op_subtype_p2p_go))
4817 		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
4818 	else
4819 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4820 }
4821 
4822 /**
4823  * dp_tx_vdev_attach() - attach vdev to dp tx
4824  * @vdev: virtual device instance
4825  *
4826  * Return: QDF_STATUS_SUCCESS: success
4827  *         QDF_STATUS_E_RESOURCES: Error return
4828  */
4829 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4830 {
4831 	int pdev_id;
4832 	/*
4833 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4834 	 */
4835 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4836 				     HTT_TCL_METADATA_TYPE_VDEV_BASED);
4837 
4838 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4839 					vdev->vdev_id);
4840 
4841 	pdev_id =
4842 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4843 						       vdev->pdev->pdev_id);
4844 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4845 
4846 	/*
4847 	 * Set HTT Extension Valid bit to 0 by default
4848 	 */
4849 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4850 
4851 	dp_tx_vdev_update_search_flags(vdev);
4852 
4853 	dp_tx_vdev_update_feature_flags(vdev);
4854 
4855 	return QDF_STATUS_SUCCESS;
4856 }
4857 
4858 #ifndef FEATURE_WDS
4859 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4860 {
4861 	return false;
4862 }
4863 #endif
4864 
4865 /**
4866  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4867  * @vdev: virtual device instance
4868  *
4869  * Return: void
4870  *
4871  */
4872 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4873 {
4874 	struct dp_soc *soc = vdev->pdev->soc;
4875 
4876 	/*
4877 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
4878 	 * for TDLS link
4879 	 *
4880 	 * Enable AddrY (SA based search) only for non-WDS STA and
4881 	 * ProxySTA VAP (in HKv1) modes.
4882 	 *
4883 	 * In all other VAP modes, only DA based search should be
4884 	 * enabled
4885 	 */
4886 	if (vdev->opmode == wlan_op_mode_sta &&
4887 	    vdev->tdls_link_connected)
4888 		vdev->hal_desc_addr_search_flags =
4889 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4890 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4891 		 !dp_tx_da_search_override(vdev))
4892 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4893 	else
4894 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4895 
4896 	/* Set search type only when peer map v2 messaging is enabled
4897 	 * as we will have the search index (AST hash) only when v2 is
4898 	 * enabled
4899 	 */
4900 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4901 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4902 	else
4903 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4904 }
4905 
4906 static inline bool
4907 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4908 			  struct dp_vdev *vdev,
4909 			  struct dp_tx_desc_s *tx_desc)
4910 {
4911 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4912 		return false;
4913 
4914 	/*
4915 	 * If vdev is given, only check whether the desc's
4916 	 * vdev matches. If vdev is NULL, check whether the
4917 	 * desc's pdev matches.
4918 	 */
4919 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
4920 		(tx_desc->pdev == pdev);
4921 }
4922 
4923 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4924 /**
4925  * dp_tx_desc_flush() - release resources associated
4926  *                      to TX Desc
4927  *
4928  * @pdev: Handle to DP pdev structure
4929  * @vdev: virtual device instance
4930  * NULL: no specific Vdev is required; check all allocated TX desc
4931  * on this pdev.
4932  * Non-NULL: only check the allocated TX Desc associated with this Vdev.
4933  *
4934  * @force_free:
4935  * true: flush the TX desc.
4936  * false: only reset the Vdev in each allocated TX desc
4937  * that is associated with the current Vdev.
4938  *
4939  * This function will go through the TX desc pool to flush
4940  * the outstanding TX data or reset Vdev to NULL in associated TX
4941  * Desc.
4942  */
4943 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4944 		      bool force_free)
4945 {
4946 	uint8_t i;
4947 	uint32_t j;
4948 	uint32_t num_desc, page_id, offset;
4949 	uint16_t num_desc_per_page;
4950 	struct dp_soc *soc = pdev->soc;
4951 	struct dp_tx_desc_s *tx_desc = NULL;
4952 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4953 
4954 	if (!vdev && !force_free) {
4955 		dp_err("Reset TX desc vdev, Vdev param is required!");
4956 		return;
4957 	}
4958 
4959 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4960 		tx_desc_pool = &soc->tx_desc[i];
4961 		if (!(tx_desc_pool->pool_size) ||
4962 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4963 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4964 			continue;
4965 
4966 		/*
4967 		 * Add flow pool lock protection in case pool is freed
4968 		 * due to all tx_desc being recycled when handling TX completion.
4969 		 * This is not necessary when doing a force flush as:
4970 		 * a. double lock will happen if dp_tx_desc_release is
4971 		 *    also trying to acquire it.
4972 		 * b. dp interrupt has been disabled before do force TX desc
4973 		 *    flush in dp_pdev_deinit().
4974 		 */
4975 		if (!force_free)
4976 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4977 		num_desc = tx_desc_pool->pool_size;
4978 		num_desc_per_page =
4979 			tx_desc_pool->desc_pages.num_element_per_page;
4980 		for (j = 0; j < num_desc; j++) {
4981 			page_id = j / num_desc_per_page;
4982 			offset = j % num_desc_per_page;
4983 
4984 			if (qdf_unlikely(!(tx_desc_pool->
4985 					 desc_pages.cacheable_pages)))
4986 				break;
4987 
4988 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4989 
4990 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4991 				/*
4992 				 * Free TX desc if force free is
4993 				 * required, otherwise only reset vdev
4994 				 * in this TX desc.
4995 				 */
4996 				if (force_free) {
4997 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
4998 					dp_tx_comp_free_buf(soc, tx_desc);
4999 					dp_tx_desc_release(tx_desc, i);
5000 				} else {
5001 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5002 				}
5003 			}
5004 		}
5005 		if (!force_free)
5006 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5007 	}
5008 }
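
/*
 * Usage sketch (illustrative, not from the driver): the two call patterns
 * described in the comment above dp_tx_desc_flush().
 *
 *   dp_tx_desc_flush(pdev, vdev, false);  vdev detach: only reset the vdev
 *                                          id in matching TX descs
 *   dp_tx_desc_flush(pdev, NULL, true);   pdev deinit: force free every
 *                                          outstanding TX desc on the pdev
 */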
5009 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5010 /**
5011  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5012  *
5013  * @soc: Handle to DP soc structure
5014  * @tx_desc: pointer of one TX desc
5015  * @desc_pool_id: TX Desc pool id
5016  */
5017 static inline void
5018 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5019 		      uint8_t desc_pool_id)
5020 {
5021 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5022 
5023 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5024 
5025 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5026 }
5027 
5028 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5029 		      bool force_free)
5030 {
5031 	uint8_t i, num_pool;
5032 	uint32_t j;
5033 	uint32_t num_desc, page_id, offset;
5034 	uint16_t num_desc_per_page;
5035 	struct dp_soc *soc = pdev->soc;
5036 	struct dp_tx_desc_s *tx_desc = NULL;
5037 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5038 
5039 	if (!vdev && !force_free) {
5040 		dp_err("Reset TX desc vdev, Vdev param is required!");
5041 		return;
5042 	}
5043 
5044 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5045 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5046 
5047 	for (i = 0; i < num_pool; i++) {
5048 		tx_desc_pool = &soc->tx_desc[i];
5049 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5050 			continue;
5051 
5052 		num_desc_per_page =
5053 			tx_desc_pool->desc_pages.num_element_per_page;
5054 		for (j = 0; j < num_desc; j++) {
5055 			page_id = j / num_desc_per_page;
5056 			offset = j % num_desc_per_page;
5057 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5058 
5059 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5060 				if (force_free) {
5061 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5062 					dp_tx_comp_free_buf(soc, tx_desc);
5063 					dp_tx_desc_release(tx_desc, i);
5064 				} else {
5065 					dp_tx_desc_reset_vdev(soc, tx_desc,
5066 							      i);
5067 				}
5068 			}
5069 		}
5070 	}
5071 }
5072 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5073 
5074 /**
5075  * dp_tx_vdev_detach() - detach vdev from dp tx
5076  * @vdev: virtual device instance
5077  *
5078  * Return: QDF_STATUS_SUCCESS: success
5079  *         QDF_STATUS_E_RESOURCES: Error return
5080  */
5081 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5082 {
5083 	struct dp_pdev *pdev = vdev->pdev;
5084 
5085 	/* Reset TX desc associated to this Vdev as NULL */
5086 	dp_tx_desc_flush(pdev, vdev, false);
5087 
5088 	return QDF_STATUS_SUCCESS;
5089 }
5090 
5091 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5092 /* Pools will be allocated dynamically */
5093 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5094 					   int num_desc)
5095 {
5096 	uint8_t i;
5097 
5098 	for (i = 0; i < num_pool; i++) {
5099 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5100 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5101 	}
5102 
5103 	return QDF_STATUS_SUCCESS;
5104 }
5105 
5106 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5107 					  int num_desc)
5108 {
5109 	return QDF_STATUS_SUCCESS;
5110 }
5111 
5112 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5113 {
5114 }
5115 
5116 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5117 {
5118 	uint8_t i;
5119 
5120 	for (i = 0; i < num_pool; i++)
5121 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5122 }
5123 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5124 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5125 					   int num_desc)
5126 {
5127 	uint8_t i, count;
5128 
5129 	/* Allocate software Tx descriptor pools */
5130 	for (i = 0; i < num_pool; i++) {
5131 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5132 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5133 				  FL("Tx Desc Pool alloc %d failed %pK"),
5134 				  i, soc);
5135 			goto fail;
5136 		}
5137 	}
5138 	return QDF_STATUS_SUCCESS;
5139 
5140 fail:
5141 	for (count = 0; count < i; count++)
5142 		dp_tx_desc_pool_free(soc, count);
5143 
5144 	return QDF_STATUS_E_NOMEM;
5145 }
5146 
5147 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5148 					  int num_desc)
5149 {
5150 	uint8_t i;
5151 	for (i = 0; i < num_pool; i++) {
5152 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5153 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5154 				  FL("Tx Desc Pool init %d failed %pK"),
5155 				  i, soc);
5156 			return QDF_STATUS_E_NOMEM;
5157 		}
5158 	}
5159 	return QDF_STATUS_SUCCESS;
5160 }
5161 
5162 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5163 {
5164 	uint8_t i;
5165 
5166 	for (i = 0; i < num_pool; i++)
5167 		dp_tx_desc_pool_deinit(soc, i);
5168 }
5169 
5170 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5171 {
5172 	uint8_t i;
5173 
5174 	for (i = 0; i < num_pool; i++)
5175 		dp_tx_desc_pool_free(soc, i);
5176 }
5177 
5178 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5179 
5180 /**
5181  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5182  * @soc: core txrx main context
5183  * @num_pool: number of pools
5184  *
5185  */
5186 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5187 {
5188 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5189 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5190 }
5191 
5192 /**
5193  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5194  * @soc: core txrx main context
5195  * @num_pool: number of pools
5196  *
5197  */
5198 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5199 {
5200 	dp_tx_tso_desc_pool_free(soc, num_pool);
5201 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5202 }
5203 
5204 /**
5205  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5206  * @soc: core txrx main context
5207  *
5208  * This function frees all tx related descriptors as below
5209  * 1. Regular TX descriptors (static pools)
5210  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5211  * 3. TSO descriptors
5212  *
5213  */
5214 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5215 {
5216 	uint8_t num_pool;
5217 
5218 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5219 
5220 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5221 	dp_tx_ext_desc_pool_free(soc, num_pool);
5222 	dp_tx_delete_static_pools(soc, num_pool);
5223 }
5224 
5225 /**
5226  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5227  * @soc: core txrx main context
5228  *
5229  * This function de-initializes all tx related descriptors as below
5230  * 1. Regular TX descriptors (static pools)
5231  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5232  * 3. TSO descriptors
5233  *
5234  */
5235 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5236 {
5237 	uint8_t num_pool;
5238 
5239 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5240 
5241 	dp_tx_flow_control_deinit(soc);
5242 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5243 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5244 	dp_tx_deinit_static_pools(soc, num_pool);
5245 }
5246 
5247 /**
5248  * dp_tx_tso_cmn_desc_pool_alloc() - Reserve TSO descriptor buffers
5249  * @soc: DP soc handle
5250  * @num_pool: Number of pools
5251  * @num_desc: Number of descriptors
5252  *
5253  * Return: QDF_STATUS_E_FAILURE on failure or
5254  * QDF_STATUS_SUCCESS on success
5255  */
5256 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5257 					 uint8_t num_pool,
5258 					 uint16_t num_desc)
5259 {
5260 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5261 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5262 		return QDF_STATUS_E_FAILURE;
5263 	}
5264 
5265 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5266 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5267 		       num_pool, soc);
5268 		return QDF_STATUS_E_FAILURE;
5269 	}
5270 	return QDF_STATUS_SUCCESS;
5271 }
5272 
5273 /**
5274  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5275  * @soc: DP soc handle
5276  * @num_pool: Number of pools
5277  * @num_desc: Number of descriptors
5278  *
5279  * Initialize TSO descriptor pools
5280  *
5281  * Return: QDF_STATUS_E_FAILURE on failure or
5282  * QDF_STATUS_SUCCESS on success
5283  */
5284 
5285 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5286 					uint8_t num_pool,
5287 					uint16_t num_desc)
5288 {
5289 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5290 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5291 		return QDF_STATUS_E_FAILURE;
5292 	}
5293 
5294 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5295 		dp_err("TSO Num of seg Pool init %d failed %pK",
5296 		       num_pool, soc);
5297 		return QDF_STATUS_E_FAILURE;
5298 	}
5299 	return QDF_STATUS_SUCCESS;
5300 }
5301 
5302 /**
5303  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5304  * @soc: core txrx main context
5305  *
5306  * This function allocates memory for following descriptor pools
5307  * 1. regular sw tx descriptor pools (static pools)
5308  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5309  * 3. TSO descriptor pools
5310  *
5311  * Return: QDF_STATUS_SUCCESS: success
5312  *         QDF_STATUS_E_RESOURCES: Error return
5313  */
5314 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5315 {
5316 	uint8_t num_pool;
5317 	uint32_t num_desc;
5318 	uint32_t num_ext_desc;
5319 
5320 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5321 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5322 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5323 
5324 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5325 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5326 		  __func__, num_pool, num_desc);
5327 
5328 	if ((num_pool > MAX_TXDESC_POOLS) ||
5329 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5330 		goto fail1;
5331 
5332 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5333 		goto fail1;
5334 
5335 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5336 		goto fail2;
5337 
5338 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5339 		return QDF_STATUS_SUCCESS;
5340 
5341 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5342 		goto fail3;
5343 
5344 	return QDF_STATUS_SUCCESS;
5345 
5346 fail3:
5347 	dp_tx_ext_desc_pool_free(soc, num_pool);
5348 fail2:
5349 	dp_tx_delete_static_pools(soc, num_pool);
5350 fail1:
5351 	return QDF_STATUS_E_RESOURCES;
5352 }
5353 
5354 /**
5355  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5356  * @soc: core txrx main context
5357  *
5358  * This function initializes the following TX descriptor pools
5359  * 1. regular sw tx descriptor pools (static pools)
5360  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5361  * 3. TSO descriptor pools
5362  *
5363  * Return: QDF_STATUS_SUCCESS: success
5364  *	   QDF_STATUS_E_RESOURCES: Error return
5365  */
5366 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5367 {
5368 	uint8_t num_pool;
5369 	uint32_t num_desc;
5370 	uint32_t num_ext_desc;
5371 
5372 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5373 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5374 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5375 
5376 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5377 		goto fail1;
5378 
5379 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5380 		goto fail2;
5381 
5382 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5383 		return QDF_STATUS_SUCCESS;
5384 
5385 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5386 		goto fail3;
5387 
5388 	dp_tx_flow_control_init(soc);
5389 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5390 	return QDF_STATUS_SUCCESS;
5391 
5392 fail3:
5393 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5394 fail2:
5395 	dp_tx_deinit_static_pools(soc, num_pool);
5396 fail1:
5397 	return QDF_STATUS_E_RESOURCES;
5398 }
5399 
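/*
 * Illustrative lifecycle sketch (not from the driver's comments): how the
 * pool routines above are expected to pair up across soc attach/detach.
 *
 *   dp_soc_tx_desc_sw_pools_alloc(soc);    allocate pool memory
 *   dp_soc_tx_desc_sw_pools_init(soc);     initialize descriptors
 *   ...                                    Tx traffic runs
 *   dp_soc_tx_desc_sw_pools_deinit(soc);   de-initialize descriptors
 *   dp_soc_tx_desc_sw_pools_free(soc);     release pool memory
 */
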
5400 /**
5401  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5402  * @txrx_soc: dp soc handle
5403  *
5404  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5405  *			QDF_STATUS_E_FAILURE
5406  */
5407 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5408 {
5409 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5410 	uint8_t num_pool;
5411 	uint32_t num_desc;
5412 	uint32_t num_ext_desc;
5413 
5414 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5415 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5416 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5417 
5418 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5419 		return QDF_STATUS_E_FAILURE;
5420 
5421 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5422 		return QDF_STATUS_E_FAILURE;
5423 
5424 	return QDF_STATUS_SUCCESS;
5425 }
5426 
5427 /**
5428  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5429  * @txrx_soc: dp soc handle
5430  *
5431  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5432  */
5433 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5434 {
5435 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5436 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5437 
5438 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5439 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5440 
5441 	return QDF_STATUS_SUCCESS;
5442 }
5443 
5444