xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include "qdf_module.h"
31 #include <wlan_cfg.h>
32 #include "dp_ipa.h"
33 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
34 #include "if_meta_hdr.h"
35 #endif
36 #include "enet.h"
37 #include "dp_internal.h"
38 #ifdef ATH_SUPPORT_IQUE
39 #include "dp_txrx_me.h"
40 #endif
41 #include "dp_hist.h"
42 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
43 #include <dp_swlm.h>
44 #endif
45 #ifdef WIFI_MONITOR_SUPPORT
46 #include <dp_mon.h>
47 #endif
48 #ifdef FEATURE_WDS
49 #include "dp_txrx_wds.h"
50 #endif
51 
52 /* Flag to skip CCE classify when mesh or tid override enabled */
53 #define DP_TX_SKIP_CCE_CLASSIFY \
54 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
55 
56 /* TODO Add support in TSO */
57 #define DP_DESC_NUM_FRAG(x) 0
58 
59 /* disable TQM_BYPASS */
60 #define TQM_BYPASS_WAR 0
61 
62 /* Invalid peer id for reinject */
63 #define DP_INVALID_PEER 0XFFFE
64 
65 /* Mapping between HAL encrypt type and cdp_sec_type */
66 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
67 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
68 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
69 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
70 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
71 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
72 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
73 					  HAL_TX_ENCRYPT_TYPE_WAPI,
74 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
75 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
76 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
77 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
78 qdf_export_symbol(sec_type_map);
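
/*
 * Illustrative usage sketch (editor's example, not driver code): the map is
 * indexed by the cdp_sec_type enum, so converting the CDP cipher advertised
 * by the control path into the HAL encrypt type is a plain array lookup.
 * Assuming the cdp_sec_type_aes_ccmp value from the cdp headers:
 *
 *	uint8_t hal_cipher = sec_type_map[cdp_sec_type_aes_ccmp];
 *	// hal_cipher == HAL_TX_ENCRYPT_TYPE_AES_CCMP_128
 */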
79 
80 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
81 /**
82  * dp_update_tx_desc_stats() - Track the peak outstanding tx desc count
83  * on the pdev and report the current and maximum values to the
84  * sysfs mem stats infrastructure
85  * @pdev: DP pdev handle
86  *
87  * Return: void
88  */
89 static inline void
90 dp_update_tx_desc_stats(struct dp_pdev *pdev)
91 {
92 	int32_t tx_descs_cnt =
93 		qdf_atomic_read(&pdev->num_tx_outstanding);
94 	if (pdev->tx_descs_max < tx_descs_cnt)
95 		pdev->tx_descs_max = tx_descs_cnt;
96 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
97 				   pdev->tx_descs_max);
98 }
99 
100 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
101 
102 static inline void
103 dp_update_tx_desc_stats(struct dp_pdev *pdev)
104 {
105 }
106 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
107 
108 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
109 static inline
110 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
111 {
112 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
113 				     QDF_DMA_TO_DEVICE,
114 				     desc->nbuf->len);
115 	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
116 }
117 
118 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
119 {
120 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
121 		qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
122 					     QDF_DMA_TO_DEVICE,
123 					     desc->nbuf->len);
124 }
125 #else
126 static inline
127 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
128 {
129 }
130 
131 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
132 {
133 	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
134 				     QDF_DMA_TO_DEVICE, desc->nbuf->len);
135 }
136 #endif
137 
138 #ifdef QCA_TX_LIMIT_CHECK
139 /**
140  * dp_tx_limit_check() - Check if the outstanding tx descriptor count has
141  * reached the soc max limit or the pdev max limit
142  * @vdev: DP vdev handle
143  *
144  * Return: true if allocated tx descriptors reached max configured value, else
145  * false
146  */
147 static inline bool
148 dp_tx_limit_check(struct dp_vdev *vdev)
149 {
150 	struct dp_pdev *pdev = vdev->pdev;
151 	struct dp_soc *soc = pdev->soc;
152 
153 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
154 			soc->num_tx_allowed) {
155 		dp_tx_info("queued packets are more than max tx, drop the frame");
156 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
157 		return true;
158 	}
159 
160 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
161 			pdev->num_tx_allowed) {
162 		dp_tx_info("queued packets are more than max tx, drop the frame");
163 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
164 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
165 		return true;
166 	}
167 	return false;
168 }
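
/*
 * Usage sketch (editor's note): callers are expected to run this check before
 * allocating a software Tx descriptor and bail out early when the limit has
 * been hit, as dp_tx_prepare_desc_single() does below:
 *
 *	if (dp_tx_limit_check(vdev))
 *		return NULL;
 */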
169 
170 /**
171  * dp_tx_exception_limit_check() - Check if allocated tx exception descriptors
172  * reached soc max limit
173  * @vdev: DP vdev handle
174  *
175  * Return: true if allocated tx exception descriptors reached the max
176  * configured value, else false
177  */
178 static inline bool
179 dp_tx_exception_limit_check(struct dp_vdev *vdev)
180 {
181 	struct dp_pdev *pdev = vdev->pdev;
182 	struct dp_soc *soc = pdev->soc;
183 
184 	if (qdf_atomic_read(&soc->num_tx_exception) >=
185 			soc->num_msdu_exception_desc) {
186 		dp_info("exc packets are more than max, drop the exc pkt");
187 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
188 		return true;
189 	}
190 
191 	return false;
192 }
193 
194 /**
195  * dp_tx_outstanding_inc() - Increment outstanding tx desc count on pdev and soc
196  * @pdev: DP pdev handle
197  *
198  * Return: void
199  */
200 static inline void
201 dp_tx_outstanding_inc(struct dp_pdev *pdev)
202 {
203 	struct dp_soc *soc = pdev->soc;
204 
205 	qdf_atomic_inc(&pdev->num_tx_outstanding);
206 	qdf_atomic_inc(&soc->num_tx_outstanding);
207 	dp_update_tx_desc_stats(pdev);
208 }
209 
210 /**
211  * dp_tx_outstanding_dec() - Decrement outstanding tx desc count on pdev and soc
212  * @pdev: DP pdev handle
213  *
214  * Return: void
215  */
216 static inline void
217 dp_tx_outstanding_dec(struct dp_pdev *pdev)
218 {
219 	struct dp_soc *soc = pdev->soc;
220 
221 	qdf_atomic_dec(&pdev->num_tx_outstanding);
222 	qdf_atomic_dec(&soc->num_tx_outstanding);
223 	dp_update_tx_desc_stats(pdev);
224 }
225 
226 #else //QCA_TX_LIMIT_CHECK
227 static inline bool
228 dp_tx_limit_check(struct dp_vdev *vdev)
229 {
230 	return false;
231 }
232 
233 static inline bool
234 dp_tx_exception_limit_check(struct dp_vdev *vdev)
235 {
236 	return false;
237 }
238 
239 static inline void
240 dp_tx_outstanding_inc(struct dp_pdev *pdev)
241 {
242 	qdf_atomic_inc(&pdev->num_tx_outstanding);
243 	dp_update_tx_desc_stats(pdev);
244 }
245 
246 static inline void
247 dp_tx_outstanding_dec(struct dp_pdev *pdev)
248 {
249 	qdf_atomic_dec(&pdev->num_tx_outstanding);
250 	dp_update_tx_desc_stats(pdev);
251 }
252 #endif //QCA_TX_LIMIT_CHECK
253 
254 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
255 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
256 {
257 	enum dp_tx_event_type type;
258 
259 	if (flags & DP_TX_DESC_FLAG_FLUSH)
260 		type = DP_TX_DESC_FLUSH;
261 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
262 		type = DP_TX_COMP_UNMAP_ERR;
263 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
264 		type = DP_TX_COMP_UNMAP;
265 	else
266 		type = DP_TX_DESC_UNMAP;
267 
268 	return type;
269 }
270 
271 static inline void
272 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
273 		       qdf_nbuf_t skb, uint32_t sw_cookie,
274 		       enum dp_tx_event_type type)
275 {
276 	struct dp_tx_desc_event *entry;
277 	uint32_t idx;
278 
279 	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
280 		return;
281 
282 	switch (type) {
283 	case DP_TX_COMP_UNMAP:
284 	case DP_TX_COMP_UNMAP_ERR:
285 	case DP_TX_COMP_MSDU_EXT:
286 		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
287 						DP_TX_COMP_HISTORY_SIZE);
288 		entry = &soc->tx_comp_history->entry[idx];
289 		break;
290 	case DP_TX_DESC_MAP:
291 	case DP_TX_DESC_UNMAP:
292 	case DP_TX_DESC_COOKIE:
293 	case DP_TX_DESC_FLUSH:
294 		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
295 						DP_TX_TCL_HISTORY_SIZE);
296 		entry = &soc->tx_tcl_history->entry[idx];
297 		break;
298 	default:
299 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
300 		return;
301 	}
302 
303 	entry->skb = skb;
304 	entry->paddr = paddr;
305 	entry->sw_cookie = sw_cookie;
306 	entry->type = type;
307 	entry->ts = qdf_get_log_timestamp();
308 }
309 
310 static inline void
311 dp_tx_tso_seg_history_add(struct dp_soc *soc,
312 			  struct qdf_tso_seg_elem_t *tso_seg,
313 			  qdf_nbuf_t skb, uint32_t sw_cookie,
314 			  enum dp_tx_event_type type)
315 {
316 	int i;
317 
318 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
319 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
320 				       skb, sw_cookie, type);
321 	}
322 
323 	if (!tso_seg->next)
324 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
325 				       skb, 0xFFFFFFFF, type);
326 }
327 
328 static inline void
329 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
330 		      qdf_nbuf_t skb, uint32_t sw_cookie,
331 		      enum dp_tx_event_type type)
332 {
333 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
334 	uint32_t num_segs = tso_info.num_segs;
335 
336 	while (num_segs) {
337 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
338 		curr_seg = curr_seg->next;
339 		num_segs--;
340 	}
341 }
342 
343 #else
344 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
345 {
346 	return DP_TX_DESC_INVAL_EVT;
347 }
348 
349 static inline void
350 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
351 		       qdf_nbuf_t skb, uint32_t sw_cookie,
352 		       enum dp_tx_event_type type)
353 {
354 }
355 
356 static inline void
357 dp_tx_tso_seg_history_add(struct dp_soc *soc,
358 			  struct qdf_tso_seg_elem_t *tso_seg,
359 			  qdf_nbuf_t skb, uint32_t sw_cookie,
360 			  enum dp_tx_event_type type)
361 {
362 }
363 
364 static inline void
365 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
366 		      qdf_nbuf_t skb, uint32_t sw_cookie,
367 		      enum dp_tx_event_type type)
368 {
369 }
370 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
371 
372 #if defined(FEATURE_TSO)
373 /**
374  * dp_tx_tso_unmap_segment() - Unmap TSO segment
375  *
376  * @soc - core txrx main context
377  * @seg_desc - tso segment descriptor
378  * @num_seg_desc - tso number segment descriptor
379  */
380 static void dp_tx_tso_unmap_segment(
381 		struct dp_soc *soc,
382 		struct qdf_tso_seg_elem_t *seg_desc,
383 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
384 {
385 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
386 	if (qdf_unlikely(!seg_desc)) {
387 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
388 			 __func__, __LINE__);
389 		qdf_assert(0);
390 	} else if (qdf_unlikely(!num_seg_desc)) {
391 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
392 			 __func__, __LINE__);
393 		qdf_assert(0);
394 	} else {
395 		bool is_last_seg;
396 		/* no tso segment left to do dma unmap */
397 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
398 			return;
399 
400 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
401 					true : false;
402 		qdf_nbuf_unmap_tso_segment(soc->osdev,
403 					   seg_desc, is_last_seg);
404 		num_seg_desc->num_seg.tso_cmn_num_seg--;
405 	}
406 }
407 
408 /**
409  * dp_tx_tso_desc_release() - Release the tso segment and tso num segment
410  *                            descriptors back to the freelist
411  *
412  * @soc - soc device handle
413  * @tx_desc - Tx software descriptor
414  */
415 static void dp_tx_tso_desc_release(struct dp_soc *soc,
416 				   struct dp_tx_desc_s *tx_desc)
417 {
418 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
419 	if (qdf_unlikely(!tx_desc->tso_desc)) {
420 		dp_tx_err("TSO desc is NULL!");
421 		qdf_assert(0);
422 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
423 		dp_tx_err("TSO num desc is NULL!");
424 		qdf_assert(0);
425 	} else {
426 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
427 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
428 
429 		/* Add the tso num segment into the free list */
430 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
431 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
432 					    tx_desc->tso_num_desc);
433 			tx_desc->tso_num_desc = NULL;
434 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
435 		}
436 
437 		/* Add the tso segment into the free list*/
438 		dp_tx_tso_desc_free(soc,
439 				    tx_desc->pool_id, tx_desc->tso_desc);
440 		tx_desc->tso_desc = NULL;
441 	}
442 }
443 #else
444 static void dp_tx_tso_unmap_segment(
445 		struct dp_soc *soc,
446 		struct qdf_tso_seg_elem_t *seg_desc,
447 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
448 
449 {
450 }
451 
452 static void dp_tx_tso_desc_release(struct dp_soc *soc,
453 				   struct dp_tx_desc_s *tx_desc)
454 {
455 }
456 #endif
457 
458 /**
459  * dp_tx_desc_release() - Release Tx Descriptor
460  * @tx_desc: Tx Descriptor
461  * @desc_pool_id: Descriptor Pool ID
462  *
463  * Deallocate all resources attached to Tx descriptor and free the Tx
464  * descriptor.
465  *
466  * Return: void
467  */
468 static void
469 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
470 {
471 	struct dp_pdev *pdev = tx_desc->pdev;
472 	struct dp_soc *soc;
473 	uint8_t comp_status = 0;
474 
475 	qdf_assert(pdev);
476 
477 	soc = pdev->soc;
478 
479 	dp_tx_outstanding_dec(pdev);
480 
481 	if (tx_desc->frm_type == dp_tx_frm_tso)
482 		dp_tx_tso_desc_release(soc, tx_desc);
483 
484 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
485 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
486 
487 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
488 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
489 
490 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
491 		qdf_atomic_dec(&soc->num_tx_exception);
492 
493 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
494 				tx_desc->buffer_src)
495 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
496 							     soc->hal_soc);
497 	else
498 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
499 
500 	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
501 		    tx_desc->id, comp_status,
502 		    qdf_atomic_read(&pdev->num_tx_outstanding));
503 
504 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
505 	return;
506 }
507 
508 /**
509  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
510  * @vdev: DP vdev Handle
511  * @nbuf: skb
512  * @msdu_info: msdu_info required to create HTT metadata
513  *
514  * Prepares and fills HTT metadata in the frame pre-header for special frames
515  * that should be transmitted using varying transmit parameters.
516  * There are 2 VDEV modes that currently need this special metadata -
517  *  1) Mesh Mode
518  *  2) DSRC Mode
519  *
520  * Return: HTT metadata size
521  *
522  */
523 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
524 					  struct dp_tx_msdu_info_s *msdu_info)
525 {
526 	uint32_t *meta_data = msdu_info->meta_data;
527 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
528 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
529 
530 	uint8_t htt_desc_size;
531 
532 	/* Size rounded off to a multiple of 8 bytes */
533 	uint8_t htt_desc_size_aligned;
534 
535 	uint8_t *hdr = NULL;
536 
537 	/*
538 	 * Metadata - HTT MSDU Extension header
539 	 */
540 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
541 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
542 
543 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
544 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
545 							   meta_data[0])) {
546 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
547 				 htt_desc_size_aligned)) {
548 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
549 							 htt_desc_size_aligned);
550 			if (!nbuf) {
551 				/*
552 				 * qdf_nbuf_realloc_headroom won't do skb_clone
553 				 * as skb_realloc_headroom does, so no free is
554 				 * needed here.
555 				 */
556 				DP_STATS_INC(vdev,
557 					     tx_i.dropped.headroom_insufficient,
558 					     1);
559 				qdf_print(" %s[%d] skb_realloc_headroom failed",
560 					  __func__, __LINE__);
561 				return 0;
562 			}
563 		}
564 		/* Fill and add HTT metaheader */
565 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
566 		if (!hdr) {
567 			dp_tx_err("Error in filling HTT metadata");
568 
569 			return 0;
570 		}
571 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
572 
573 	} else if (vdev->opmode == wlan_op_mode_ocb) {
574 		/* Todo - Add support for DSRC */
575 	}
576 
577 	return htt_desc_size_aligned;
578 }
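
/*
 * Worked example (editor's note): the HTT metadata length is rounded up to
 * the next multiple of 8 bytes so that the buffer address handed to HW stays
 * 8-byte aligned:
 *
 *	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
 *	// htt_desc_size = 30 -> htt_desc_size_aligned = 32
 *	// htt_desc_size = 32 -> htt_desc_size_aligned = 32
 */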
579 
580 /**
581  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
582  * @tso_seg: TSO segment to process
583  * @ext_desc: Pointer to MSDU extension descriptor
584  *
585  * Return: void
586  */
587 #if defined(FEATURE_TSO)
588 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
589 		void *ext_desc)
590 {
591 	uint8_t num_frag;
592 	uint32_t tso_flags;
593 
594 	/*
595 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
596 	 * tcp_flag_mask
597 	 *
598 	 * Checksum enable flags are set in TCL descriptor and not in Extension
599 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
600 	 */
601 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
602 
603 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
604 
605 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
606 		tso_seg->tso_flags.ip_len);
607 
608 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
609 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
610 
611 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
612 		uint32_t lo = 0;
613 		uint32_t hi = 0;
614 
615 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
616 				  (tso_seg->tso_frags[num_frag].length));
617 
618 		qdf_dmaaddr_to_32s(
619 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
620 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
621 			tso_seg->tso_frags[num_frag].length);
622 	}
623 
624 	return;
625 }
626 #else
627 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
628 		void *ext_desc)
629 {
630 	return;
631 }
632 #endif
633 
634 #if defined(FEATURE_TSO)
635 /**
636  * dp_tx_free_tso_seg_list() - Loop through the tso segments
637  *                             allocated and free them
638  *
639  * @soc: soc handle
640  * @free_seg: list of tso segments
641  * @msdu_info: msdu descriptor
642  *
643  * Return - void
644  */
645 static void dp_tx_free_tso_seg_list(
646 		struct dp_soc *soc,
647 		struct qdf_tso_seg_elem_t *free_seg,
648 		struct dp_tx_msdu_info_s *msdu_info)
649 {
650 	struct qdf_tso_seg_elem_t *next_seg;
651 
652 	while (free_seg) {
653 		next_seg = free_seg->next;
654 		dp_tx_tso_desc_free(soc,
655 				    msdu_info->tx_queue.desc_pool_id,
656 				    free_seg);
657 		free_seg = next_seg;
658 	}
659 }
660 
661 /**
662  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
663  *                                 allocated and free them
664  *
665  * @soc:  soc handle
666  * @free_num_seg: list of tso number segments
667  * @msdu_info: msdu descriptor
668  * Return - void
669  */
670 static void dp_tx_free_tso_num_seg_list(
671 		struct dp_soc *soc,
672 		struct qdf_tso_num_seg_elem_t *free_num_seg,
673 		struct dp_tx_msdu_info_s *msdu_info)
674 {
675 	struct qdf_tso_num_seg_elem_t *next_num_seg;
676 
677 	while (free_num_seg) {
678 		next_num_seg = free_num_seg->next;
679 		dp_tso_num_seg_free(soc,
680 				    msdu_info->tx_queue.desc_pool_id,
681 				    free_num_seg);
682 		free_num_seg = next_num_seg;
683 	}
684 }
685 
686 /**
687  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
688  *                              do dma unmap for each segment
689  *
690  * @soc: soc handle
691  * @free_seg: list of tso segments
692  * @num_seg_desc: tso number segment descriptor
693  *
694  * Return - void
695  */
696 static void dp_tx_unmap_tso_seg_list(
697 		struct dp_soc *soc,
698 		struct qdf_tso_seg_elem_t *free_seg,
699 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
700 {
701 	struct qdf_tso_seg_elem_t *next_seg;
702 
703 	if (qdf_unlikely(!num_seg_desc)) {
704 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
705 		return;
706 	}
707 
708 	while (free_seg) {
709 		next_seg = free_seg->next;
710 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
711 		free_seg = next_seg;
712 	}
713 }
714 
715 #ifdef FEATURE_TSO_STATS
716 /**
717  * dp_tso_get_stats_idx() - Retrieve the tso packet id
718  * @pdev - pdev handle
719  *
720  * Return: id
721  */
722 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
723 {
724 	uint32_t stats_idx;
725 
726 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
727 						% CDP_MAX_TSO_PACKETS);
728 	return stats_idx;
729 }
730 #else
731 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
732 {
733 	return 0;
734 }
735 #endif /* FEATURE_TSO_STATS */
736 
737 /**
738  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
739  *				     free the tso segments descriptor and
740  *				     tso num segments descriptor
741  *
742  * @soc:  soc handle
743  * @msdu_info: msdu descriptor
744  * @tso_seg_unmap: flag to show if dma unmap is necessary
745  *
746  * Return - void
747  */
748 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
749 					  struct dp_tx_msdu_info_s *msdu_info,
750 					  bool tso_seg_unmap)
751 {
752 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
753 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
754 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
755 					tso_info->tso_num_seg_list;
756 
757 	/* do dma unmap for each segment */
758 	if (tso_seg_unmap)
759 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
760 
761 	/* Free all tso num segment descriptors; normally there is only one */
762 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
763 
764 	/* Free all tso segment descriptors */
765 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
766 }
767 
768 /**
769  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
770  * @vdev: virtual device handle
771  * @msdu: network buffer
772  * @msdu_info: meta data associated with the msdu
773  *
774  * Return: QDF_STATUS_SUCCESS on success
775  */
776 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
777 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
778 {
779 	struct qdf_tso_seg_elem_t *tso_seg;
780 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
781 	struct dp_soc *soc = vdev->pdev->soc;
782 	struct dp_pdev *pdev = vdev->pdev;
783 	struct qdf_tso_info_t *tso_info;
784 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
785 	tso_info = &msdu_info->u.tso_info;
786 	tso_info->curr_seg = NULL;
787 	tso_info->tso_seg_list = NULL;
788 	tso_info->num_segs = num_seg;
789 	msdu_info->frm_type = dp_tx_frm_tso;
790 	tso_info->tso_num_seg_list = NULL;
791 
792 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
793 
794 	while (num_seg) {
795 		tso_seg = dp_tx_tso_desc_alloc(
796 				soc, msdu_info->tx_queue.desc_pool_id);
797 		if (tso_seg) {
798 			tso_seg->next = tso_info->tso_seg_list;
799 			tso_info->tso_seg_list = tso_seg;
800 			num_seg--;
801 		} else {
802 			dp_err_rl("Failed to alloc tso seg desc");
803 			DP_STATS_INC_PKT(vdev->pdev,
804 					 tso_stats.tso_no_mem_dropped, 1,
805 					 qdf_nbuf_len(msdu));
806 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
807 
808 			return QDF_STATUS_E_NOMEM;
809 		}
810 	}
811 
812 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
813 
814 	tso_num_seg = dp_tso_num_seg_alloc(soc,
815 			msdu_info->tx_queue.desc_pool_id);
816 
817 	if (tso_num_seg) {
818 		tso_num_seg->next = tso_info->tso_num_seg_list;
819 		tso_info->tso_num_seg_list = tso_num_seg;
820 	} else {
821 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
822 			 __func__);
823 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
824 
825 		return QDF_STATUS_E_NOMEM;
826 	}
827 
828 	msdu_info->num_seg =
829 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
830 
831 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
832 			msdu_info->num_seg);
833 
834 	if (!(msdu_info->num_seg)) {
835 		/*
836 		 * Free allocated TSO seg desc and number seg desc,
837 		 * do unmap for segments if dma map has done.
838 		 */
839 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
840 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
841 
842 		return QDF_STATUS_E_INVAL;
843 	}
844 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
845 			      msdu, 0, DP_TX_DESC_MAP);
846 
847 	tso_info->curr_seg = tso_info->tso_seg_list;
848 
849 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
850 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
851 			     msdu, msdu_info->num_seg);
852 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
853 				    tso_info->msdu_stats_idx);
854 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
855 	return QDF_STATUS_SUCCESS;
856 }
857 #else
858 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
859 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
860 {
861 	return QDF_STATUS_E_NOMEM;
862 }
863 #endif
864 
865 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
866 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
867 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
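
/*
 * Editor's note: the compile-time assert above guarantees that the
 * meta_data[] scratch area in dp_tx_msdu_info_s, which is
 * DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words (DWORDS * 4 bytes), is
 * large enough to hold a complete struct htt_tx_msdu_desc_ext2_t that
 * dp_tx_prepare_htt_metadata() copies into the frame pre-header.
 */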
868 
869 /**
870  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
871  * @vdev: DP Vdev handle
872  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
873  * @desc_pool_id: Descriptor Pool ID
874  *
875  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
876  */
877 static
878 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
879 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
880 {
881 	uint8_t i;
882 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
883 	struct dp_tx_seg_info_s *seg_info;
884 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
885 	struct dp_soc *soc = vdev->pdev->soc;
886 
887 	/* Allocate an extension descriptor */
888 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
889 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
890 
891 	if (!msdu_ext_desc) {
892 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
893 		return NULL;
894 	}
895 
896 	if (msdu_info->exception_fw &&
897 			qdf_unlikely(vdev->mesh_vdev)) {
898 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
899 				&msdu_info->meta_data[0],
900 				sizeof(struct htt_tx_msdu_desc_ext2_t));
901 		qdf_atomic_inc(&soc->num_tx_exception);
902 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
903 	}
904 
905 	switch (msdu_info->frm_type) {
906 	case dp_tx_frm_sg:
907 	case dp_tx_frm_me:
908 	case dp_tx_frm_raw:
909 		seg_info = msdu_info->u.sg_info.curr_seg;
910 		/* Update the buffer pointers in MSDU Extension Descriptor */
911 		for (i = 0; i < seg_info->frag_cnt; i++) {
912 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
913 				seg_info->frags[i].paddr_lo,
914 				seg_info->frags[i].paddr_hi,
915 				seg_info->frags[i].len);
916 		}
917 
918 		break;
919 
920 	case dp_tx_frm_tso:
921 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
922 				&cached_ext_desc[0]);
923 		break;
924 
925 
926 	default:
927 		break;
928 	}
929 
930 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
931 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
932 
933 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
934 			msdu_ext_desc->vaddr);
935 
936 	return msdu_ext_desc;
937 }
938 
939 /**
940  * dp_tx_trace_pkt() - Trace TX packet at DP layer
941  *
942  * @skb: skb to be traced
943  * @msdu_id: msdu_id of the packet
944  * @vdev_id: vdev_id of the packet
945  *
946  * Return: None
947  */
948 #ifdef DP_DISABLE_TX_PKT_TRACE
949 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
950 			    uint8_t vdev_id)
951 {
952 }
953 #else
954 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
955 			    uint8_t vdev_id)
956 {
957 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
958 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
959 	DPTRACE(qdf_dp_trace_ptr(skb,
960 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
961 				 QDF_TRACE_DEFAULT_PDEV_ID,
962 				 qdf_nbuf_data_addr(skb),
963 				 sizeof(qdf_nbuf_data(skb)),
964 				 msdu_id, vdev_id, 0));
965 
966 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
967 
968 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
969 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
970 				      msdu_id, QDF_TX));
971 }
972 #endif
973 
974 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
975 /**
976  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
977  *				      exception by the upper layer (OS_IF)
978  * @soc: DP soc handle
979  * @nbuf: packet to be transmitted
980  *
981  * Returns: 1 if the packet is marked as exception,
982  *	    0 if the packet is not marked as exception.
983  */
984 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
985 						 qdf_nbuf_t nbuf)
986 {
987 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
988 }
989 #else
990 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
991 						 qdf_nbuf_t nbuf)
992 {
993 	return 0;
994 }
995 #endif
996 
997 /**
998  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
999  * @vdev: DP vdev handle
1000  * @nbuf: skb
1001  * @desc_pool_id: Descriptor pool ID
1002  * @msdu_info: MSDU info that carries metadata for the fw
1003  * @tx_exc_metadata: Handle that holds exception path metadata
1004  * Allocate and prepare Tx descriptor with msdu information.
1005  *
1006  * Return: Pointer to Tx Descriptor on success,
1007  *         NULL on failure
1008  */
1009 static
1010 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1011 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1012 		struct dp_tx_msdu_info_s *msdu_info,
1013 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1014 {
1015 	uint8_t align_pad;
1016 	uint8_t is_exception = 0;
1017 	uint8_t htt_hdr_size;
1018 	struct dp_tx_desc_s *tx_desc;
1019 	struct dp_pdev *pdev = vdev->pdev;
1020 	struct dp_soc *soc = pdev->soc;
1021 
1022 	if (dp_tx_limit_check(vdev))
1023 		return NULL;
1024 
1025 	/* Allocate software Tx descriptor */
1026 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1027 
1028 	if (qdf_unlikely(!tx_desc)) {
1029 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1030 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1031 		return NULL;
1032 	}
1033 
1034 	dp_tx_outstanding_inc(pdev);
1035 
1036 	/* Initialize the SW tx descriptor */
1037 	tx_desc->nbuf = nbuf;
1038 	tx_desc->frm_type = dp_tx_frm_std;
1039 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1040 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1041 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1042 	tx_desc->vdev_id = vdev->vdev_id;
1043 	tx_desc->pdev = pdev;
1044 	tx_desc->msdu_ext_desc = NULL;
1045 	tx_desc->pkt_offset = 0;
1046 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1047 
1048 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1049 
1050 	if (qdf_unlikely(vdev->multipass_en)) {
1051 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1052 			goto failure;
1053 	}
1054 
1055 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1056 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1057 		is_exception = 1;
1058 	/*
1059 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1060 	 * transmitted using varying transmit parameters (tx spec) which include
1061 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
1062 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1063 	 * These frames are sent as exception packets to firmware.
1064 	 *
1065 	 * HW requirement is that metadata should always point to a
1066 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1067 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1068 	 *  to get 8-byte aligned start address along with align_pad added
1069 	 *
1070 	 *  |-----------------------------|
1071 	 *  |                             |
1072 	 *  |-----------------------------| <-----Buffer Pointer Address given
1073 	 *  |                             |  ^    in HW descriptor (aligned)
1074 	 *  |       HTT Metadata          |  |
1075 	 *  |                             |  |
1076 	 *  |                             |  | Packet Offset given in descriptor
1077 	 *  |                             |  |
1078 	 *  |-----------------------------|  |
1079 	 *  |       Alignment Pad         |  v
1080 	 *  |-----------------------------| <----- Actual buffer start address
1081 	 *  |        SKB Data             |           (Unaligned)
1082 	 *  |                             |
1083 	 *  |                             |
1084 	 *  |                             |
1085 	 *  |                             |
1086 	 *  |                             |
1087 	 *  |-----------------------------|
1088 	 */
1089 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1090 				(vdev->opmode == wlan_op_mode_ocb) ||
1091 				(tx_exc_metadata &&
1092 				tx_exc_metadata->is_tx_sniffer)) {
1093 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1094 
1095 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1096 			DP_STATS_INC(vdev,
1097 				     tx_i.dropped.headroom_insufficient, 1);
1098 			goto failure;
1099 		}
1100 
1101 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1102 			dp_tx_err("qdf_nbuf_push_head failed");
1103 			goto failure;
1104 		}
1105 
1106 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1107 				msdu_info);
1108 		if (htt_hdr_size == 0)
1109 			goto failure;
1110 
1111 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1112 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1113 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1114 		is_exception = 1;
1115 		tx_desc->length -= tx_desc->pkt_offset;
1116 	}
1117 
1118 #if !TQM_BYPASS_WAR
1119 	if (is_exception || tx_exc_metadata)
1120 #endif
1121 	{
1122 		/* Temporary WAR due to TQM VP issues */
1123 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1124 		qdf_atomic_inc(&soc->num_tx_exception);
1125 	}
1126 
1127 	return tx_desc;
1128 
1129 failure:
1130 	dp_tx_desc_release(tx_desc, desc_pool_id);
1131 	return NULL;
1132 }
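
/*
 * Worked example (editor's note): for exception frames the packet offset
 * stored in the descriptor covers both the alignment pad and the aligned HTT
 * metadata pushed in front of the original skb data:
 *
 *	align_pad = (unsigned long)qdf_nbuf_data(nbuf) & 0x7;
 *	// skb data at ...0x1006 -> align_pad = 6
 *	tx_desc->pkt_offset = align_pad + htt_hdr_size;
 *	// with a hypothetical 40-byte aligned HTT header: 6 + 40 = 46
 */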
1133 
1134 /**
1135  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
1136  * @vdev: DP vdev handle
1137  * @nbuf: skb
1138  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1139  * @desc_pool_id : Descriptor Pool ID
1140  *
1141  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1142  * information. For frames with fragments, allocate and prepare
1143  * an MSDU extension descriptor
1144  *
1145  * Return: Pointer to Tx Descriptor on success,
1146  *         NULL on failure
1147  */
1148 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1149 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1150 		uint8_t desc_pool_id)
1151 {
1152 	struct dp_tx_desc_s *tx_desc;
1153 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1154 	struct dp_pdev *pdev = vdev->pdev;
1155 	struct dp_soc *soc = pdev->soc;
1156 
1157 	if (dp_tx_limit_check(vdev))
1158 		return NULL;
1159 
1160 	/* Allocate software Tx descriptor */
1161 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1162 	if (!tx_desc) {
1163 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1164 		return NULL;
1165 	}
1166 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1167 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1168 
1169 	dp_tx_outstanding_inc(pdev);
1170 
1171 	/* Initialize the SW tx descriptor */
1172 	tx_desc->nbuf = nbuf;
1173 	tx_desc->frm_type = msdu_info->frm_type;
1174 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1175 	tx_desc->vdev_id = vdev->vdev_id;
1176 	tx_desc->pdev = pdev;
1177 	tx_desc->pkt_offset = 0;
1178 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1179 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1180 
1181 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
1182 
1183 	/* Handle scattered frames - TSO/SG/ME */
1184 	/* Allocate and prepare an extension descriptor for scattered frames */
1185 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1186 	if (!msdu_ext_desc) {
1187 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1188 		goto failure;
1189 	}
1190 
1191 #if TQM_BYPASS_WAR
1192 	/* Temporary WAR due to TQM VP issues */
1193 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1194 	qdf_atomic_inc(&soc->num_tx_exception);
1195 #endif
1196 	if (qdf_unlikely(msdu_info->exception_fw))
1197 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1198 
1199 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1200 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1201 
1202 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1203 
1204 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1205 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1206 	else
1207 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1208 
1209 	return tx_desc;
1210 failure:
1211 	dp_tx_desc_release(tx_desc, desc_pool_id);
1212 	return NULL;
1213 }
1214 
1215 /**
1216  * dp_tx_prepare_raw() - Prepare RAW packet TX
1217  * @vdev: DP vdev handle
1218  * @nbuf: buffer pointer
1219  * @seg_info: Pointer to Segment info Descriptor to be prepared
1220  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1221  *     descriptor
1222  *
1223  * Return: nbuf on success, NULL on failure
1224  */
1225 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1226 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1227 {
1228 	qdf_nbuf_t curr_nbuf = NULL;
1229 	uint16_t total_len = 0;
1230 	qdf_dma_addr_t paddr;
1231 	int32_t i;
1232 	int32_t mapped_buf_num = 0;
1233 
1234 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1235 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1236 
1237 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1238 
1239 	/* Continue only if frames are of DATA type */
1240 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1241 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1242 		dp_tx_debug("Pkt recd is not of data type");
1243 		goto error;
1244 	}
1245 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1246 	if (vdev->raw_mode_war &&
1247 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1248 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1249 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1250 
1251 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1252 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1253 		/*
1254 		 * Number of nbuf's must not exceed the size of the frags
1255 		 * array in seg_info.
1256 		 */
1257 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1258 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1259 			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
1260 			goto error;
1261 		}
1262 		if (QDF_STATUS_SUCCESS !=
1263 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1264 						   curr_nbuf,
1265 						   QDF_DMA_TO_DEVICE,
1266 						   curr_nbuf->len)) {
1267 			dp_tx_err("%s dma map error ", __func__);
1268 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1269 			goto error;
1270 		}
1271 		/* Update the count of mapped nbuf's */
1272 		mapped_buf_num++;
1273 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1274 		seg_info->frags[i].paddr_lo = paddr;
1275 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1276 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1277 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1278 		total_len += qdf_nbuf_len(curr_nbuf);
1279 	}
1280 
1281 	seg_info->frag_cnt = i;
1282 	seg_info->total_len = total_len;
1283 	seg_info->next = NULL;
1284 
1285 	sg_info->curr_seg = seg_info;
1286 
1287 	msdu_info->frm_type = dp_tx_frm_raw;
1288 	msdu_info->num_seg = 1;
1289 
1290 	return nbuf;
1291 
1292 error:
1293 	i = 0;
1294 	while (nbuf) {
1295 		curr_nbuf = nbuf;
1296 		if (i < mapped_buf_num) {
1297 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1298 						     QDF_DMA_TO_DEVICE,
1299 						     curr_nbuf->len);
1300 			i++;
1301 		}
1302 		nbuf = qdf_nbuf_next(nbuf);
1303 		qdf_nbuf_free(curr_nbuf);
1304 	}
1305 	return NULL;
1306 
1307 }
1308 
1309 /**
1310  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1311  * @soc: DP soc handle
1312  * @nbuf: Buffer pointer
1313  *
1314  * unmap the chain of nbufs that belong to this RAW frame.
1315  *
1316  * Return: None
1317  */
1318 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1319 				    qdf_nbuf_t nbuf)
1320 {
1321 	qdf_nbuf_t cur_nbuf = nbuf;
1322 
1323 	do {
1324 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1325 					     QDF_DMA_TO_DEVICE,
1326 					     cur_nbuf->len);
1327 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1328 	} while (cur_nbuf);
1329 }
1330 
1331 #ifdef VDEV_PEER_PROTOCOL_COUNT
1332 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1333 					       qdf_nbuf_t nbuf)
1334 {
1335 	qdf_nbuf_t nbuf_local;
1336 	struct dp_vdev *vdev_local = vdev_hdl;
1337 
1338 	do {
1339 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1340 			break;
1341 		nbuf_local = nbuf;
1342 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1343 			 htt_cmn_pkt_type_raw))
1344 			break;
1345 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1346 			break;
1347 		else if (qdf_nbuf_is_tso((nbuf_local)))
1348 			break;
1349 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1350 						       (nbuf_local),
1351 						       NULL, 1, 0);
1352 	} while (0);
1353 }
1354 #endif
1355 
1356 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1357 /**
1358  * dp_tx_update_stats() - Update soc level tx stats
1359  * @soc: DP soc handle
1360  * @nbuf: packet being transmitted
1361  *
1362  * Returns: none
1363  */
1364 void dp_tx_update_stats(struct dp_soc *soc,
1365 			qdf_nbuf_t nbuf)
1366 {
1367 	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
1368 }
1369 
1370 int
1371 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1372 			 struct dp_tx_desc_s *tx_desc,
1373 			 uint8_t tid)
1374 {
1375 	struct dp_swlm *swlm = &soc->swlm;
1376 	union swlm_data swlm_query_data;
1377 	struct dp_swlm_tcl_data tcl_data;
1378 	QDF_STATUS status;
1379 	int ret;
1380 
1381 	if (qdf_unlikely(!swlm->is_enabled))
1382 		return 0;
1383 
1384 	tcl_data.nbuf = tx_desc->nbuf;
1385 	tcl_data.tid = tid;
1386 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1387 	swlm_query_data.tcl_data = &tcl_data;
1388 
1389 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1390 	if (QDF_IS_STATUS_ERROR(status)) {
1391 		dp_swlm_tcl_reset_session_data(soc);
1392 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1393 		return 0;
1394 	}
1395 
1396 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1397 	if (ret) {
1398 		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
1399 	} else {
1400 		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
1401 	}
1402 
1403 	return ret;
1404 }
1405 
1406 void
1407 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1408 		      int coalesce)
1409 {
1410 	if (coalesce)
1411 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1412 	else
1413 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1414 }
1415 
1416 #endif
1417 
1418 #ifdef FEATURE_RUNTIME_PM
1419 /**
1420  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1421  * @soc: Datapath soc handle
1422  * @hal_ring_hdl: HAL ring handle
1423  * @coalesce: Coalesce the current write or not
1424  *
1425  * Wrapper for HAL ring access end for data transmission for
1426  * FEATURE_RUNTIME_PM
1427  *
1428  * Returns: none
1429  */
1430 void
1431 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1432 			      hal_ring_handle_t hal_ring_hdl,
1433 			      int coalesce)
1434 {
1435 	int ret;
1436 
1437 	ret = hif_pm_runtime_get(soc->hif_handle,
1438 				 RTPM_ID_DW_TX_HW_ENQUEUE, true);
1439 	switch (ret) {
1440 	case 0:
1441 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1442 		hif_pm_runtime_put(soc->hif_handle,
1443 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1444 		break;
1445 	/*
1446 	 * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
1447 	 * take the dp runtime refcount using dp_runtime_get,
1448 	 * check link state; if up, write TX ring HP, else just set flush event.
1449 	 * In dp_runtime_resume, wait until dp runtime refcount becomes
1450 	 * zero or time out, then flush pending tx.
1451 	 */
1452 	case -EBUSY:
1453 	case -EINPROGRESS:
1454 		dp_runtime_get(soc);
1455 		if (hif_pm_get_link_state(soc->hif_handle) ==
1456 		    HIF_PM_LINK_STATE_UP) {
1457 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1458 		} else {
1459 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1460 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1461 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1462 		}
1463 		dp_runtime_put(soc);
1464 		break;
1465 	default:
1466 		dp_runtime_get(soc);
1467 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1468 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1469 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1470 		dp_runtime_put(soc);
1471 	}
1472 }
1473 #endif
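
/*
 * Flow sketch (editor's note, simplified): the coalesce decision returned by
 * dp_tx_attempt_coalescing() selects between reaping the ring and writing the
 * TCL head pointer in dp_tx_ring_access_end{_wrapper}(). A typical enqueue
 * path looks roughly like:
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
 *	// ... program and push the TCL descriptor ...
 *	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 */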
1474 
1475 /**
1476  * dp_cce_classify() - Classify the frame based on CCE rules
1477  * @vdev: DP vdev handle
1478  * @nbuf: skb
1479  *
1480  * Classify frames based on CCE rules
1481  * Return: true if the frame is classified by CCE,
1482  *         false otherwise
1483  */
1484 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1485 {
1486 	qdf_ether_header_t *eh = NULL;
1487 	uint16_t   ether_type;
1488 	qdf_llc_t *llcHdr;
1489 	qdf_nbuf_t nbuf_clone = NULL;
1490 	qdf_dot3_qosframe_t *qos_wh = NULL;
1491 
1492 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1493 		/*
1494 		 * When mesh mode or hlos tid override is enabled,
1495 		 * don't do any classification
1496 		 */
1497 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1498 					& DP_TX_SKIP_CCE_CLASSIFY))
1499 			return false;
1500 	}
1501 
1502 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1503 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1504 		ether_type = eh->ether_type;
1505 		llcHdr = (qdf_llc_t *)(nbuf->data +
1506 					sizeof(qdf_ether_header_t));
1507 	} else {
1508 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1509 		/* For encrypted packets don't do any classification */
1510 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1511 			return false;
1512 
1513 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1514 			if (qdf_unlikely(
1515 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1516 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1517 
1518 				ether_type = *(uint16_t *)(nbuf->data
1519 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1520 						+ sizeof(qdf_llc_t)
1521 						- sizeof(ether_type));
1522 				llcHdr = (qdf_llc_t *)(nbuf->data +
1523 						QDF_IEEE80211_4ADDR_HDR_LEN);
1524 			} else {
1525 				ether_type = *(uint16_t *)(nbuf->data
1526 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1527 						+ sizeof(qdf_llc_t)
1528 						- sizeof(ether_type));
1529 				llcHdr = (qdf_llc_t *)(nbuf->data +
1530 					QDF_IEEE80211_3ADDR_HDR_LEN);
1531 			}
1532 
1533 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1534 				&& (ether_type ==
1535 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1536 
1537 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1538 				return true;
1539 			}
1540 		}
1541 
1542 		return false;
1543 	}
1544 
1545 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1546 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1547 				sizeof(*llcHdr));
1548 		nbuf_clone = qdf_nbuf_clone(nbuf);
1549 		if (qdf_unlikely(nbuf_clone)) {
1550 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1551 
1552 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1553 				qdf_nbuf_pull_head(nbuf_clone,
1554 						sizeof(qdf_net_vlanhdr_t));
1555 			}
1556 		}
1557 	} else {
1558 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1559 			nbuf_clone = qdf_nbuf_clone(nbuf);
1560 			if (qdf_unlikely(nbuf_clone)) {
1561 				qdf_nbuf_pull_head(nbuf_clone,
1562 					sizeof(qdf_net_vlanhdr_t));
1563 			}
1564 		}
1565 	}
1566 
1567 	if (qdf_unlikely(nbuf_clone))
1568 		nbuf = nbuf_clone;
1569 
1570 
1571 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1572 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1573 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1574 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1575 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1576 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1577 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1578 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1579 		if (qdf_unlikely(nbuf_clone))
1580 			qdf_nbuf_free(nbuf_clone);
1581 		return true;
1582 	}
1583 
1584 	if (qdf_unlikely(nbuf_clone))
1585 		qdf_nbuf_free(nbuf_clone);
1586 
1587 	return false;
1588 }
1589 
1590 /**
1591  * dp_tx_get_tid() - Obtain TID to be used for this frame
1592  * @vdev: DP vdev handle
1593  * @nbuf: skb
1594  *
1595  * Extract the DSCP or PCP information from frame and map into TID value.
1596  *
1597  * Return: void
1598  */
1599 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1600 			  struct dp_tx_msdu_info_s *msdu_info)
1601 {
1602 	uint8_t tos = 0, dscp_tid_override = 0;
1603 	uint8_t *hdr_ptr, *L3datap;
1604 	uint8_t is_mcast = 0;
1605 	qdf_ether_header_t *eh = NULL;
1606 	qdf_ethervlan_header_t *evh = NULL;
1607 	uint16_t   ether_type;
1608 	qdf_llc_t *llcHdr;
1609 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1610 
1611 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1612 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1613 		eh = (qdf_ether_header_t *)nbuf->data;
1614 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1615 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1616 	} else {
1617 		qdf_dot3_qosframe_t *qos_wh =
1618 			(qdf_dot3_qosframe_t *) nbuf->data;
1619 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1620 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1621 		return;
1622 	}
1623 
1624 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1625 	ether_type = eh->ether_type;
1626 
1627 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1628 	/*
1629 	 * Check if packet is dot3 or eth2 type.
1630 	 */
1631 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1632 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1633 				sizeof(*llcHdr));
1634 
1635 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1636 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1637 				sizeof(*llcHdr);
1638 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1639 					+ sizeof(*llcHdr) +
1640 					sizeof(qdf_net_vlanhdr_t));
1641 		} else {
1642 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1643 				sizeof(*llcHdr);
1644 		}
1645 	} else {
1646 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1647 			evh = (qdf_ethervlan_header_t *) eh;
1648 			ether_type = evh->ether_type;
1649 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1650 		}
1651 	}
1652 
1653 	/*
1654 	 * Find priority from IP TOS DSCP field
1655 	 */
1656 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1657 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1658 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1659 			/* Only for unicast frames */
1660 			if (!is_mcast) {
1661 				/* send it on VO queue */
1662 				msdu_info->tid = DP_VO_TID;
1663 			}
1664 		} else {
1665 			/*
1666 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1667 			 * from TOS byte.
1668 			 */
1669 			tos = ip->ip_tos;
1670 			dscp_tid_override = 1;
1671 
1672 		}
1673 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1674 		/* TODO
1675 		 * use flowlabel
1676 		 * igmp/mld cases to be handled in phase 2
1677 		 */
1678 		unsigned long ver_pri_flowlabel;
1679 		unsigned long pri;
1680 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1681 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1682 			DP_IPV6_PRIORITY_SHIFT;
1683 		tos = pri;
1684 		dscp_tid_override = 1;
1685 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1686 		msdu_info->tid = DP_VO_TID;
1687 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1688 		/* Only for unicast frames */
1689 		if (!is_mcast) {
1690 			/* send ucast arp on VO queue */
1691 			msdu_info->tid = DP_VO_TID;
1692 		}
1693 	}
1694 
1695 	/*
1696 	 * Assign all MCAST packets to BE
1697 	 */
1698 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1699 		if (is_mcast) {
1700 			tos = 0;
1701 			dscp_tid_override = 1;
1702 		}
1703 	}
1704 
1705 	if (dscp_tid_override == 1) {
1706 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1707 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1708 	}
1709 
1710 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1711 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1712 
1713 	return;
1714 }
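
/*
 * Worked example (editor's note): assuming the usual DP_IP_DSCP_SHIFT of 2
 * and DP_IP_DSCP_MASK of 0x3f, an IPv4 TOS byte of 0xB8 (DSCP 46, EF) picks
 * the TID as:
 *
 *	tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;   // 46
 *	msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46];
 */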
1715 
1716 /**
1717  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1718  * @vdev: DP vdev handle
1719  * @nbuf: skb
1720  *
1721  * Software based TID classification is required when more than 2 DSCP-TID
1722  * mapping tables are needed.
1723  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1724  *
1725  * Return: void
1726  */
1727 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1728 				      struct dp_tx_msdu_info_s *msdu_info)
1729 {
1730 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1731 
1732 	/*
1733 	 * skip_sw_tid_classification flag will be set in the below cases -
1734 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1735 	 * 2. hlos_tid_override enabled for vdev
1736 	 * 3. mesh mode enabled for vdev
1737 	 */
1738 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1739 		/* Update tid in msdu_info from skb priority */
1740 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1741 			    & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1742 			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1743 			return;
1744 		}
1745 		return;
1746 	}
1747 
1748 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1749 }
1750 
1751 #ifdef FEATURE_WLAN_TDLS
1752 /**
1753  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1754  * @soc: datapath SOC
1755  * @vdev: datapath vdev
1756  * @tx_desc: TX descriptor
1757  *
1758  * Return: None
1759  */
1760 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1761 				    struct dp_vdev *vdev,
1762 				    struct dp_tx_desc_s *tx_desc)
1763 {
1764 	if (vdev) {
1765 		if (vdev->is_tdls_frame) {
1766 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1767 			vdev->is_tdls_frame = false;
1768 		}
1769 	}
1770 }
1771 
1772 /**
1773  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1774  * @soc: dp_soc handle
1775  * @tx_desc: TX descriptor
1777  *
1778  * Return: None
1779  */
1780 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1781 					 struct dp_tx_desc_s *tx_desc)
1782 {
1783 	struct hal_tx_completion_status ts = {0};
1784 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1785 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1786 						     DP_MOD_ID_TDLS);
1787 
1788 	if (qdf_unlikely(!vdev)) {
1789 		dp_err_rl("vdev is null!");
1790 		goto error;
1791 	}
1792 
1793 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1794 	if (vdev->tx_non_std_data_callback.func) {
1795 		qdf_nbuf_set_next(nbuf, NULL);
1796 		vdev->tx_non_std_data_callback.func(
1797 				vdev->tx_non_std_data_callback.ctxt,
1798 				nbuf, ts.status);
1799 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1800 		return;
1801 	} else {
1802 		dp_err_rl("callback func is null");
1803 	}
1804 
1805 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1806 error:
1807 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1808 	qdf_nbuf_free(nbuf);
1809 }
1810 
1811 /**
1812  * dp_tx_msdu_single_map() - do nbuf map
1813  * @vdev: DP vdev handle
1814  * @tx_desc: DP TX descriptor pointer
1815  * @nbuf: skb pointer
1816  *
1817  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1818  * operation done in other components.
1819  *
1820  * Return: QDF_STATUS
1821  */
1822 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1823 					       struct dp_tx_desc_s *tx_desc,
1824 					       qdf_nbuf_t nbuf)
1825 {
1826 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1827 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1828 						  nbuf,
1829 						  QDF_DMA_TO_DEVICE,
1830 						  nbuf->len);
1831 	else
1832 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1833 					   QDF_DMA_TO_DEVICE);
1834 }
1835 #else
1836 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1837 					   struct dp_vdev *vdev,
1838 					   struct dp_tx_desc_s *tx_desc)
1839 {
1840 }
1841 
1842 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1843 						struct dp_tx_desc_s *tx_desc)
1844 {
1845 }
1846 
1847 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1848 					       struct dp_tx_desc_s *tx_desc,
1849 					       qdf_nbuf_t nbuf)
1850 {
1851 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1852 					  nbuf,
1853 					  QDF_DMA_TO_DEVICE,
1854 					  nbuf->len);
1855 }
1856 #endif
1857 
1858 #ifdef MESH_MODE_SUPPORT
1859 /**
1860  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
1861  * @soc: datapath SOC
1862  * @vdev: datapath vdev
1863  * @tx_desc: TX descriptor
1864  *
1865  * Return: None
1866  */
1867 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1868 					   struct dp_vdev *vdev,
1869 					   struct dp_tx_desc_s *tx_desc)
1870 {
1871 	if (qdf_unlikely(vdev->mesh_vdev))
1872 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
1873 }
1874 
1875 /**
1876  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
1877  * @soc: dp_soc handle
1878  * @tx_desc: TX descriptor
1880  *
1881  * Return: None
1882  */
1883 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1884 					     struct dp_tx_desc_s *tx_desc)
1885 {
1886 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1887 	struct dp_vdev *vdev = NULL;
1888 
1889 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1890 		qdf_nbuf_free(nbuf);
1891 		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1892 	} else {
1893 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1894 					     DP_MOD_ID_MESH);
1895 		if (vdev && vdev->osif_tx_free_ext)
1896 			vdev->osif_tx_free_ext((nbuf));
1897 		else
1898 			qdf_nbuf_free(nbuf);
1899 
1900 		if (vdev)
1901 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1902 	}
1903 }
1904 #else
1905 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
1906 					   struct dp_vdev *vdev,
1907 					   struct dp_tx_desc_s *tx_desc)
1908 {
1909 }
1910 
1911 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
1912 					     struct dp_tx_desc_s *tx_desc)
1913 {
1914 }
1915 #endif
1916 
1917 /**
1918  * dp_tx_frame_is_drop() - checks if the packet is loopback
1919  * @vdev: DP vdev handle
1920  * @srcmac: source MAC address
 * @dstmac: destination MAC address
1921  *
1922  * Return: 1 if frame needs to be dropped else 0
1923  */
1924 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1925 {
1926 	struct dp_pdev *pdev = NULL;
1927 	struct dp_ast_entry *src_ast_entry = NULL;
1928 	struct dp_ast_entry *dst_ast_entry = NULL;
1929 	struct dp_soc *soc = NULL;
1930 
1931 	qdf_assert(vdev);
1932 	pdev = vdev->pdev;
1933 	qdf_assert(pdev);
1934 	soc = pdev->soc;
1935 
1936 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1937 				(soc, dstmac, vdev->pdev->pdev_id);
1938 
1939 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1940 				(soc, srcmac, vdev->pdev->pdev_id);
1941 	if (dst_ast_entry && src_ast_entry) {
1942 		if (dst_ast_entry->peer_id ==
1943 				src_ast_entry->peer_id)
1944 			return 1;
1945 	}
1946 
1947 	return 0;
1948 }
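
/*
 * Illustrative caller sketch (hypothetical, not taken from this file):
 * the Ethernet header of the outgoing frame supplies the two MAC
 * addresses that dp_tx_frame_is_drop() looks up in the AST table.
 *
 *	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost)) {
 *		// both MACs resolve to the same peer: loopback, drop it
 *		qdf_nbuf_free(nbuf);
 *		return NULL;
 *	}
 */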
1949 
1950 /**
1951  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1952  * @vdev: DP vdev handle
1953  * @nbuf: skb
1954  * @msdu_info: MSDU information (TID, Tx queue, FW metadata, etc.)
1957  * @peer_id: peer_id of the peer in case of NAWDS frames
1958  * @tx_exc_metadata: Handle that holds exception path metadata
1959  *
1960  * Return: NULL on success,
1961  *         nbuf when it fails to send
1962  */
1963 qdf_nbuf_t
1964 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1965 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1966 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1967 {
1968 	struct dp_pdev *pdev = vdev->pdev;
1969 	struct dp_soc *soc = pdev->soc;
1970 	struct dp_tx_desc_s *tx_desc;
1971 	QDF_STATUS status;
1972 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1973 	uint16_t htt_tcl_metadata = 0;
1974 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1975 	uint8_t tid = msdu_info->tid;
1976 	struct cdp_tid_tx_stats *tid_stats = NULL;
1977 
1978 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1979 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1980 			msdu_info, tx_exc_metadata);
1981 	if (!tx_desc) {
1982 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1983 			  vdev, tx_q->desc_pool_id);
1984 		drop_code = TX_DESC_ERR;
1985 		goto fail_return;
1986 	}
1987 
1988 	if (qdf_unlikely(soc->cce_disable)) {
1989 		if (dp_cce_classify(vdev, nbuf) == true) {
1990 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1991 			tid = DP_VO_TID;
1992 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1993 		}
1994 	}
1995 
1996 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
1997 
1998 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1999 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2000 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
2001 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
2002 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
2003 				HTT_TCL_METADATA_TYPE_PEER_BASED);
2004 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
2005 				peer_id);
2006 	} else
2007 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2008 
2009 	if (msdu_info->exception_fw)
2010 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2011 
2012 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
2013 					 !pdev->enhanced_stats_en);
2014 
2015 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
2016 
2017 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
2018 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
2019 		/* Handle failure */
2020 		dp_err("qdf_nbuf_map failed");
2021 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
2022 		drop_code = TX_DMA_MAP_ERR;
2023 		goto release_desc;
2024 	}
2025 
2026 	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
2027 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2028 			       tx_desc->id, DP_TX_DESC_MAP);
2029 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
2030 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2031 					     htt_tcl_metadata,
2032 					     tx_exc_metadata, msdu_info);
2033 
2034 	if (status != QDF_STATUS_SUCCESS) {
2035 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2036 			     tx_desc, tx_q->ring_id);
2037 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
2038 				       tx_desc->id, DP_TX_DESC_UNMAP);
2039 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2040 					     QDF_DMA_TO_DEVICE,
2041 					     nbuf->len);
2042 		drop_code = TX_HW_ENQUEUE;
2043 		goto release_desc;
2044 	}
2045 
2046 	return NULL;
2047 
2048 release_desc:
2049 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2050 
2051 fail_return:
2052 	dp_tx_get_tid(vdev, nbuf, msdu_info);
2053 	tid_stats = &pdev->stats.tid_stats.
2054 		    tid_tx_stats[tx_q->ring_id][tid];
2055 	tid_stats->swdrop_cnt[drop_code]++;
2056 	return nbuf;
2057 }
2058 
2059 /**
2060  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2061  * @soc: Soc handle
2062  * @desc: software Tx descriptor to be processed
2063  *
2064  * Return: none
2065  */
2066 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2067 				       struct dp_tx_desc_s *desc)
2068 {
2069 	qdf_nbuf_t nbuf = desc->nbuf;
2070 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
2071 
2072 	/* nbuf already freed in vdev detach path */
2073 	if (!nbuf)
2074 		return;
2075 
2076 	/* If it is TDLS mgmt, don't unmap or free the frame */
2077 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2078 		return dp_non_std_tx_comp_free_buff(soc, desc);
2079 
2080 	/* 0 : MSDU buffer, 1 : MLE */
2081 	if (desc->msdu_ext_desc) {
2082 		/* TSO free */
2083 		if (hal_tx_ext_desc_get_tso_enable(
2084 					desc->msdu_ext_desc->vaddr)) {
2085 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
2086 					       desc->id, DP_TX_COMP_MSDU_EXT);
2087 			dp_tx_tso_seg_history_add(soc, desc->tso_desc,
2088 						  desc->nbuf, desc->id, type);
2089 			/* unmap each TSO seg before freeing the nbuf */
2090 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2091 						desc->tso_num_desc);
2092 			qdf_nbuf_free(nbuf);
2093 			return;
2094 		}
2095 	}
2096 	/* If it's ME frame, dont unmap the cloned nbuf's */
2097 	/* If it's an ME frame, don't unmap the cloned nbufs */
2098 		goto nbuf_free;
2099 
2100 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
2101 	dp_tx_unmap(soc, desc);
2102 
2103 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
2104 		return dp_mesh_tx_comp_free_buff(soc, desc);
2105 nbuf_free:
2106 	qdf_nbuf_free(nbuf);
2107 }
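
/*
 * Summary of the free paths taken by dp_tx_comp_free_buf() above
 * (descriptive note only):
 *  - TDLS mgmt frames  -> dp_non_std_tx_comp_free_buff(), no unmap here
 *  - TSO frames        -> unmap each TSO segment, then free the nbuf
 *  - cloned ME frames  -> free only, the clone is not unmapped here
 *  - mesh frames       -> unmap, then dp_mesh_tx_comp_free_buff()
 *  - everything else   -> unmap, then qdf_nbuf_free()
 */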
2108 
2109 /**
2110  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
2111  * @vdev: DP vdev handle
2112  * @nbuf: skb
2113  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
2114  *
2115  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
2116  *
2117  * Return: NULL on success,
2118  *         nbuf when it fails to send
2119  */
2120 #if QDF_LOCK_STATS
2121 noinline
2122 #else
2123 #endif
2124 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2125 				    struct dp_tx_msdu_info_s *msdu_info)
2126 {
2127 	uint32_t i;
2128 	struct dp_pdev *pdev = vdev->pdev;
2129 	struct dp_soc *soc = pdev->soc;
2130 	struct dp_tx_desc_s *tx_desc;
2131 	bool is_cce_classified = false;
2132 	QDF_STATUS status;
2133 	uint16_t htt_tcl_metadata = 0;
2134 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
2135 	struct cdp_tid_tx_stats *tid_stats = NULL;
2136 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
2137 
2138 	if (qdf_unlikely(soc->cce_disable)) {
2139 		is_cce_classified = dp_cce_classify(vdev, nbuf);
2140 		if (is_cce_classified) {
2141 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
2142 			msdu_info->tid = DP_VO_TID;
2143 		}
2144 	}
2145 
2146 	if (msdu_info->frm_type == dp_tx_frm_me)
2147 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2148 
2149 	i = 0;
2150 	/* Print statement to track i and num_seg */
2151 	/*
2152 	 * For each segment (maps to 1 MSDU), prepare software and hardware
2153 	 * descriptors using information in msdu_info
2154 	 */
2155 	while (i < msdu_info->num_seg) {
2156 		/*
2157 		 * Setup Tx descriptor for an MSDU, and MSDU extension
2158 		 * descriptor
2159 		 */
2160 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
2161 				tx_q->desc_pool_id);
2162 
2163 		if (!tx_desc) {
2164 			if (msdu_info->frm_type == dp_tx_frm_me) {
2165 				prep_desc_fail++;
2166 				dp_tx_me_free_buf(pdev,
2167 					(void *)(msdu_info->u.sg_info
2168 						.curr_seg->frags[0].vaddr));
2169 				if (prep_desc_fail == msdu_info->num_seg) {
2170 					/*
2171 					 * Unmap is needed only if descriptor
2172 					 * preparation failed for all segments.
2173 					 */
2174 					qdf_nbuf_unmap(soc->osdev,
2175 						       msdu_info->u.sg_info.
2176 						       curr_seg->nbuf,
2177 						       QDF_DMA_TO_DEVICE);
2178 				}
2179 				/*
2180 				 * Free the nbuf for the current segment
2181 				 * and make it point to the next in the list.
2182 				 * For ME, there are as many segments as there
2183 				 * are clients.
2184 				 */
2185 				qdf_nbuf_free(msdu_info->u.sg_info
2186 					      .curr_seg->nbuf);
2187 				if (msdu_info->u.sg_info.curr_seg->next) {
2188 					msdu_info->u.sg_info.curr_seg =
2189 						msdu_info->u.sg_info
2190 						.curr_seg->next;
2191 					nbuf = msdu_info->u.sg_info
2192 					       .curr_seg->nbuf;
2193 				}
2194 				i++;
2195 				continue;
2196 			}
2197 
2198 			if (msdu_info->frm_type == dp_tx_frm_tso) {
2199 				dp_tx_tso_seg_history_add(
2200 						soc,
2201 						msdu_info->u.tso_info.curr_seg,
2202 						nbuf, 0, DP_TX_DESC_UNMAP);
2203 				dp_tx_tso_unmap_segment(soc,
2204 							msdu_info->u.tso_info.
2205 							curr_seg,
2206 							msdu_info->u.tso_info.
2207 							tso_num_seg_list);
2208 
2209 				if (msdu_info->u.tso_info.curr_seg->next) {
2210 					msdu_info->u.tso_info.curr_seg =
2211 					msdu_info->u.tso_info.curr_seg->next;
2212 					i++;
2213 					continue;
2214 				}
2215 			}
2216 
2217 			goto done;
2218 		}
2219 
2220 		if (msdu_info->frm_type == dp_tx_frm_me) {
2221 			tx_desc->me_buffer =
2222 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
2223 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
2224 		}
2225 
2226 		if (is_cce_classified)
2227 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2228 
2229 		htt_tcl_metadata = vdev->htt_tcl_metadata;
2230 		if (msdu_info->exception_fw) {
2231 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
2232 		}
2233 
2234 		/*
2235 		 * For frames with multiple segments (TSO, ME), jump to next
2236 		 * segment.
2237 		 */
2238 		if (msdu_info->frm_type == dp_tx_frm_tso) {
2239 			if (msdu_info->u.tso_info.curr_seg->next) {
2240 				msdu_info->u.tso_info.curr_seg =
2241 					msdu_info->u.tso_info.curr_seg->next;
2242 
2243 				/*
2244 				 * If this is a jumbo nbuf, then increment the
2245 				 * number of nbuf users for each additional
2246 				 * segment of the msdu. This will ensure that
2247 				 * the skb is freed only after receiving tx
2248 				 * completion for all segments of an nbuf
2249 				 */
2250 				qdf_nbuf_inc_users(nbuf);
2251 
2252 				/* Check with MCL if this is needed */
2253 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
2254 				 */
2255 			}
2256 		}
2257 
2258 		/*
2259 		 * Enqueue the Tx MSDU descriptor to HW for transmit
2260 		 */
2261 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
2262 						     htt_tcl_metadata,
2263 						     NULL, msdu_info);
2264 
2265 		if (status != QDF_STATUS_SUCCESS) {
2266 			dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
2267 				tx_desc, tx_q->ring_id);
2268 
2269 			dp_tx_get_tid(vdev, nbuf, msdu_info);
2270 			tid_stats = &pdev->stats.tid_stats.
2271 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
2272 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
2273 
2274 			if (msdu_info->frm_type == dp_tx_frm_me) {
2275 				hw_enq_fail++;
2276 				if (hw_enq_fail == msdu_info->num_seg) {
2277 					/*
2278 					 * Unmap is needed only if enqueue
2279 					 * failed for all segments.
2280 					 */
2281 					qdf_nbuf_unmap(soc->osdev,
2282 						       msdu_info->u.sg_info.
2283 						       curr_seg->nbuf,
2284 						       QDF_DMA_TO_DEVICE);
2285 				}
2286 				/*
2287 				 * Free the nbuf for the current segment
2288 				 * and make it point to the next in the list.
2289 				 * For ME, there are as many segments as there
2290 				 * are clients.
2291 				 */
2292 				qdf_nbuf_free(msdu_info->u.sg_info
2293 					      .curr_seg->nbuf);
2294 				if (msdu_info->u.sg_info.curr_seg->next) {
2295 					msdu_info->u.sg_info.curr_seg =
2296 						msdu_info->u.sg_info
2297 						.curr_seg->next;
2298 					nbuf = msdu_info->u.sg_info
2299 					       .curr_seg->nbuf;
2300 				} else
2301 					break;
2302 				i++;
2303 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2304 				continue;
2305 			}
2306 
2307 			/*
2308 			 * For TSO frames, the nbuf users increment done for
2309 			 * the current segment has to be reverted, since the
2310 			 * hw enqueue for this segment failed
2311 			 */
2312 			if (msdu_info->frm_type == dp_tx_frm_tso &&
2313 			    msdu_info->u.tso_info.curr_seg) {
2314 				/*
2315 				 * unmap and free current,
2316 				 * retransmit remaining segments
2317 				 */
2318 				dp_tx_comp_free_buf(soc, tx_desc);
2319 				i++;
2320 				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2321 				continue;
2322 			}
2323 
2324 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
2325 			goto done;
2326 		}
2327 
2328 		/*
2329 		 * TODO
2330 		 * if tso_info structure can be modified to have curr_seg
2331 		 * as first element, following 2 blocks of code (for TSO and SG)
2332 		 * can be combined into 1
2333 		 */
2334 
2335 		/*
2336 		 * For Multicast-Unicast converted packets,
2337 		 * each converted frame (for a client) is represented as
2338 		 * 1 segment
2339 		 */
2340 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2341 				(msdu_info->frm_type == dp_tx_frm_me)) {
2342 			if (msdu_info->u.sg_info.curr_seg->next) {
2343 				msdu_info->u.sg_info.curr_seg =
2344 					msdu_info->u.sg_info.curr_seg->next;
2345 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2346 			} else
2347 				break;
2348 		}
2349 		i++;
2350 	}
2351 
2352 	nbuf = NULL;
2353 
2354 done:
2355 	return nbuf;
2356 }
2357 
2358 /**
2359  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
2360  *                     for SG frames
2361  * @vdev: DP vdev handle
2362  * @nbuf: skb
2363  * @seg_info: Pointer to Segment info Descriptor to be prepared
2364  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2365  *
2366  * Return: nbuf on success,
2367  *         NULL on failure
2368  */
2369 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2370 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2371 {
2372 	uint32_t cur_frag, nr_frags, i;
2373 	qdf_dma_addr_t paddr;
2374 	struct dp_tx_sg_info_s *sg_info;
2375 
2376 	sg_info = &msdu_info->u.sg_info;
2377 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2378 
2379 	if (QDF_STATUS_SUCCESS !=
2380 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2381 					   QDF_DMA_TO_DEVICE,
2382 					   qdf_nbuf_headlen(nbuf))) {
2383 		dp_tx_err("dma map error");
2384 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2385 
2386 		qdf_nbuf_free(nbuf);
2387 		return NULL;
2388 	}
2389 
2390 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2391 	seg_info->frags[0].paddr_lo = paddr;
2392 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2393 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2394 	seg_info->frags[0].vaddr = (void *) nbuf;
2395 
2396 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2397 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
2398 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
2399 			dp_tx_err("frag dma map error");
2400 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2401 			goto map_err;
2402 		}
2403 
2404 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2405 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2406 		seg_info->frags[cur_frag + 1].paddr_hi =
2407 			((uint64_t) paddr) >> 32;
2408 		seg_info->frags[cur_frag + 1].len =
2409 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
2410 	}
2411 
2412 	seg_info->frag_cnt = (cur_frag + 1);
2413 	seg_info->total_len = qdf_nbuf_len(nbuf);
2414 	seg_info->next = NULL;
2415 
2416 	sg_info->curr_seg = seg_info;
2417 
2418 	msdu_info->frm_type = dp_tx_frm_sg;
2419 	msdu_info->num_seg = 1;
2420 
2421 	return nbuf;
2422 map_err:
2423 	/* restore paddr into nbuf before calling unmap */
2424 	qdf_nbuf_mapped_paddr_set(nbuf,
2425 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2426 				  ((uint64_t)
2427 				  seg_info->frags[0].paddr_hi) << 32));
2428 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2429 				     QDF_DMA_TO_DEVICE,
2430 				     seg_info->frags[0].len);
2431 	for (i = 1; i <= cur_frag; i++) {
2432 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2433 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
2434 				   seg_info->frags[i].paddr_hi) << 32),
2435 				   seg_info->frags[i].len,
2436 				   QDF_DMA_TO_DEVICE);
2437 	}
2438 	qdf_nbuf_free(nbuf);
2439 	return NULL;
2440 }
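
/*
 * Note on the paddr_lo/paddr_hi split used above: a 64-bit DMA address is
 * stored as two 32-bit words in the frag descriptor and is reassembled the
 * same way in the map_err path. Minimal sketch (address is hypothetical):
 *
 *	uint64_t paddr = 0x0000000123456780ULL;
 *	uint32_t lo    = (uint32_t)paddr;            // 0x23456780
 *	uint32_t hi    = (uint32_t)(paddr >> 32);    // 0x00000001
 *	uint64_t back  = (uint64_t)lo | ((uint64_t)hi << 32);   // == paddr
 */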
2441 
2442 /**
2443  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2444  * @vdev: DP vdev handle
2445  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2446  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2447  *
2448  * Return: NULL on failure,
2449  * Return: None
2451 static
2452 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2453 				    struct dp_tx_msdu_info_s *msdu_info,
2454 				    uint16_t ppdu_cookie)
2455 {
2456 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2457 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2458 
2459 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2460 
2461 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2462 				(msdu_info->meta_data[5], 1);
2463 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2464 				(msdu_info->meta_data[5], 1);
2465 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2466 				(msdu_info->meta_data[6], ppdu_cookie);
2467 
2468 	msdu_info->exception_fw = 1;
2469 	msdu_info->is_tx_sniffer = 1;
2470 }
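
/*
 * Caller sketch for dp_tx_add_tx_sniffer_meta_data(), mirroring the
 * exception path later in this file: the sniffer metadata is attached
 * only when the exception metadata marks the frame as a tx-sniffer frame
 * and carries a non-zero ppdu_cookie.
 *
 *	if (tx_exc_metadata->is_tx_sniffer)
 *		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
 *					       tx_exc_metadata->ppdu_cookie);
 */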
2471 
2472 #ifdef MESH_MODE_SUPPORT
2473 
2474 /**
2475  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2476  *		and prepare msdu_info for mesh frames.
2477  * @vdev: DP vdev handle
2478  * @nbuf: skb
2479  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2480  *
2481  * Return: NULL on failure,
2482  *         nbuf when extracted successfully
2483  */
2484 static
2485 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2486 				struct dp_tx_msdu_info_s *msdu_info)
2487 {
2488 	struct meta_hdr_s *mhdr;
2489 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2490 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2491 
2492 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2493 
2494 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2495 		msdu_info->exception_fw = 0;
2496 		goto remove_meta_hdr;
2497 	}
2498 
2499 	msdu_info->exception_fw = 1;
2500 
2501 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2502 
2503 	meta_data->host_tx_desc_pool = 1;
2504 	meta_data->update_peer_cache = 1;
2505 	meta_data->learning_frame = 1;
2506 
2507 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2508 		meta_data->power = mhdr->power;
2509 
2510 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2511 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2512 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2513 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2514 
2515 		meta_data->dyn_bw = 1;
2516 
2517 		meta_data->valid_pwr = 1;
2518 		meta_data->valid_mcs_mask = 1;
2519 		meta_data->valid_nss_mask = 1;
2520 		meta_data->valid_preamble_type  = 1;
2521 		meta_data->valid_retries = 1;
2522 		meta_data->valid_bw_info = 1;
2523 	}
2524 
2525 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2526 		meta_data->encrypt_type = 0;
2527 		meta_data->valid_encrypt_type = 1;
2528 		meta_data->learning_frame = 0;
2529 	}
2530 
2531 	meta_data->valid_key_flags = 1;
2532 	meta_data->key_flags = (mhdr->keyix & 0x3);
2533 
2534 remove_meta_hdr:
2535 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2536 		dp_tx_err("qdf_nbuf_pull_head failed");
2537 		qdf_nbuf_free(nbuf);
2538 		return NULL;
2539 	}
2540 
2541 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2542 
2543 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
2544 		   " tid %d to_fw %d",
2545 		   msdu_info->meta_data[0],
2546 		   msdu_info->meta_data[1],
2547 		   msdu_info->meta_data[2],
2548 		   msdu_info->meta_data[3],
2549 		   msdu_info->meta_data[4],
2550 		   msdu_info->meta_data[5],
2551 		   msdu_info->tid, msdu_info->exception_fw);
2552 
2553 	return nbuf;
2554 }
2555 #else
2556 static
2557 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2558 				struct dp_tx_msdu_info_s *msdu_info)
2559 {
2560 	return nbuf;
2561 }
2562 
2563 #endif
2564 
2565 /**
2566  * dp_check_exc_metadata() - Checks if parameters are valid
2567  * @tx_exc: holds all exception path parameters
2568  *
2569  * Return: true when all the parameters are valid, else false
2570  *
2571  */
2572 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2573 {
2574 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
2575 			    HTT_INVALID_TID);
2576 	bool invalid_encap_type =
2577 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2578 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2579 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2580 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2581 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2582 			       tx_exc->ppdu_cookie == 0);
2583 
2584 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2585 	    invalid_cookie) {
2586 		return false;
2587 	}
2588 
2589 	return true;
2590 }
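
/*
 * Minimal sketch of exception metadata that passes dp_check_exc_metadata()
 * above (field names come from the checks; the values are illustrative):
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.peer_id       = HTT_INVALID_PEER;         // no peer override
 *	tx_exc.tid           = HTT_INVALID_TID;          // keep default TID
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type      = CDP_INVALID_SEC_TYPE;
 *	tx_exc.is_tx_sniffer = 0;      // else ppdu_cookie must be non-zero
 */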
2591 
2592 #ifdef ATH_SUPPORT_IQUE
2593 /**
2594  * dp_tx_mcast_enhance() - Multicast enhancement on TX
2595  * @vdev: vdev handle
2596  * @nbuf: skb
2597  *
2598  * Return: true on success,
2599  *         false on failure
2600  */
2601 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2602 {
2603 	qdf_ether_header_t *eh;
2604 
2605 	/* Mcast to Ucast Conversion*/
2606 	if (qdf_likely(!vdev->mcast_enhancement_en))
2607 		return true;
2608 
2609 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2610 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2611 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2612 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
2613 		qdf_nbuf_set_next(nbuf, NULL);
2614 
2615 		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
2616 				 qdf_nbuf_len(nbuf));
2617 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
2618 				QDF_STATUS_SUCCESS) {
2619 			return false;
2620 		}
2621 
2622 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2623 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2624 					QDF_STATUS_SUCCESS) {
2625 				return false;
2626 			}
2627 		}
2628 	}
2629 
2630 	return true;
2631 }
2632 #else
2633 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2634 {
2635 	return true;
2636 }
2637 #endif
2638 
2639 /**
2640  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2641  * @nbuf: qdf_nbuf_t
2642  * @vdev: struct dp_vdev *
2643  *
2644  * Allow packet for processing only if it is for peer client which is
2645  * connected with same vap. Drop packet if client is connected to
2646  * different vap.
2647  *
2648  * Return: QDF_STATUS
2649  */
2650 static inline QDF_STATUS
2651 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2652 {
2653 	struct dp_ast_entry *dst_ast_entry = NULL;
2654 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2655 
2656 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2657 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2658 		return QDF_STATUS_SUCCESS;
2659 
2660 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2661 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2662 							eh->ether_dhost,
2663 							vdev->vdev_id);
2664 
2665 	/* If there is no ast entry, return failure */
2666 	if (qdf_unlikely(!dst_ast_entry)) {
2667 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2668 		return QDF_STATUS_E_FAILURE;
2669 	}
2670 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2671 
2672 	return QDF_STATUS_SUCCESS;
2673 }
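
/*
 * Usage sketch for dp_tx_per_pkt_vdev_id_check(), mirroring the callers
 * later in this file: a unicast frame whose destination AST entry belongs
 * to a different vdev is handed back to the caller and counted as dropped.
 *
 *	if (dp_tx_per_pkt_vdev_id_check(nbuf, vdev) == QDF_STATUS_E_FAILURE) {
 *		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
 *		return nbuf;
 *	}
 */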
2674 
2675 /**
2676  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2677  * @soc: DP soc handle
2678  * @vdev_id: id of DP vdev handle
2679  * @nbuf: skb
2680  * @tx_exc_metadata: Handle that holds exception path meta data
2681  *
2682  * Entry point for Core Tx layer (DP_TX) invoked from
2683  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2684  *
2685  * Return: NULL on success,
2686  *         nbuf when it fails to send
2687  */
2688 qdf_nbuf_t
2689 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2690 		     qdf_nbuf_t nbuf,
2691 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2692 {
2693 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2694 	qdf_ether_header_t *eh = NULL;
2695 	struct dp_tx_msdu_info_s msdu_info;
2696 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2697 						     DP_MOD_ID_TX_EXCEPTION);
2698 
2699 	if (qdf_unlikely(!vdev))
2700 		goto fail;
2701 
2702 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2703 
2704 	if (!tx_exc_metadata)
2705 		goto fail;
2706 
2707 	msdu_info.tid = tx_exc_metadata->tid;
2708 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2709 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2710 			 QDF_MAC_ADDR_REF(nbuf->data));
2711 
2712 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2713 
2714 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2715 		dp_tx_err("Invalid parameters in exception path");
2716 		goto fail;
2717 	}
2718 
2719 	/* Basic sanity checks for unsupported packets */
2720 
2721 	/* MESH mode */
2722 	if (qdf_unlikely(vdev->mesh_vdev)) {
2723 		dp_tx_err("Mesh mode is not supported in exception path");
2724 		goto fail;
2725 	}
2726 
2727 	/*
2728 	 * Classify the frame and call corresponding
2729 	 * "prepare" function which extracts the segment (TSO)
2730 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2731 	 * into MSDU_INFO structure which is later used to fill
2732 	 * SW and HW descriptors.
2733 	 */
2734 	if (qdf_nbuf_is_tso(nbuf)) {
2735 		dp_verbose_debug("TSO frame %pK", vdev);
2736 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2737 				 qdf_nbuf_len(nbuf));
2738 
2739 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2740 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2741 					 qdf_nbuf_len(nbuf));
2742 			goto fail;
2743 		}
2744 
2745 		goto send_multiple;
2746 	}
2747 
2748 	/* SG */
2749 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2750 		struct dp_tx_seg_info_s seg_info = {0};
2751 
2752 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2753 		if (!nbuf)
2754 			goto fail;
2755 
2756 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2757 
2758 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2759 				 qdf_nbuf_len(nbuf));
2760 
2761 		goto send_multiple;
2762 	}
2763 
2764 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2765 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2766 				 qdf_nbuf_len(nbuf));
2767 
2768 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2769 					       tx_exc_metadata->ppdu_cookie);
2770 	}
2771 
2772 	/*
2773 	 * Get HW Queue to use for this frame.
2774 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2775 	 * dedicated for data and 1 for command.
2776 	 * "queue_id" maps to one hardware ring.
2777 	 *  With each ring, we also associate a unique Tx descriptor pool
2778 	 *  to minimize lock contention for these resources.
2779 	 */
2780 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2781 
2782 	/*
2783 	 * Check exception descriptors
2784 	 */
2785 	if (dp_tx_exception_limit_check(vdev))
2786 		goto fail;
2787 
2788 	/*  Single linear frame */
2789 	/*
2790 	 * If nbuf is a simple linear frame, use send_single function to
2791 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2792 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2793 	 */
2794 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2795 			tx_exc_metadata->peer_id, tx_exc_metadata);
2796 
2797 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2798 	return nbuf;
2799 
2800 send_multiple:
2801 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2802 
2803 fail:
2804 	if (vdev)
2805 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2806 	dp_verbose_debug("pkt send failed");
2807 	return nbuf;
2808 }
2809 
2810 /**
2811  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2812  *      in exception path in special case to avoid regular exception path check.
2813  * @soc: DP soc handle
2814  * @vdev_id: id of DP vdev handle
2815  * @nbuf: skb
2816  * @tx_exc_metadata: Handle that holds exception path meta data
2817  *
2818  * Entry point for Core Tx layer (DP_TX) invoked from
2819  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2820  *
2821  * Return: NULL on success,
2822  *         nbuf when it fails to send
2823  */
2824 qdf_nbuf_t
2825 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2826 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
2827 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2828 {
2829 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2830 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2831 						     DP_MOD_ID_TX_EXCEPTION);
2832 
2833 	if (qdf_unlikely(!vdev))
2834 		goto fail;
2835 
2836 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2837 			== QDF_STATUS_E_FAILURE)) {
2838 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2839 		goto fail;
2840 	}
2841 
2842 	/* Drop the ref; it is taken again inside dp_tx_send_exception() */
2843 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2844 
2845 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2846 
2847 fail:
2848 	if (vdev)
2849 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2850 	dp_verbose_debug("pkt send failed");
2851 	return nbuf;
2852 }
2853 
2854 /**
2855  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2856  * @soc: DP soc handle
2857  * @vdev_id: DP vdev handle
2858  * @nbuf: skb
2859  *
2860  * Entry point for Core Tx layer (DP_TX) invoked from
2861  * hard_start_xmit in OSIF/HDD
2862  *
2863  * Return: NULL on success,
2864  *         nbuf when it fails to send
2865  */
2866 #ifdef MESH_MODE_SUPPORT
2867 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2868 			   qdf_nbuf_t nbuf)
2869 {
2870 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2871 	struct meta_hdr_s *mhdr;
2872 	qdf_nbuf_t nbuf_mesh = NULL;
2873 	qdf_nbuf_t nbuf_clone = NULL;
2874 	struct dp_vdev *vdev;
2875 	uint8_t no_enc_frame = 0;
2876 
2877 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2878 	if (!nbuf_mesh) {
2879 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2880 				"qdf_nbuf_unshare failed");
2881 		return nbuf;
2882 	}
2883 
2884 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
2885 	if (!vdev) {
2886 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2887 				"vdev is NULL for vdev_id %d", vdev_id);
2888 		return nbuf;
2889 	}
2890 
2891 	nbuf = nbuf_mesh;
2892 
2893 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2894 
2895 	if ((vdev->sec_type != cdp_sec_type_none) &&
2896 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2897 		no_enc_frame = 1;
2898 
2899 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2900 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2901 
2902 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2903 		       !no_enc_frame) {
2904 		nbuf_clone = qdf_nbuf_clone(nbuf);
2905 		if (!nbuf_clone) {
2906 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2907 				"qdf_nbuf_clone failed");
2908 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2909 			return nbuf;
2910 		}
2911 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2912 	}
2913 
2914 	if (nbuf_clone) {
2915 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
2916 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2917 		} else {
2918 			qdf_nbuf_free(nbuf_clone);
2919 		}
2920 	}
2921 
2922 	if (no_enc_frame)
2923 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2924 	else
2925 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2926 
2927 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
2928 	if ((!nbuf) && no_enc_frame) {
2929 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2930 	}
2931 
2932 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2933 	return nbuf;
2934 }
2935 
2936 #else
2937 
2938 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2939 			   qdf_nbuf_t nbuf)
2940 {
2941 	return dp_tx_send(soc, vdev_id, nbuf);
2942 }
2943 
2944 #endif
2945 
2946 /**
2947  * dp_tx_nawds_handler() - NAWDS handler
2948  *
2949  * @soc: DP soc handle
2950  * @vdev: DP vdev handle
2951  * @msdu_info: msdu_info required to create HTT metadata
2952  * @nbuf: skb
2953  *
2954  * This API transmits the multicast frame, tagged with the peer id,
2955  * to each NAWDS-enabled peer.
2956  *
2957  * Return: none
2958  */
2959 
2960 static inline
2961 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2962 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2963 {
2964 	struct dp_peer *peer = NULL;
2965 	qdf_nbuf_t nbuf_clone = NULL;
2966 	uint16_t peer_id = DP_INVALID_PEER;
2967 	uint16_t sa_peer_id = DP_INVALID_PEER;
2968 	struct dp_ast_entry *ast_entry = NULL;
2969 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2970 
2971 	qdf_spin_lock_bh(&soc->ast_lock);
2972 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2973 				(soc,
2974 				 (uint8_t *)(eh->ether_shost),
2975 				 vdev->pdev->pdev_id);
2976 
2977 	if (ast_entry)
2978 		sa_peer_id = ast_entry->peer_id;
2979 	qdf_spin_unlock_bh(&soc->ast_lock);
2980 
2981 	qdf_spin_lock_bh(&vdev->peer_list_lock);
2982 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2983 		if (!peer->bss_peer && peer->nawds_enabled) {
2984 			peer_id = peer->peer_id;
2985 			/* Multicast packets needs to be
2986 			 * dropped in case of intra bss forwarding
2987 			 */
2988 			if (sa_peer_id == peer->peer_id) {
2989 				dp_tx_debug("multicast packet");
2990 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2991 				continue;
2992 			}
2993 			nbuf_clone = qdf_nbuf_clone(nbuf);
2994 
2995 			if (!nbuf_clone) {
2996 				QDF_TRACE(QDF_MODULE_ID_DP,
2997 					  QDF_TRACE_LEVEL_ERROR,
2998 					  FL("nbuf clone failed"));
2999 				break;
3000 			}
3001 
3002 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3003 							    msdu_info, peer_id,
3004 							    NULL);
3005 
3006 			if (nbuf_clone) {
3007 				dp_tx_debug("pkt send failed");
3008 				qdf_nbuf_free(nbuf_clone);
3009 			} else {
3010 				if (peer_id != DP_INVALID_PEER)
3011 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
3012 							 1, qdf_nbuf_len(nbuf));
3013 			}
3014 		}
3015 	}
3016 
3017 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3018 }
3019 
3020 /**
3021  * dp_tx_send() - Transmit a frame on a given VAP
3022  * @soc: DP soc handle
3023  * @vdev_id: id of DP vdev handle
3024  * @nbuf: skb
3025  *
3026  * Entry point for Core Tx layer (DP_TX) invoked from
3027  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
3028  * cases
3029  *
3030  * Return: NULL on success,
3031  *         nbuf when it fails to send
3032  */
3033 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3034 		      qdf_nbuf_t nbuf)
3035 {
3036 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3037 	uint16_t peer_id = HTT_INVALID_PEER;
3038 	/*
3039 	 * doing a memzero causes additional function call overhead,
3040 	 * so the struct is cleared with a static initializer instead
3041 	 */
3042 	struct dp_tx_msdu_info_s msdu_info = {0};
3043 	struct dp_vdev *vdev = NULL;
3044 
3045 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3046 		return nbuf;
3047 
3048 	/*
3049 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3050 	 * it in the per-packet path.
3051 	 *
3052 	 * As in this path vdev memory is already protected with netdev
3053 	 * tx lock
3054 	 */
3055 	vdev = soc->vdev_id_map[vdev_id];
3056 	if (qdf_unlikely(!vdev))
3057 		return nbuf;
3058 
3059 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3060 			 QDF_MAC_ADDR_REF(nbuf->data));
3061 
3062 	/*
3063 	 * Set Default Host TID value to invalid TID
3064 	 * (TID override disabled)
3065 	 */
3066 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
3067 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
3068 
3069 	if (qdf_unlikely(vdev->mesh_vdev)) {
3070 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
3071 								&msdu_info);
3072 		if (!nbuf_mesh) {
3073 			dp_verbose_debug("Extracting mesh metadata failed");
3074 			return nbuf;
3075 		}
3076 		nbuf = nbuf_mesh;
3077 	}
3078 
3079 	/*
3080 	 * Get HW Queue to use for this frame.
3081 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
3082 	 * dedicated for data and 1 for command.
3083 	 * "queue_id" maps to one hardware ring.
3084 	 *  With each ring, we also associate a unique Tx descriptor pool
3085 	 *  to minimize lock contention for these resources.
3086 	 */
3087 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3088 
3089 	/*
3090 	 * TCL H/W supports 2 DSCP-TID mapping tables.
3091 	 *  Table 1 - Default DSCP-TID mapping table
3092 	 *  Table 2 - 1 DSCP-TID override table
3093 	 *
3094 	 * If we need a different DSCP-TID mapping for this vap,
3095 	 * call tid_classify to extract DSCP/ToS from frame and
3096 	 * map to a TID and store in msdu_info. This is later used
3097 	 * to fill in TCL Input descriptor (per-packet TID override).
3098 	 */
3099 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
3100 
3101 	/*
3102 	 * Classify the frame and call corresponding
3103 	 * "prepare" function which extracts the segment (TSO)
3104 	 * and fragmentation information (for TSO , SG, ME, or Raw)
3105 	 * into MSDU_INFO structure which is later used to fill
3106 	 * SW and HW descriptors.
3107 	 */
3108 	if (qdf_nbuf_is_tso(nbuf)) {
3109 		dp_verbose_debug("TSO frame %pK", vdev);
3110 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
3111 				 qdf_nbuf_len(nbuf));
3112 
3113 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
3114 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
3115 					 qdf_nbuf_len(nbuf));
3116 			return nbuf;
3117 		}
3118 
3119 		goto send_multiple;
3120 	}
3121 
3122 	/* SG */
3123 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
3124 		struct dp_tx_seg_info_s seg_info = {0};
3125 
3126 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
3127 		if (!nbuf)
3128 			return NULL;
3129 
3130 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
3131 
3132 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
3133 				qdf_nbuf_len(nbuf));
3134 
3135 		goto send_multiple;
3136 	}
3137 
3138 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
3139 		return NULL;
3140 
3141 	/* RAW */
3142 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
3143 		struct dp_tx_seg_info_s seg_info = {0};
3144 
3145 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
3146 		if (!nbuf)
3147 			return NULL;
3148 
3149 		dp_verbose_debug("Raw frame %pK", vdev);
3150 
3151 		goto send_multiple;
3152 
3153 	}
3154 
3155 	if (qdf_unlikely(vdev->nawds_enabled)) {
3156 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3157 					  qdf_nbuf_data(nbuf);
3158 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
3159 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
3160 
3161 		peer_id = DP_INVALID_PEER;
3162 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
3163 				 1, qdf_nbuf_len(nbuf));
3164 	}
3165 
3166 	/*  Single linear frame */
3167 	/*
3168 	 * If nbuf is a simple linear frame, use send_single function to
3169 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
3170 	 * SRNG. There is no need to setup a MSDU extension descriptor.
3171 	 */
3172 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
3173 
3174 	return nbuf;
3175 
3176 send_multiple:
3177 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3178 
3179 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
3180 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
3181 
3182 	return nbuf;
3183 }
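
/*
 * Illustrative caller sketch (hypothetical): dp_tx_send() consumes the
 * nbuf on success and returns it on failure, so an OSIF/HDD caller owns
 * whatever comes back and must free it:
 *
 *	qdf_nbuf_t ret = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *
 *	if (ret)
 *		qdf_nbuf_free(ret);	// send failed, drop the frame
 */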
3184 
3185 /**
3186  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3187  *      case to avoid the per-packet check.
3188  * @soc: DP soc handle
3189  * @vdev_id: id of DP vdev handle
3190  * @nbuf: skb
3191  *
3192  * Entry point for Core Tx layer (DP_TX) invoked from
3193  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3194  * with special condition to avoid per pkt check in dp_tx_send
3195  *
3196  * Return: NULL on success,
3197  *         nbuf when it fails to send
3198  */
3199 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
3200 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
3201 {
3202 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3203 	struct dp_vdev *vdev = NULL;
3204 
3205 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
3206 		return nbuf;
3207 
3208 	/*
3209 	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3210 	 * it in the per-packet path.
3211 	 *
3212 	 * As in this path vdev memory is already protected with netdev
3213 	 * tx lock
3214 	 */
3215 	vdev = soc->vdev_id_map[vdev_id];
3216 	if (qdf_unlikely(!vdev))
3217 		return nbuf;
3218 
3219 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
3220 			== QDF_STATUS_E_FAILURE)) {
3221 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
3222 		return nbuf;
3223 	}
3224 
3225 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
3226 }
3227 
3228 #ifdef UMAC_SUPPORT_PROXY_ARP
3229 /**
3230  * dp_tx_proxy_arp() - Tx proxy arp handler
3231  * @vdev: datapath vdev handle
3232  * @nbuf: skb
3233  *
3234  * Return: status
3235  */
3236 static inline
3237 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3238 {
3239 	if (vdev->osif_proxy_arp)
3240 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
3241 
3242 	/*
3243 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3244 	 * osif_proxy_arp to have a valid function pointer assigned
3245 	 * to it
3246 	 */
3247 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
3248 
3249 	return QDF_STATUS_NOT_INITIALIZED;
3250 }
3251 #else
3252 /**
3253  * dp_tx_proxy_arp() - Tx proxy arp handler
3254  * @vdev: datapath vdev handle
3255  * @nbuf: skb
3256  *
3257  * This function always return 0 when UMAC_SUPPORT_PROXY_ARP
3258  * is not defined.
3259  *
3260  * Return: status
3261  */
3262 static inline
3263 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3264 {
3265 	return QDF_STATUS_SUCCESS;
3266 }
3267 #endif
3268 
3269 /**
3270  * dp_tx_reinject_handler() - Tx Reinject Handler
3271  * @soc: datapath soc handle
3272  * @vdev: datapath vdev handle
3273  * @tx_desc: software descriptor head pointer
3274  * @status : Tx completion status from HTT descriptor
3275  *
3276  * This function reinjects frames back to Target.
3277  * Todo - Host queue needs to be added
3278  *
3279  * Return: none
3280  */
3281 static
3282 void dp_tx_reinject_handler(struct dp_soc *soc,
3283 			    struct dp_vdev *vdev,
3284 			    struct dp_tx_desc_s *tx_desc,
3285 			    uint8_t *status)
3286 {
3287 	struct dp_peer *peer = NULL;
3288 	uint32_t peer_id = HTT_INVALID_PEER;
3289 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3290 	qdf_nbuf_t nbuf_copy = NULL;
3291 	struct dp_tx_msdu_info_s msdu_info;
3292 #ifdef WDS_VENDOR_EXTENSION
3293 	int is_mcast = 0, is_ucast = 0;
3294 	int num_peers_3addr = 0;
3295 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
3296 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
3297 #endif
3298 
3299 	qdf_assert(vdev);
3300 
3301 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3302 
3303 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3304 
3305 	dp_tx_debug("Tx reinject path");
3306 
3307 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
3308 			qdf_nbuf_len(tx_desc->nbuf));
3309 
3310 #ifdef WDS_VENDOR_EXTENSION
3311 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
3312 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
3313 	} else {
3314 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
3315 	}
3316 	is_ucast = !is_mcast;
3317 
3318 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3319 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3320 		if (peer->bss_peer)
3321 			continue;
3322 
3323 		/* Detect wds peers that use 3-addr framing for mcast.
3324 		 * If there are any, the bss_peer is used to send the
3325 		 * mcast frame using 3-addr format. All wds enabled
3326 		 * peers that use 4-addr framing for mcast frames will
3327 		 * be duplicated and sent as 4-addr frames below.
3328 		 */
3329 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
3330 			num_peers_3addr = 1;
3331 			break;
3332 		}
3333 	}
3334 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3335 #endif
3336 
3337 	if (qdf_unlikely(vdev->mesh_vdev)) {
3338 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
3339 	} else {
3340 		qdf_spin_lock_bh(&vdev->peer_list_lock);
3341 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3342 			if ((peer->peer_id != HTT_INVALID_PEER) &&
3343 #ifdef WDS_VENDOR_EXTENSION
3344 			/*
3345 			 * . if 3-addr STA, then send on BSS Peer
3346 			 * . if Peer WDS enabled and accept 4-addr mcast,
3347 			 * send mcast on that peer only
3348 			 * . if Peer WDS enabled and accept 4-addr ucast,
3349 			 * send ucast on that peer only
3350 			 */
3351 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
3352 			 (peer->wds_enabled &&
3353 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
3354 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
3355 #else
3356 			(peer->bss_peer &&
3357 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
3358 #endif
3359 				peer_id = DP_INVALID_PEER;
3360 
3361 				nbuf_copy = qdf_nbuf_copy(nbuf);
3362 
3363 				if (!nbuf_copy) {
3364 					dp_tx_debug("nbuf copy failed");
3365 					break;
3366 				}
3367 
3368 				nbuf_copy = dp_tx_send_msdu_single(vdev,
3369 						nbuf_copy,
3370 						&msdu_info,
3371 						peer_id,
3372 						NULL);
3373 
3374 				if (nbuf_copy) {
3375 					dp_tx_debug("pkt send failed");
3376 					qdf_nbuf_free(nbuf_copy);
3377 				}
3378 			}
3379 		}
3380 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3381 	}
3382 
3383 	qdf_nbuf_free(nbuf);
3384 
3385 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3386 }
3387 
3388 /**
3389  * dp_tx_inspect_handler() - Tx Inspect Handler
3390  * @soc: datapath soc handle
3391  * @vdev: datapath vdev handle
3392  * @tx_desc: software descriptor head pointer
3393  * @status : Tx completion status from HTT descriptor
3394  *
3395  * Handles Tx frames sent back to Host for inspection
3396  * (ProxyARP)
3397  *
3398  * Return: none
3399  */
3400 static void dp_tx_inspect_handler(struct dp_soc *soc,
3401 				  struct dp_vdev *vdev,
3402 				  struct dp_tx_desc_s *tx_desc,
3403 				  uint8_t *status)
3404 {
3405 
3406 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3407 			"%s Tx inspect path",
3408 			__func__);
3409 
3410 	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
3411 			 qdf_nbuf_len(tx_desc->nbuf));
3412 
3413 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
3414 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3415 }
3416 
3417 #ifdef MESH_MODE_SUPPORT
3418 /**
3419  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3420  *                                         in mesh meta header
3421  * @tx_desc: software descriptor head pointer
3422  * @ts: pointer to tx completion stats
3423  * Return: none
3424  */
3425 static
3426 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3427 		struct hal_tx_completion_status *ts)
3428 {
3429 	qdf_nbuf_t netbuf = tx_desc->nbuf;
3430 
3431 	if (!tx_desc->msdu_ext_desc) {
3432 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3433 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3434 				"netbuf %pK offset %d",
3435 				netbuf, tx_desc->pkt_offset);
3436 			return;
3437 		}
3438 	}
3439 }
3440 
3441 #else
3442 static
3443 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3444 		struct hal_tx_completion_status *ts)
3445 {
3446 }
3447 
3448 #endif
3449 
3450 #ifdef QCA_PEER_EXT_STATS
3451 /**
3452  * dp_tx_compute_tid_delay() - Compute per TID delay
3453  * @stats: Per TID delay stats
3454  * @tx_desc: Software Tx descriptor
3455  *
3456  * Compute the software enqueue and hw enqueue delays and
3457  * update the respective histograms
3458  *
3459  * Return: void
3460  */
3461 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3462 				    struct dp_tx_desc_s *tx_desc)
3463 {
3464 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3465 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3466 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3467 
3468 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3469 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3470 	timestamp_hw_enqueue = tx_desc->timestamp;
3471 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3472 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3473 					 timestamp_hw_enqueue);
3474 
3475 	/*
3476 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
3477 	 */
3478 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3479 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3480 }
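
/*
 * Worked example for the two delays computed above (timestamps in ms,
 * values hypothetical):
 *
 *	ingress (skb timestamp)          t0 = 1000
 *	HW enqueue (tx_desc->timestamp)  t1 = 1003
 *	completion (now)                 t2 = 1010
 *
 *	sw_enqueue_delay    = t1 - t0 = 3 ms
 *	fwhw_transmit_delay = t2 - t1 = 7 ms
 */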
3481 
3482 /**
3483  * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3484  * @peer: DP peer context
3485  * @tx_desc: Tx software descriptor
3486  * @tid: Transmission ID
3487  * @ring_id: Tx completion ring number (CPU context ID)
3488  *
3489  * Update the peer extended stats. These are enhanced
3490  * per-msdu delay stats.
3491  *
3492  * Return: void
3493  */
3494 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3495 					struct dp_tx_desc_s *tx_desc,
3496 					uint8_t tid, uint8_t ring_id)
3497 {
3498 	struct dp_pdev *pdev = peer->vdev->pdev;
3499 	struct dp_soc *soc = NULL;
3500 	struct cdp_peer_ext_stats *pext_stats = NULL;
3501 
3502 	soc = pdev->soc;
3503 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3504 		return;
3505 
3506 	pext_stats = peer->pext_stats;
3507 
3508 	qdf_assert(pext_stats);
3509 	qdf_assert(ring < CDP_MAX_TXRX_CTX);
3510 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3511 	/*
3512 	 * For non-TID packets use the TID 9
3513 	 */
3514 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3515 		tid = CDP_MAX_DATA_TIDS - 1;
3516 
3517 	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3518 				tx_desc);
3519 }
3520 #else
3521 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3522 					       struct dp_tx_desc_s *tx_desc,
3523 					       uint8_t tid, uint8_t ring_id)
3524 {
3525 }
3526 #endif
3527 
3528 /**
3529  * dp_tx_compute_delay() - Compute and fill in all timestamps
3530  *				to pass in correct fields
3531  *
3532  * @vdev: vdev handle
3533  * @tx_desc: tx descriptor
3534  * @tid: tid value
3535  * @ring_id: TCL or WBM ring number for transmit path
3536  * Return: none
3537  */
3538 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3539 				struct dp_tx_desc_s *tx_desc,
3540 				uint8_t tid, uint8_t ring_id)
3541 {
3542 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3543 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3544 
3545 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3546 		return;
3547 
3548 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3549 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3550 	timestamp_hw_enqueue = tx_desc->timestamp;
3551 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3552 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3553 					 timestamp_hw_enqueue);
3554 	interframe_delay = (uint32_t)(timestamp_ingress -
3555 				      vdev->prev_tx_enq_tstamp);
3556 
3557 	/*
3558 	 * Delay in software enqueue
3559 	 */
3560 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3561 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3562 	/*
3563 	 * Delay between packet enqueued to HW and Tx completion
3564 	 */
3565 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3566 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3567 
3568 	/*
3569 	 * Update interframe delay stats calculated at hardstart receive point.
3570 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3571 	 * interframe delay will not be calculated correctly for the 1st frame.
3572 	 * On the other side, this will help in avoiding extra per packet check
3573 	 * of !vdev->prev_tx_enq_tstamp.
3574 	 */
3575 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3576 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3577 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3578 }
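
/*
 * Worked example of the delay computation above (illustrative values,
 * not taken from the driver): with an ingress timestamp of 100 ms, a HW
 * enqueue timestamp of 103 ms and the completion processed at 110 ms,
 *
 *	sw_enqueue_delay    = 103 - 100 = 3 ms
 *	fwhw_transmit_delay = 110 - 103 = 7 ms
 *
 * and, if the previous frame was enqueued at 96 ms,
 *
 *	interframe_delay    = 100 - 96  = 4 ms
 *
 * For the very first frame prev_tx_enq_tstamp is 0, so the computed
 * interframe delay is meaningless, as noted in the comment inside the
 * function.
 */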
3579 
3580 #ifdef DISABLE_DP_STATS
3581 static
3582 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3583 {
3584 }
3585 #else
3586 static
3587 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3588 {
3589 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3590 
3591 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3592 	if (subtype != QDF_PROTO_INVALID)
3593 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3594 }
3595 #endif
3596 
3597 /**
3598  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3599  *				per wbm ring
3600  *
3601  * @tx_desc: software descriptor head pointer
3602  * @ts: Tx completion status
3603  * @peer: peer handle
3604  * @ring_id: ring number
3605  *
3606  * Return: None
3607  */
3608 static inline void
3609 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3610 			struct hal_tx_completion_status *ts,
3611 			struct dp_peer *peer, uint8_t ring_id)
3612 {
3613 	struct dp_pdev *pdev = peer->vdev->pdev;
3614 	struct dp_soc *soc = NULL;
3615 	uint8_t mcs, pkt_type;
3616 	uint8_t tid = ts->tid;
3617 	uint32_t length;
3618 	struct cdp_tid_tx_stats *tid_stats;
3619 
3620 	if (!pdev)
3621 		return;
3622 
3623 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3624 		tid = CDP_MAX_DATA_TIDS - 1;
3625 
3626 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3627 	soc = pdev->soc;
3628 
3629 	mcs = ts->mcs;
3630 	pkt_type = ts->pkt_type;
3631 
3632 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3633 		dp_err("Release source is not from TQM");
3634 		return;
3635 	}
3636 
3637 	length = qdf_nbuf_len(tx_desc->nbuf);
3638 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3639 
3640 	if (qdf_unlikely(pdev->delay_stats_flag))
3641 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3642 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3643 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3644 
3645 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3646 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3647 
3648 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3649 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3650 
3651 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3652 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3653 
3654 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3655 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3656 
3657 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3658 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3659 
3660 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3661 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3662 
3663 	/*
3664 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3665 	 * stats. But on IPQ807X/IPQ6018 chipsets, owing to a HW limitation,
3666 	 * there are no completions for failed cases. Hence tx_failed is updated
3667 	 * from the data path. Note that if tx_failed is later derived from the
3668 	 * PPDU stats, this update has to be removed.
3669 	 */
3670 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3671 				peer->stats.tx.dropped.fw_rem_notx +
3672 				peer->stats.tx.dropped.fw_rem_tx +
3673 				peer->stats.tx.dropped.age_out +
3674 				peer->stats.tx.dropped.fw_reason1 +
3675 				peer->stats.tx.dropped.fw_reason2 +
3676 				peer->stats.tx.dropped.fw_reason3;
3677 
3678 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3679 		tid_stats->tqm_status_cnt[ts->status]++;
3680 	}
3681 
3682 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3683 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3684 		return;
3685 	}
3686 
3687 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3688 
3689 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3690 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3691 
3692 	/*
3693 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3694 	 * Return from here if HTT PPDU events are enabled.
3695 	 */
3696 	if (!(soc->process_tx_status))
3697 		return;
3698 
3699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3700 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3701 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3702 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3703 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3704 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3705 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3706 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3707 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3708 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3709 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3710 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3711 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3712 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3713 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3714 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3715 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3716 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3717 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3718 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3719 
3720 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3721 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3722 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3723 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3724 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3725 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3726 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3727 }
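
/*
 * The MCS accounting above follows a clamp-to-last-bucket pattern: for a
 * given preamble (pkt_type), an in-range MCS is counted in its own bucket
 * and an out-of-range MCS is counted in bucket MAX_MCS - 1. A minimal
 * sketch of the same idea (example_mcs_bucket() is hypothetical and not a
 * driver API):
 *
 *	static inline uint8_t example_mcs_bucket(uint8_t mcs,
 *						 uint8_t max_mcs_for_preamble)
 *	{
 *		return (mcs < max_mcs_for_preamble) ? mcs : (MAX_MCS - 1);
 *	}
 *
 * e.g. for DOT11_A frames the limit is MAX_MCS_11A, so any reported MCS
 * at or above it lands in mcs_count[MAX_MCS - 1].
 */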
3728 
3729 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3730 /**
3731  * dp_tx_flow_pool_lock() - take flow pool lock
3732  * @soc: core txrx main context
3733  * @tx_desc: tx desc
3734  *
3735  * Return: None
3736  */
3737 static inline
3738 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3739 			  struct dp_tx_desc_s *tx_desc)
3740 {
3741 	struct dp_tx_desc_pool_s *pool;
3742 	uint8_t desc_pool_id;
3743 
3744 	desc_pool_id = tx_desc->pool_id;
3745 	pool = &soc->tx_desc[desc_pool_id];
3746 
3747 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3748 }
3749 
3750 /**
3751  * dp_tx_flow_pool_unlock() - release flow pool lock
3752  * @soc: core txrx main context
3753  * @tx_desc: tx desc
3754  *
3755  * Return: None
3756  */
3757 static inline
3758 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3759 			    struct dp_tx_desc_s *tx_desc)
3760 {
3761 	struct dp_tx_desc_pool_s *pool;
3762 	uint8_t desc_pool_id;
3763 
3764 	desc_pool_id = tx_desc->pool_id;
3765 	pool = &soc->tx_desc[desc_pool_id];
3766 
3767 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3768 }
3769 #else
3770 static inline
3771 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3772 {
3773 }
3774 
3775 static inline
3776 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3777 {
3778 }
3779 #endif
3780 
3781 /**
3782  * dp_tx_notify_completion() - Notify tx completion for this desc
3783  * @soc: core txrx main context
3784  * @vdev: datapath vdev handle
3785  * @tx_desc: tx desc
3786  * @netbuf:  buffer
3787  * @status: tx status
3788  *
3789  * Return: none
3790  */
3791 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3792 					   struct dp_vdev *vdev,
3793 					   struct dp_tx_desc_s *tx_desc,
3794 					   qdf_nbuf_t netbuf,
3795 					   uint8_t status)
3796 {
3797 	void *osif_dev;
3798 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3799 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
3800 
3801 	qdf_assert(tx_desc);
3802 
3803 	dp_tx_flow_pool_lock(soc, tx_desc);
3804 
3805 	if (!vdev ||
3806 	    !vdev->osif_vdev) {
3807 		dp_tx_flow_pool_unlock(soc, tx_desc);
3808 		return;
3809 	}
3810 
3811 	osif_dev = vdev->osif_vdev;
3812 	tx_compl_cbk = vdev->tx_comp;
3813 	dp_tx_flow_pool_unlock(soc, tx_desc);
3814 
3815 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3816 		flag |= BIT(QDF_TX_RX_STATUS_OK);
3817 
3818 	if (tx_compl_cbk)
3819 		tx_compl_cbk(netbuf, osif_dev, flag);
3820 }
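
/*
 * The 'flag' passed to the completion callback above is a bitmask:
 * BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC) is always set and
 * BIT(QDF_TX_RX_STATUS_OK) is added when TQM reports the frame as acked.
 * A minimal consumer sketch, assuming the ol_txrx_completion_fp callback
 * takes (nbuf, context, flag) in the same order as the call above
 * (example_tx_comp_cb() is hypothetical, not part of the driver):
 *
 *	static void example_tx_comp_cb(qdf_nbuf_t nbuf, void *ctx,
 *				       uint16_t flag)
 *	{
 *		if (flag & BIT(QDF_TX_RX_STATUS_OK))
 *			;	// frame was acked over the air
 *		else if (flag & BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC))
 *			;	// delivered to target, no ack reported
 *	}
 */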
3821 
3822 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3823  * @pdev: pdev handle
 * @peer: DP peer handle
3824  * @tid: tid value
3825  * @txdesc_ts: timestamp from txdesc
3826  * @ppdu_id: ppdu id
3827  *
3828  * Return: none
3829  */
3830 #ifdef FEATURE_PERPKT_INFO
3831 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3832 					       struct dp_peer *peer,
3833 					       uint8_t tid,
3834 					       uint64_t txdesc_ts,
3835 					       uint32_t ppdu_id)
3836 {
3837 	uint64_t delta_ms;
3838 	struct cdp_tx_sojourn_stats *sojourn_stats;
3839 
3840 	if (qdf_unlikely(!pdev->enhanced_stats_en))
3841 		return;
3842 
3843 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3844 			 tid >= CDP_DATA_TID_MAX))
3845 		return;
3846 
3847 	if (qdf_unlikely(!pdev->sojourn_buf))
3848 		return;
3849 
3850 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3851 		qdf_nbuf_data(pdev->sojourn_buf);
3852 
3853 	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
3854 
3855 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3856 				txdesc_ts;
3857 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3858 			    delta_ms);
3859 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3860 	sojourn_stats->num_msdus[tid] = 1;
3861 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3862 		peer->avg_sojourn_msdu[tid].internal;
3863 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3864 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3865 			     WDI_NO_VAL, pdev->pdev_id);
3866 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3867 	sojourn_stats->num_msdus[tid] = 0;
3868 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3869 }
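
/*
 * Example of the publish/reset cycle above (illustrative numbers): if a
 * TID-0 MSDU was handed to the HW at txdesc_ts = 1000 ms and the
 * completion is processed at 1004 ms, delta_ms = 4. The per-TID EWMA is
 * updated with 4, a single-MSDU snapshot (sum = 4, num = 1, current EWMA)
 * is pushed through WDI_EVENT_TX_SOJOURN_STAT, and the snapshot fields
 * are zeroed so the next completion starts clean.
 */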
3870 #else
3871 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3872 					       struct dp_peer *peer,
3873 					       uint8_t tid,
3874 					       uint64_t txdesc_ts,
3875 					       uint32_t ppdu_id)
3876 {
3877 }
3878 #endif
3879 
3880 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
3881 /**
3882  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
3883  * @soc: dp_soc handle
3884  * @desc: Tx Descriptor
3885  * @ts: HAL Tx completion descriptor contents
3886  *
3887  * This function is used to send tx completion to packet capture
3888  */
3889 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
3890 				       struct dp_tx_desc_s *desc,
3891 				       struct hal_tx_completion_status *ts)
3892 {
3893 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
3894 			     desc, ts->peer_id,
3895 			     WDI_NO_VAL, desc->pdev->pdev_id);
3896 }
3897 #endif
3898 
3899 /**
3900  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3901  * @soc: DP Soc handle
3902  * @tx_desc: software Tx descriptor
3903  * @ts : Tx completion status from HAL/HTT descriptor
3904  *
3905  * Return: none
3906  */
3907 static inline void
3908 dp_tx_comp_process_desc(struct dp_soc *soc,
3909 			struct dp_tx_desc_s *desc,
3910 			struct hal_tx_completion_status *ts,
3911 			struct dp_peer *peer)
3912 {
3913 	uint64_t time_latency = 0;
3914 
3915 	/*
3916 	 * m_copy/tx_capture modes are not supported for
3917 	 * scatter gather packets
3918 	 */
3919 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3920 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
3921 				desc->timestamp);
3922 	}
3923 
3924 	dp_send_completion_to_pkt_capture(soc, desc, ts);
3925 
3926 	if (!(desc->msdu_ext_desc)) {
3927 		dp_tx_enh_unmap(soc, desc);
3928 
3929 		if (QDF_STATUS_SUCCESS ==
3930 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3931 			return;
3932 		}
3933 
3934 		if (QDF_STATUS_SUCCESS ==
3935 		    dp_get_completion_indication_for_stack(soc,
3936 							   desc->pdev,
3937 							   peer, ts,
3938 							   desc->nbuf,
3939 							   time_latency)) {
3940 			dp_send_completion_to_stack(soc,
3941 						    desc->pdev,
3942 						    ts->peer_id,
3943 						    ts->ppdu_id,
3944 						    desc->nbuf);
3945 			return;
3946 		}
3947 	}
3948 
3949 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
3950 	dp_tx_comp_free_buf(soc, desc);
3951 }
3952 
3953 #ifdef DISABLE_DP_STATS
3954 /**
3955  * dp_tx_update_connectivity_stats() - update tx connectivity stats
3956  * @soc: core txrx main context
3957  * @tx_desc: tx desc
3958  * @status: tx status
3959  *
3960  * Return: none
3961  */
3962 static inline
3963 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3964 				     struct dp_vdev *vdev,
3965 				     struct dp_tx_desc_s *tx_desc,
3966 				     uint8_t status)
3967 {
3968 }
3969 #else
3970 static inline
3971 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3972 				     struct dp_vdev *vdev,
3973 				     struct dp_tx_desc_s *tx_desc,
3974 				     uint8_t status)
3975 {
3976 	void *osif_dev;
3977 	ol_txrx_stats_rx_fp stats_cbk;
3978 	uint8_t pkt_type;
3979 
3980 	qdf_assert(tx_desc);
3981 
3982 	if (!vdev ||
3983 	    !vdev->osif_vdev ||
3984 	    !vdev->stats_cb)
3985 		return;
3986 
3987 	osif_dev = vdev->osif_vdev;
3988 	stats_cbk = vdev->stats_cb;
3989 
3990 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
3991 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3992 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
3993 			  &pkt_type);
3994 }
3995 #endif
3996 
3997 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3998 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3999 		      uint32_t delta_tsf)
4000 {
4001 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4002 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4003 						     DP_MOD_ID_CDP);
4004 
4005 	if (!vdev) {
4006 		dp_err_rl("vdev %d does not exist", vdev_id);
4007 		return;
4008 	}
4009 
4010 	vdev->delta_tsf = delta_tsf;
4011 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
4012 
4013 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4014 }
4015 
4016 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
4017 				      uint8_t vdev_id, bool enable)
4018 {
4019 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4020 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4021 						     DP_MOD_ID_CDP);
4022 
4023 	if (!vdev) {
4024 		dp_err_rl("vdev %d does not exist", vdev_id);
4025 		return QDF_STATUS_E_FAILURE;
4026 	}
4027 
4028 	qdf_atomic_set(&vdev->ul_delay_report, enable);
4029 
4030 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4031 
4032 	return QDF_STATUS_SUCCESS;
4033 }
4034 
4035 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4036 			       uint32_t *val)
4037 {
4038 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4039 	struct dp_vdev *vdev;
4040 	uint32_t delay_accum;
4041 	uint32_t pkts_accum;
4042 
4043 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
4044 	if (!vdev) {
4045 		dp_err_rl("vdev %d does not exist", vdev_id);
4046 		return QDF_STATUS_E_FAILURE;
4047 	}
4048 
4049 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
4050 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4051 		return QDF_STATUS_E_FAILURE;
4052 	}
4053 
4054 	/* Average uplink delay based on current accumulated values */
4055 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4056 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4057 
4058 	*val = delay_accum / pkts_accum;
4059 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
4060 		 delay_accum, pkts_accum);
4061 
4062 	/* Reset accumulated values to 0 */
4063 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
4064 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
4065 
4066 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4067 
4068 	return QDF_STATUS_SUCCESS;
4069 }
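
/*
 * Usage sketch for the uplink delay API above (illustrative only; soc_hdl
 * and vdev_id are assumptions of the example). Reporting has to be
 * enabled first so that dp_tx_update_uplink_delay() starts accumulating,
 * and since dp_get_uplink_delay() divides by ul_pkts_accum it should only
 * be read once at least one completion has been accumulated:
 *
 *	uint32_t avg_ul_delay_ms = 0;
 *
 *	dp_set_tsf_ul_delay_report(soc_hdl, vdev_id, true);
 *	...
 *	if (dp_get_uplink_delay(soc_hdl, vdev_id, &avg_ul_delay_ms) ==
 *	    QDF_STATUS_SUCCESS)
 *		;	// avg_ul_delay_ms holds the average; the
 *			// accumulators have been reset to 0
 */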
4070 
4071 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4072 				      struct hal_tx_completion_status *ts)
4073 {
4074 	uint32_t buffer_ts;
4075 	uint32_t delta_tsf;
4076 	uint32_t ul_delay;
4077 
4078 	/* If tx_rate_stats_info_valid is 0, the tsf is invalid; nothing to do */
4079 	if (!ts->valid)
4080 		return;
4081 
4082 	if (qdf_unlikely(!vdev)) {
4083 		dp_info_rl("vdev is null or delete in progress");
4084 		return;
4085 	}
4086 
4087 	if (!qdf_atomic_read(&vdev->ul_delay_report))
4088 		return;
4089 
4090 	delta_tsf = vdev->delta_tsf;
4091 
4092 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
4093 	 * WBM_RELEASE_RING_4. Shifting it left by 10 bits converts it to
4094 	 * microseconds, valid up to 29 bits.
4095 	 */
4096 	buffer_ts = ts->buffer_timestamp << 10;
4097 
4098 	ul_delay = ts->tsf - buffer_ts - delta_tsf;
4099 	ul_delay &= 0x1FFFFFFF; /* mask 29 BITS */
4100 	if (ul_delay > 0x1000000) {
4101 		dp_info_rl("----------------------\n"
4102 			   "Tx completion status:\n"
4103 			   "----------------------\n"
4104 			   "release_src = %d\n"
4105 			   "ppdu_id = 0x%x\n"
4106 			   "release_reason = %d\n"
4107 			   "tsf = %u (0x%x)\n"
4108 			   "buffer_timestamp = %u (0x%x)\n"
4109 			   "delta_tsf = %u (0x%x)\n",
4110 			   ts->release_src, ts->ppdu_id, ts->status,
4111 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
4112 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
4113 		return;
4114 	}
4115 
4116 	ul_delay /= 1000; /* in unit of ms */
4117 
4118 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
4119 	qdf_atomic_inc(&vdev->ul_pkts_accum);
4120 }
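
/*
 * Worked example of the uplink delay computation above (illustrative
 * numbers, not from hardware): with buffer_timestamp = 2000 (units of
 * 1024 us), buffer_ts = 2000 << 10 = 2048000 us. If ts->tsf = 2060000 us
 * and delta_tsf = 2000 us, then
 *
 *	ul_delay = (2060000 - 2048000 - 2000) & 0x1FFFFFFF = 10000 us
 *	ul_delay / 1000 = 10 ms
 *
 * which is added to ul_delay_accum and later averaged by
 * dp_get_uplink_delay(). Values above 0x1000000 us are treated as bogus
 * or wrapped and are only logged.
 */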
4121 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
4122 static inline
4123 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
4124 			       struct hal_tx_completion_status *ts)
4125 {
4126 }
4127 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4128 
4129 /**
4130  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
4131  * @soc: DP soc handle
4132  * @tx_desc: software descriptor head pointer
4133  * @ts: Tx completion status
4134  * @peer: peer handle
4135  * @ring_id: ring number
4136  *
4137  * Return: none
4138  */
4139 static inline
4140 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
4141 				  struct dp_tx_desc_s *tx_desc,
4142 				  struct hal_tx_completion_status *ts,
4143 				  struct dp_peer *peer, uint8_t ring_id)
4144 {
4145 	uint32_t length;
4146 	qdf_ether_header_t *eh;
4147 	struct dp_vdev *vdev = NULL;
4148 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4149 	enum qdf_dp_tx_rx_status dp_status;
4150 
4151 	if (!nbuf) {
4152 		dp_info_rl("invalid tx descriptor. nbuf NULL");
4153 		goto out;
4154 	}
4155 
4156 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4157 	length = qdf_nbuf_len(nbuf);
4158 
4159 	dp_status = dp_tx_hw_to_qdf(ts->status);
4160 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
4161 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
4162 				 QDF_TRACE_DEFAULT_PDEV_ID,
4163 				 qdf_nbuf_data_addr(nbuf),
4164 				 sizeof(qdf_nbuf_data(nbuf)),
4165 				 tx_desc->id, ts->status, dp_status));
4166 
4167 	dp_tx_comp_debug("-------------------- \n"
4168 			 "Tx Completion Stats: \n"
4169 			 "-------------------- \n"
4170 			 "ack_frame_rssi = %d \n"
4171 			 "first_msdu = %d \n"
4172 			 "last_msdu = %d \n"
4173 			 "msdu_part_of_amsdu = %d \n"
4174 			 "rate_stats valid = %d \n"
4175 			 "bw = %d \n"
4176 			 "pkt_type = %d \n"
4177 			 "stbc = %d \n"
4178 			 "ldpc = %d \n"
4179 			 "sgi = %d \n"
4180 			 "mcs = %d \n"
4181 			 "ofdma = %d \n"
4182 			 "tones_in_ru = %d \n"
4183 			 "tsf = %d \n"
4184 			 "ppdu_id = %d \n"
4185 			 "transmit_cnt = %d \n"
4186 			 "tid = %d \n"
4187 			 "peer_id = %d\n",
4188 			 ts->ack_frame_rssi, ts->first_msdu,
4189 			 ts->last_msdu, ts->msdu_part_of_amsdu,
4190 			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
4191 			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
4192 			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
4193 			 ts->transmit_cnt, ts->tid, ts->peer_id);
4194 
4195 	/* Update SoC level stats */
4196 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
4197 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
4198 
4199 	if (!peer) {
4200 		dp_info_rl("peer is null or deletion in progress");
4201 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
4202 		goto out;
4203 	}
4204 	vdev = peer->vdev;
4205 
4206 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
4207 	dp_tx_update_uplink_delay(soc, vdev, ts);
4208 
4209 	/* Update per-packet stats for mesh mode */
4210 	if (qdf_unlikely(vdev->mesh_vdev) &&
4211 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
4212 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
4213 
4214 	/* Update peer level stats */
4215 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
4216 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
4217 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
4218 
4219 			if ((peer->vdev->tx_encap_type ==
4220 				htt_cmn_pkt_type_ethernet) &&
4221 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
4222 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
4223 			}
4224 		}
4225 	} else {
4226 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
4227 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
4228 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
4229 			if (qdf_unlikely(peer->in_twt)) {
4230 				DP_STATS_INC_PKT(peer,
4231 						 tx.tx_success_twt,
4232 						 1, length);
4233 			}
4234 		}
4235 	}
4236 
4237 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
4238 	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
4239 
4240 #ifdef QCA_SUPPORT_RDK_STATS
4241 	if (soc->rdkstats_enabled)
4242 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
4243 					    tx_desc->timestamp,
4244 					    ts->ppdu_id);
4245 #endif
4246 
4247 out:
4248 	return;
4249 }
4250 
4251 /**
4252  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
4253  * @soc: core txrx main context
4254  * @comp_head: software descriptor head pointer
4255  * @ring_id: ring number
4256  *
4257  * This function will process batch of descriptors reaped by dp_tx_comp_handler
4258  * and release the software descriptors after processing is complete
4259  *
4260  * Return: none
4261  */
4262 static void
4263 dp_tx_comp_process_desc_list(struct dp_soc *soc,
4264 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
4265 {
4266 	struct dp_tx_desc_s *desc;
4267 	struct dp_tx_desc_s *next;
4268 	struct hal_tx_completion_status ts;
4269 	struct dp_peer *peer = NULL;
4270 	uint16_t peer_id = DP_INVALID_PEER;
4271 	qdf_nbuf_t netbuf;
4272 
4273 	desc = comp_head;
4274 
4275 	while (desc) {
4276 		if (peer_id != desc->peer_id) {
4277 			if (peer)
4278 				dp_peer_unref_delete(peer,
4279 						     DP_MOD_ID_TX_COMP);
4280 			peer_id = desc->peer_id;
4281 			peer = dp_peer_get_ref_by_id(soc, peer_id,
4282 						     DP_MOD_ID_TX_COMP);
4283 		}
4284 
4285 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
4286 			struct dp_pdev *pdev = desc->pdev;
4287 
4288 			if (qdf_likely(peer)) {
4289 				/*
4290 				 * Increment peer statistics
4291 				 * Minimal statistics update done here
4292 				 */
4293 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
4294 						 desc->length);
4295 
4296 				if (desc->tx_status !=
4297 						HAL_TX_TQM_RR_FRAME_ACKED)
4298 					DP_STATS_INC(peer, tx.tx_failed, 1);
4299 			}
4300 
4301 			qdf_assert(pdev);
4302 			dp_tx_outstanding_dec(pdev);
4303 
4304 			/*
4305 			 * Calling a QDF wrapper here creates a significant
4306 			 * performance impact, so the wrapper call is avoided here
4307 			 */
4308 			next = desc->next;
4309 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
4310 					       desc->id, DP_TX_COMP_UNMAP);
4311 			qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
4312 							   desc->nbuf,
4313 							   desc->dma_addr,
4314 							   QDF_DMA_TO_DEVICE,
4315 							   desc->length);
4316 			qdf_nbuf_free(desc->nbuf);
4317 			dp_tx_desc_free(soc, desc, desc->pool_id);
4318 			desc = next;
4319 			continue;
4320 		}
4321 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
4322 
4323 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
4324 
4325 		netbuf = desc->nbuf;
4326 		/* check tx complete notification */
4327 		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
4328 			dp_tx_notify_completion(soc, peer->vdev, desc,
4329 						netbuf, ts.status);
4330 
4331 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
4332 
4333 		next = desc->next;
4334 
4335 		dp_tx_desc_release(desc, desc->pool_id);
4336 		desc = next;
4337 	}
4338 	if (peer)
4339 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
4340 }
4341 
4342 /**
4343  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
4344  * @soc: Handle to DP soc structure
4345  * @tx_desc: software descriptor head pointer
4346  * @status : Tx completion status from HTT descriptor
4347  * @ring_id: ring number
4348  *
4349  * This function will process HTT Tx indication messages from Target
4350  *
4351  * Return: none
4352  */
4353 static
4354 void dp_tx_process_htt_completion(struct dp_soc *soc,
4355 				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
4356 				  uint8_t ring_id)
4357 {
4358 	uint8_t tx_status;
4359 	struct dp_pdev *pdev;
4360 	struct dp_vdev *vdev;
4361 	struct hal_tx_completion_status ts = {0};
4362 	uint32_t *htt_desc = (uint32_t *)status;
4363 	struct dp_peer *peer;
4364 	struct cdp_tid_tx_stats *tid_stats = NULL;
4365 	struct htt_soc *htt_handle;
4366 	uint8_t vdev_id;
4367 
4368 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
4369 	htt_handle = (struct htt_soc *)soc->htt_handle;
4370 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
4371 
4372 	/*
4373 	 * There can be a scenario where WBM consumes the descriptor enqueued
4374 	 * from TQM2WBM first, so the TQM completion can happen before the MEC
4375 	 * notification comes from FW2WBM. Avoid accessing any field of the tx
4376 	 * descriptor in case of a MEC notify.
4377 	 */
4378 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
4379 		/*
4380 		 * Get vdev id from HTT status word in case of MEC
4381 		 * notification
4382 		 */
4383 		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
4384 		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4385 			return;
4386 
4387 		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4388 				DP_MOD_ID_HTT_COMP);
4389 		if (!vdev)
4390 			return;
4391 		dp_tx_mec_handler(vdev, status);
4392 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4393 		return;
4394 	}
4395 
4396 	/*
4397 	 * If the descriptor is already freed in vdev_detach,
4398 	 * continue to next descriptor
4399 	 */
4400 	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
4401 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", tx_desc->id);
4402 		return;
4403 	}
4404 
4405 	pdev = tx_desc->pdev;
4406 
4407 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4408 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
4409 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
4410 		dp_tx_comp_free_buf(soc, tx_desc);
4411 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4412 		return;
4413 	}
4414 
4415 	qdf_assert(tx_desc->pdev);
4416 
4417 	vdev_id = tx_desc->vdev_id;
4418 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4419 			DP_MOD_ID_HTT_COMP);
4420 
4421 	if (!vdev)
4422 		return;
4423 
4424 	switch (tx_status) {
4425 	case HTT_TX_FW2WBM_TX_STATUS_OK:
4426 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
4427 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
4428 	{
4429 		uint8_t tid;
4430 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
4431 			ts.peer_id =
4432 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
4433 						htt_desc[2]);
4434 			ts.tid =
4435 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
4436 						htt_desc[2]);
4437 		} else {
4438 			ts.peer_id = HTT_INVALID_PEER;
4439 			ts.tid = HTT_INVALID_TID;
4440 		}
4441 		ts.ppdu_id =
4442 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
4443 					htt_desc[1]);
4444 		ts.ack_frame_rssi =
4445 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
4446 					htt_desc[1]);
4447 
4448 		ts.tsf = htt_desc[3];
4449 		ts.first_msdu = 1;
4450 		ts.last_msdu = 1;
4451 		tid = ts.tid;
4452 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4453 			tid = CDP_MAX_DATA_TIDS - 1;
4454 
4455 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
4456 
4457 		if (qdf_unlikely(pdev->delay_stats_flag))
4458 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
4459 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
4460 			tid_stats->htt_status_cnt[tx_status]++;
4461 		}
4462 
4463 		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
4464 					     DP_MOD_ID_HTT_COMP);
4465 
4466 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
4467 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
4468 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4469 
4470 		if (qdf_likely(peer))
4471 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
4472 
4473 		break;
4474 	}
4475 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
4476 	{
4477 		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
4478 		break;
4479 	}
4480 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
4481 	{
4482 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
4483 		break;
4484 	}
4485 	default:
4486 		dp_tx_comp_debug("Invalid HTT tx_status %d\n",
4487 				 tx_status);
4488 		break;
4489 	}
4490 
4491 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
4492 }
4493 
4494 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
4495 static inline
4496 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4497 				   int max_reap_limit)
4498 {
4499 	bool limit_hit = false;
4500 
4501 	limit_hit =
4502 		(num_reaped >= max_reap_limit) ? true : false;
4503 
4504 	if (limit_hit)
4505 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
4506 
4507 	return limit_hit;
4508 }
4509 
4510 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4511 {
4512 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
4513 }
4514 
4515 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4516 {
4517 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
4518 
4519 	return cfg->tx_comp_loop_pkt_limit;
4520 }
4521 #else
4522 static inline
4523 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
4524 				   int max_reap_limit)
4525 {
4526 	return false;
4527 }
4528 
4529 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
4530 {
4531 	return false;
4532 }
4533 
4534 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
4535 {
4536 	return 0;
4537 }
4538 #endif
4539 
4540 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
4541 static inline int
4542 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4543 				  int *max_reap_limit)
4544 {
4545 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
4546 							       max_reap_limit);
4547 }
4548 #else
4549 static inline int
4550 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
4551 				  int *max_reap_limit)
4552 {
4553 	return 0;
4554 }
4555 #endif
4556 
4557 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
4558 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
4559 			    uint32_t quota)
4560 {
4561 	void *tx_comp_hal_desc;
4562 	uint8_t buffer_src;
4563 	struct dp_tx_desc_s *tx_desc = NULL;
4564 	struct dp_tx_desc_s *head_desc = NULL;
4565 	struct dp_tx_desc_s *tail_desc = NULL;
4566 	uint32_t num_processed = 0;
4567 	uint32_t count;
4568 	uint32_t num_avail_for_reap = 0;
4569 	bool force_break = false;
4570 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
4571 	int max_reap_limit, ring_near_full;
4572 
4573 	DP_HIST_INIT();
4574 
4575 more_data:
4576 	/* Re-initialize local variables to be re-used */
4577 	head_desc = NULL;
4578 	tail_desc = NULL;
4579 	count = 0;
4580 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
4581 
4582 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
4583 							   &max_reap_limit);
4584 
4585 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
4586 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
4587 		return 0;
4588 	}
4589 
4590 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
4591 
4592 	if (num_avail_for_reap >= quota)
4593 		num_avail_for_reap = quota;
4594 
4595 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
4596 
4597 	/* Find head descriptor from completion ring */
4598 	while (qdf_likely(num_avail_for_reap--)) {
4599 
4600 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
4601 		if (qdf_unlikely(!tx_comp_hal_desc))
4602 			break;
4603 		buffer_src = hal_tx_comp_get_buffer_source(soc->hal_soc,
4604 							   tx_comp_hal_desc);
4605 
4606 		/* If this buffer was not released by TQM or FW, then it is not
4607 		 * a Tx completion indication; log an error and skip it */
4608 		if (qdf_unlikely(buffer_src !=
4609 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
4610 				 (qdf_unlikely(buffer_src !=
4611 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
4612 			uint8_t wbm_internal_error;
4613 
4614 			dp_err_rl(
4615 				"Tx comp release_src != TQM | FW but from %d",
4616 				buffer_src);
4617 			hal_dump_comp_desc(tx_comp_hal_desc);
4618 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
4619 
4620 			/* When WBM sees NULL buffer_addr_info in any of
4621 			 * ingress rings it sends an error indication,
4622 			 * with wbm_internal_error=1, to a specific ring.
4623 			 * The WBM2SW ring used to indicate these errors is
4624 			 * fixed in HW, and that ring is being used as Tx
4625 			 * completion ring. These errors are not related to
4626 			 * Tx completions, and should just be ignored
4627 			 */
4628 			wbm_internal_error = hal_get_wbm_internal_error(
4629 							soc->hal_soc,
4630 							tx_comp_hal_desc);
4631 
4632 			if (wbm_internal_error) {
4633 				dp_err_rl("Tx comp wbm_internal_error!!");
4634 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
4635 
4636 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
4637 								buffer_src)
4638 					dp_handle_wbm_internal_error(
4639 						soc,
4640 						tx_comp_hal_desc,
4641 						hal_tx_comp_get_buffer_type(
4642 							tx_comp_hal_desc));
4643 
4644 			} else {
4645 				dp_err_rl("Tx comp wbm_internal_error false");
4646 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
4647 			}
4648 			continue;
4649 		}
4650 
4651 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
4652 							       tx_comp_hal_desc,
4653 							       &tx_desc);
4654 		if (!tx_desc) {
4655 			dp_err("unable to retrieve tx_desc!");
4656 			QDF_BUG(0);
4657 			continue;
4658 		}
4659 		tx_desc->buffer_src = buffer_src;
4660 		/*
4661 		 * If the release source is FW, process the HTT status
4662 		 */
4663 		if (qdf_unlikely(buffer_src ==
4664 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
4665 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
4666 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
4667 					htt_tx_status);
4668 			dp_tx_process_htt_completion(soc, tx_desc,
4669 					htt_tx_status, ring_id);
4670 		} else {
4671 			tx_desc->peer_id =
4672 				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
4673 			tx_desc->tx_status =
4674 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4675 			tx_desc->buffer_src = buffer_src;
4676 			/*
4677 			 * If the fast completion mode is enabled, the extended
4678 			 * metadata from the descriptor is not copied
4679 			 */
4680 			if (qdf_likely(tx_desc->flags &
4681 						DP_TX_DESC_FLAG_SIMPLE))
4682 				goto add_to_pool;
4683 
4684 			/*
4685 			 * If the descriptor is already freed in vdev_detach,
4686 			 * continue to next descriptor
4687 			 */
4688 			if (qdf_unlikely
4689 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
4690 				 !tx_desc->flags)) {
4691 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
4692 						   tx_desc->id);
4693 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
4694 				continue;
4695 			}
4696 
4697 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4698 				dp_tx_comp_info_rl("pdev in down state %d",
4699 						   tx_desc->id);
4700 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
4701 				dp_tx_comp_free_buf(soc, tx_desc);
4702 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4703 				goto next_desc;
4704 			}
4705 
4706 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4707 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
4708 				dp_tx_comp_alert("Txdesc invalid, flags = %x, id = %d",
4709 						 tx_desc->flags, tx_desc->id);
4710 				qdf_assert_always(0);
4711 			}
4712 
4713 			/* Collect hw completion contents */
4714 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4715 					      &tx_desc->comp, 1);
4716 add_to_pool:
4717 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4718 
4719 			/* First ring descriptor on the cycle */
4720 			if (!head_desc) {
4721 				head_desc = tx_desc;
4722 				tail_desc = tx_desc;
4723 			}
4724 
4725 			tail_desc->next = tx_desc;
4726 			tx_desc->next = NULL;
4727 			tail_desc = tx_desc;
4728 		}
4729 next_desc:
4730 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4731 
4732 		/*
4733 		 * Stop processing once the processed packet count
4734 		 * reaches the per-loop reap limit
4735 		 */
4736 
4737 		count++;
4738 
4739 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
4740 			break;
4741 	}
4742 
4743 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4744 
4745 	/* Process the reaped descriptors */
4746 	if (head_desc)
4747 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4748 
4749 	/*
4750 	 * If we are processing in the near-full condition, there are 3 scenarios
4751 	 * 1) Ring entries has reached critical state
4752 	 * 2) Ring entries are still near high threshold
4753 	 * 3) Ring entries are below the safe level
4754 	 *
4755 	 * One more loop will move the state to normal processing and yield
4756 	 */
4757 	if (ring_near_full)
4758 		goto more_data;
4759 
4760 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4761 
4762 		if (num_processed >= quota)
4763 			force_break = true;
4764 
4765 		if (!force_break &&
4766 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
4767 						  hal_ring_hdl)) {
4768 			DP_STATS_INC(soc, tx.hp_oos2, 1);
4769 			if (!hif_exec_should_yield(soc->hif_handle,
4770 						   int_ctx->dp_intr_id))
4771 				goto more_data;
4772 		}
4773 	}
4774 	DP_TX_HIST_STATS_PER_PDEV();
4775 
4776 	return num_processed;
4777 }
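
/*
 * Usage sketch for dp_tx_comp_handler() (illustrative only): the DP
 * interrupt/NAPI service path invokes it per Tx completion ring with a
 * budget and subtracts the work done from the remaining budget. int_ctx,
 * tx_comp_hal_ring_hdl, ring_id and the budget value are assumptions of
 * the example, not driver defaults:
 *
 *	uint32_t budget = 64;
 *	uint32_t work_done;
 *
 *	work_done = dp_tx_comp_handler(int_ctx, soc, tx_comp_hal_ring_hdl,
 *				       ring_id, budget);
 *	budget = (work_done < budget) ? (budget - work_done) : 0;
 */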
4778 
4779 #ifdef FEATURE_WLAN_TDLS
4780 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4781 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4782 {
4783 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4784 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4785 						     DP_MOD_ID_TDLS);
4786 
4787 	if (!vdev) {
4788 		dp_err("vdev handle for id %d is NULL", vdev_id);
4789 		return NULL;
4790 	}
4791 
4792 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4793 		vdev->is_tdls_frame = true;
4794 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
4795 
4796 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4797 }
4798 #endif
4799 
4800 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4801 {
4802 	struct wlan_cfg_dp_soc_ctxt *cfg;
4803 
4804 	struct dp_soc *soc;
4805 
4806 	soc = vdev->pdev->soc;
4807 	if (!soc)
4808 		return;
4809 
4810 	cfg = soc->wlan_cfg_ctx;
4811 	if (!cfg)
4812 		return;
4813 
4814 	if (vdev->opmode == wlan_op_mode_ndi)
4815 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4816 	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
4817 		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
4818 		 (vdev->subtype == wlan_op_subtype_p2p_go))
4819 		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
4820 	else
4821 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4822 }
4823 
4824 /**
4825  * dp_tx_vdev_attach() - attach vdev to dp tx
4826  * @vdev: virtual device instance
4827  *
4828  * Return: QDF_STATUS_SUCCESS: success
4829  *         QDF_STATUS_E_RESOURCES: Error return
4830  */
4831 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4832 {
4833 	int pdev_id;
4834 	/*
4835 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4836 	 */
4837 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4838 				     HTT_TCL_METADATA_TYPE_VDEV_BASED);
4839 
4840 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4841 					vdev->vdev_id);
4842 
4843 	pdev_id =
4844 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4845 						       vdev->pdev->pdev_id);
4846 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4847 
4848 	/*
4849 	 * Set HTT Extension Valid bit to 0 by default
4850 	 */
4851 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4852 
4853 	dp_tx_vdev_update_search_flags(vdev);
4854 
4855 	dp_tx_vdev_update_feature_flags(vdev);
4856 
4857 	return QDF_STATUS_SUCCESS;
4858 }
4859 
4860 #ifndef FEATURE_WDS
4861 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4862 {
4863 	return false;
4864 }
4865 #endif
4866 
4867 /**
4868  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4869  * @vdev: virtual device instance
4870  *
4871  * Return: void
4872  *
4873  */
4874 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4875 {
4876 	struct dp_soc *soc = vdev->pdev->soc;
4877 
4878 	/*
4879 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
4880 	 * for TDLS link
4881 	 *
4882 	 * Enable AddrY (SA based search) only for non-WDS STA and
4883 	 * ProxySTA VAP (in HKv1) modes.
4884 	 *
4885 	 * In all other VAP modes, only DA based search should be
4886 	 * enabled
4887 	 */
4888 	if (vdev->opmode == wlan_op_mode_sta &&
4889 	    vdev->tdls_link_connected)
4890 		vdev->hal_desc_addr_search_flags =
4891 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4892 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4893 		 !dp_tx_da_search_override(vdev))
4894 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4895 	else
4896 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4897 
4898 	/* Set search type only when peer map v2 messaging is enabled
4899 	 * as we will have the search index (AST hash) only when v2 is
4900 	 * enabled
4901 	 */
4902 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4903 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4904 	else
4905 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4906 }
4907 
4908 static inline bool
4909 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4910 			  struct dp_vdev *vdev,
4911 			  struct dp_tx_desc_s *tx_desc)
4912 {
4913 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4914 		return false;
4915 
4916 	/*
4917 	 * If vdev is given, then only check whether the desc
4918 	 * vdev matches. If vdev is NULL, then check whether the
4919 	 * desc pdev matches.
4920 	 */
4921 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
4922 		(tx_desc->pdev == pdev);
4923 }
4924 
4925 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4926 /**
4927  * dp_tx_desc_flush() - release resources associated
4928  *                      to TX Desc
4929  *
4930  * @dp_pdev: Handle to DP pdev structure
4931  * @vdev: virtual device instance
4932  * NULL: no specific Vdev is required; check all allocated TX desc
4933  * on this pdev.
4934  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
4935  *
4936  * @force_free:
4937  * true: flush the TX desc.
4938  * false: only reset the Vdev in each allocated TX desc
4939  * that is associated with the current Vdev.
4940  *
4941  * This function will go through the TX desc pool to flush
4942  * the outstanding TX data or reset Vdev to NULL in associated TX
4943  * Desc.
4944  */
4945 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4946 		      bool force_free)
4947 {
4948 	uint8_t i;
4949 	uint32_t j;
4950 	uint32_t num_desc, page_id, offset;
4951 	uint16_t num_desc_per_page;
4952 	struct dp_soc *soc = pdev->soc;
4953 	struct dp_tx_desc_s *tx_desc = NULL;
4954 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4955 
4956 	if (!vdev && !force_free) {
4957 		dp_err("Reset TX desc vdev, Vdev param is required!");
4958 		return;
4959 	}
4960 
4961 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4962 		tx_desc_pool = &soc->tx_desc[i];
4963 		if (!(tx_desc_pool->pool_size) ||
4964 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4965 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4966 			continue;
4967 
4968 		/*
4969 		 * Add flow pool lock protection in case the pool is freed
4970 		 * because all tx_desc get recycled while handling TX completion.
4971 		 * This is not necessary for a force flush because:
4972 		 * a. a double lock would happen if dp_tx_desc_release is
4973 		 *    also trying to acquire it.
4974 		 * b. the dp interrupt has been disabled before the force TX desc
4975 		 *    flush in dp_pdev_deinit().
4976 		 */
4977 		if (!force_free)
4978 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4979 		num_desc = tx_desc_pool->pool_size;
4980 		num_desc_per_page =
4981 			tx_desc_pool->desc_pages.num_element_per_page;
4982 		for (j = 0; j < num_desc; j++) {
4983 			page_id = j / num_desc_per_page;
4984 			offset = j % num_desc_per_page;
4985 
4986 			if (qdf_unlikely(!(tx_desc_pool->
4987 					 desc_pages.cacheable_pages)))
4988 				break;
4989 
4990 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4991 
4992 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4993 				/*
4994 				 * Free TX desc if force free is
4995 				 * required, otherwise only reset vdev
4996 				 * in this TX desc.
4997 				 */
4998 				if (force_free) {
4999 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5000 					dp_tx_comp_free_buf(soc, tx_desc);
5001 					dp_tx_desc_release(tx_desc, i);
5002 				} else {
5003 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5004 				}
5005 			}
5006 		}
5007 		if (!force_free)
5008 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
5009 	}
5010 }
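
/*
 * Worked example of the descriptor lookup arithmetic used above
 * (illustrative numbers, not configuration defaults): with
 * num_element_per_page = 64, descriptor index j = 130 maps to
 *
 *	page_id = 130 / 64 = 2
 *	offset  = 130 % 64 = 2
 *
 * so dp_tx_desc_find(soc, pool_id, 2, 2) returns the 131st descriptor of
 * that pool. The non-flow-control variant of dp_tx_desc_flush() below
 * uses the same page/offset scheme.
 */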
5011 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5012 /**
5013  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
5014  *
5015  * @soc: Handle to DP soc structure
5016  * @tx_desc: pointer of one TX desc
5017  * @desc_pool_id: TX Desc pool id
5018  */
5019 static inline void
5020 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
5021 		      uint8_t desc_pool_id)
5022 {
5023 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
5024 
5025 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
5026 
5027 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
5028 }
5029 
5030 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
5031 		      bool force_free)
5032 {
5033 	uint8_t i, num_pool;
5034 	uint32_t j;
5035 	uint32_t num_desc, page_id, offset;
5036 	uint16_t num_desc_per_page;
5037 	struct dp_soc *soc = pdev->soc;
5038 	struct dp_tx_desc_s *tx_desc = NULL;
5039 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
5040 
5041 	if (!vdev && !force_free) {
5042 		dp_err("Reset TX desc vdev, Vdev param is required!");
5043 		return;
5044 	}
5045 
5046 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5047 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5048 
5049 	for (i = 0; i < num_pool; i++) {
5050 		tx_desc_pool = &soc->tx_desc[i];
5051 		if (!tx_desc_pool->desc_pages.cacheable_pages)
5052 			continue;
5053 
5054 		num_desc_per_page =
5055 			tx_desc_pool->desc_pages.num_element_per_page;
5056 		for (j = 0; j < num_desc; j++) {
5057 			page_id = j / num_desc_per_page;
5058 			offset = j % num_desc_per_page;
5059 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
5060 
5061 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
5062 				if (force_free) {
5063 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
5064 					dp_tx_comp_free_buf(soc, tx_desc);
5065 					dp_tx_desc_release(tx_desc, i);
5066 				} else {
5067 					dp_tx_desc_reset_vdev(soc, tx_desc,
5068 							      i);
5069 				}
5070 			}
5071 		}
5072 	}
5073 }
5074 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5075 
5076 /**
5077  * dp_tx_vdev_detach() - detach vdev from dp tx
5078  * @vdev: virtual device instance
5079  *
5080  * Return: QDF_STATUS_SUCCESS: success
5081  *         QDF_STATUS_E_RESOURCES: Error return
5082  */
5083 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
5084 {
5085 	struct dp_pdev *pdev = vdev->pdev;
5086 
5087 	/* Reset the vdev in all TX descriptors associated with this Vdev */
5088 	dp_tx_desc_flush(pdev, vdev, false);
5089 
5090 	return QDF_STATUS_SUCCESS;
5091 }
5092 
5093 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5094 /* Pools will be allocated dynamically */
5095 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5096 					   int num_desc)
5097 {
5098 	uint8_t i;
5099 
5100 	for (i = 0; i < num_pool; i++) {
5101 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
5102 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
5103 	}
5104 
5105 	return QDF_STATUS_SUCCESS;
5106 }
5107 
5108 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5109 					  int num_desc)
5110 {
5111 	return QDF_STATUS_SUCCESS;
5112 }
5113 
5114 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5115 {
5116 }
5117 
5118 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5119 {
5120 	uint8_t i;
5121 
5122 	for (i = 0; i < num_pool; i++)
5123 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
5124 }
5125 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
5126 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
5127 					   int num_desc)
5128 {
5129 	uint8_t i, count;
5130 
5131 	/* Allocate software Tx descriptor pools */
5132 	for (i = 0; i < num_pool; i++) {
5133 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
5134 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5135 				  FL("Tx Desc Pool alloc %d failed %pK"),
5136 				  i, soc);
5137 			goto fail;
5138 		}
5139 	}
5140 	return QDF_STATUS_SUCCESS;
5141 
5142 fail:
5143 	for (count = 0; count < i; count++)
5144 		dp_tx_desc_pool_free(soc, count);
5145 
5146 	return QDF_STATUS_E_NOMEM;
5147 }
5148 
5149 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
5150 					  int num_desc)
5151 {
5152 	uint8_t i;
5153 	for (i = 0; i < num_pool; i++) {
5154 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
5155 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5156 				  FL("Tx Desc Pool init %d failed %pK"),
5157 				  i, soc);
5158 			return QDF_STATUS_E_NOMEM;
5159 		}
5160 	}
5161 	return QDF_STATUS_SUCCESS;
5162 }
5163 
5164 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
5165 {
5166 	uint8_t i;
5167 
5168 	for (i = 0; i < num_pool; i++)
5169 		dp_tx_desc_pool_deinit(soc, i);
5170 }
5171 
5172 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
5173 {
5174 	uint8_t i;
5175 
5176 	for (i = 0; i < num_pool; i++)
5177 		dp_tx_desc_pool_free(soc, i);
5178 }
5179 
5180 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
5181 
5182 /**
5183  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
5184  * @soc: core txrx main context
5185  * @num_pool: number of pools
5186  *
5187  */
5188 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
5189 {
5190 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
5191 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
5192 }
5193 
5194 /**
5195  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
5196  * @soc: core txrx main context
5197  * @num_pool: number of pools
5198  *
5199  */
5200 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
5201 {
5202 	dp_tx_tso_desc_pool_free(soc, num_pool);
5203 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
5204 }
5205 
5206 /**
5207  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
5208  * @soc: core txrx main context
5209  *
5210  * This function frees all tx related descriptors as below
5211  * 1. Regular TX descriptors (static pools)
5212  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5213  * 3. TSO descriptors
5214  *
5215  */
5216 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
5217 {
5218 	uint8_t num_pool;
5219 
5220 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5221 
5222 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5223 	dp_tx_ext_desc_pool_free(soc, num_pool);
5224 	dp_tx_delete_static_pools(soc, num_pool);
5225 }
5226 
5227 /**
5228  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
5229  * @soc: core txrx main context
5230  *
5231  * This function de-initializes all tx related descriptors as below
5232  * 1. Regular TX descriptors (static pools)
5233  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
5234  * 3. TSO descriptors
5235  *
5236  */
5237 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
5238 {
5239 	uint8_t num_pool;
5240 
5241 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5242 
5243 	dp_tx_flow_control_deinit(soc);
5244 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5245 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5246 	dp_tx_deinit_static_pools(soc, num_pool);
5247 }
5248 
5249 /**
5250  * dp_tx_tso_cmn_desc_pool_alloc() - Allocate TSO descriptor pools
5251  * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
5252  *
5253  * Reserve TSO descriptor buffers
5254  *
5255  * Return: QDF_STATUS_E_FAILURE on failure or
5256  * QDF_STATUS_SUCCESS on success
5257  */
5258 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
5259 					 uint8_t num_pool,
5260 					 uint16_t num_desc)
5261 {
5262 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
5263 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
5264 		return QDF_STATUS_E_FAILURE;
5265 	}
5266 
5267 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
5268 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
5269 		       num_pool, soc);
5270 		return QDF_STATUS_E_FAILURE;
5271 	}
5272 	return QDF_STATUS_SUCCESS;
5273 }
5274 
5275 /**
5276  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
5277  * @soc: DP soc handle
5278  * @num_pool: Number of pools
5279  * @num_desc: Number of descriptors
5280  *
5281  * Initialize TSO descriptor pools
5282  *
5283  * Return: QDF_STATUS_E_FAILURE on failure or
5284  * QDF_STATUS_SUCCESS on success
5285  */
5286 
5287 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
5288 					uint8_t num_pool,
5289 					uint16_t num_desc)
5290 {
5291 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
5292 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
5293 		return QDF_STATUS_E_FAILURE;
5294 	}
5295 
5296 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
5297 		dp_err("TSO Num of seg Pool init %d failed %pK",
5298 		       num_pool, soc);
5299 		return QDF_STATUS_E_FAILURE;
5300 	}
5301 	return QDF_STATUS_SUCCESS;
5302 }
5303 
5304 /**
5305  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
5306  * @soc: core txrx main context
5307  *
5308  * This function allocates memory for following descriptor pools
5309  * 1. regular sw tx descriptor pools (static pools)
5310  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5311  * 3. TSO descriptor pools
5312  *
5313  * Return: QDF_STATUS_SUCCESS: success
5314  *         QDF_STATUS_E_RESOURCES: Error return
5315  */
5316 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
5317 {
5318 	uint8_t num_pool;
5319 	uint32_t num_desc;
5320 	uint32_t num_ext_desc;
5321 
5322 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5323 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5324 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5325 
5326 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5327 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
5328 		  __func__, num_pool, num_desc);
5329 
5330 	if ((num_pool > MAX_TXDESC_POOLS) ||
5331 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
5332 		goto fail1;
5333 
5334 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
5335 		goto fail1;
5336 
5337 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
5338 		goto fail2;
5339 
5340 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5341 		return QDF_STATUS_SUCCESS;
5342 
5343 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5344 		goto fail3;
5345 
5346 	return QDF_STATUS_SUCCESS;
5347 
5348 fail3:
5349 	dp_tx_ext_desc_pool_free(soc, num_pool);
5350 fail2:
5351 	dp_tx_delete_static_pools(soc, num_pool);
5352 fail1:
5353 	return QDF_STATUS_E_RESOURCES;
5354 }
5355 
5356 /**
5357  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
5358  * @soc: core txrx main context
5359  *
5360  * This function initializes the following TX descriptor pools
5361  * 1. regular sw tx descriptor pools (static pools)
5362  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
5363  * 3. TSO descriptor pools
5364  *
5365  * Return: QDF_STATUS_SUCCESS: success
5366  *	   QDF_STATUS_E_RESOURCES: Error return
5367  */
5368 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
5369 {
5370 	uint8_t num_pool;
5371 	uint32_t num_desc;
5372 	uint32_t num_ext_desc;
5373 
5374 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5375 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5376 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5377 
5378 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
5379 		goto fail1;
5380 
5381 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
5382 		goto fail2;
5383 
5384 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
5385 		return QDF_STATUS_SUCCESS;
5386 
5387 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5388 		goto fail3;
5389 
5390 	dp_tx_flow_control_init(soc);
5391 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
5392 	return QDF_STATUS_SUCCESS;
5393 
5394 fail3:
5395 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
5396 fail2:
5397 	dp_tx_deinit_static_pools(soc, num_pool);
5398 fail1:
5399 	return QDF_STATUS_E_RESOURCES;
5400 }
5401 
5402 /**
5403  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
5404  * @txrx_soc: dp soc handle
5405  *
5406  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5407  *			QDF_STATUS_E_FAILURE
5408  */
5409 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
5410 {
5411 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5412 	uint8_t num_pool;
5413 	uint32_t num_desc;
5414 	uint32_t num_ext_desc;
5415 
5416 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5417 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
5418 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
5419 
5420 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
5421 		return QDF_STATUS_E_FAILURE;
5422 
5423 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
5424 		return QDF_STATUS_E_FAILURE;
5425 
5426 	return QDF_STATUS_SUCCESS;
5427 }
5428 
5429 /**
5430  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
5431  * @txrx_soc: dp soc handle
5432  *
5433  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
5434  */
5435 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
5436 {
5437 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5438 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5439 
5440 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
5441 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
5442 
5443 	return QDF_STATUS_SUCCESS;
5444 }
5445 
5446