xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #ifdef ATH_SUPPORT_IQUE
41 #include "dp_txrx_me.h"
42 #endif
43 #include "dp_hist.h"
44 
45 
46 /* TODO Add support in TSO */
47 #define DP_DESC_NUM_FRAG(x) 0
48 
49 /* disable TQM_BYPASS */
50 #define TQM_BYPASS_WAR 0
51 
52 /* invalid peer id for reinject */
53 #define DP_INVALID_PEER 0XFFFE
54 
55 /* mapping between hal encrypt type and cdp_sec_type */
56 #define MAX_CDP_SEC_TYPE 12
57 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
58 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
59 					HAL_TX_ENCRYPT_TYPE_WEP_128,
60 					HAL_TX_ENCRYPT_TYPE_WEP_104,
61 					HAL_TX_ENCRYPT_TYPE_WEP_40,
62 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
63 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
64 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
65 					HAL_TX_ENCRYPT_TYPE_WAPI,
66 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
67 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
68 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
69 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
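/*
 * This table is indexed by cdp_sec_type and yields the corresponding HAL
 * encrypt type. Illustrative use, a sketch of the pattern applied later in
 * dp_tx_hw_enqueue(), where sec_type comes from tx_exc_metadata or
 * vdev->sec_type:
 *
 *	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *				     sec_type_map[sec_type]);
 */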
70 
71 #ifdef QCA_TX_LIMIT_CHECK
72 /**
73  * dp_tx_limit_check() - Check if allocated tx descriptors reached
74  * soc max limit and pdev max limit
75  * @vdev: DP vdev handle
76  *
77  * Return: true if allocated tx descriptors reached max configured value, else
78  * false
79  */
80 static inline bool
81 dp_tx_limit_check(struct dp_vdev *vdev)
82 {
83 	struct dp_pdev *pdev = vdev->pdev;
84 	struct dp_soc *soc = pdev->soc;
85 
86 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
87 			soc->num_tx_allowed) {
88 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
89 			  "%s: queued packets are more than max tx, drop the frame",
90 			  __func__);
91 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
92 		return true;
93 	}
94 
95 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
96 			pdev->num_tx_allowed) {
97 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
98 			  "%s: queued packets are more than max tx, drop the frame",
99 			  __func__);
100 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
101 		return true;
102 	}
103 	return false;
104 }
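/*
 * Typical caller pattern (sketch; mirrors dp_tx_prepare_desc_single() and
 * dp_tx_prepare_desc()). The drop statistics are already updated inside the
 * check, so callers only bail out:
 *
 *	if (dp_tx_limit_check(vdev))
 *		return NULL;
 */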
105 
106 /**
107  * dp_tx_exception_limit_check() - Check if allocated tx exception descriptors
108  * reached soc max limit
109  * @vdev: DP vdev handle
110  *
111  * Return: true if allocated tx descriptors reached max configured value, else
112  * false
113  */
114 static inline bool
115 dp_tx_exception_limit_check(struct dp_vdev *vdev)
116 {
117 	struct dp_pdev *pdev = vdev->pdev;
118 	struct dp_soc *soc = pdev->soc;
119 
120 	if (qdf_atomic_read(&soc->num_tx_exception) >=
121 			soc->num_msdu_exception_desc) {
122 		dp_info("exc packets are more than max drop the exc pkt");
123 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
124 		return true;
125 	}
126 
127 	return false;
128 }
129 
130 /**
131  * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
132  * @pdev: DP pdev handle
133  *
134  * Return: void
135  */
136 static inline void
137 dp_tx_outstanding_inc(struct dp_pdev *pdev)
138 {
139 	struct dp_soc *soc = pdev->soc;
140 
141 	qdf_atomic_inc(&pdev->num_tx_outstanding);
142 	qdf_atomic_inc(&soc->num_tx_outstanding);
143 }
144 
145 /**
146  * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
147  * @pdev: DP pdev handle
148  *
149  * Return: void
150  */
151 static inline void
152 dp_tx_outstanding_dec(struct dp_pdev *pdev)
153 {
154 	struct dp_soc *soc = pdev->soc;
155 
156 	qdf_atomic_dec(&pdev->num_tx_outstanding);
157 	qdf_atomic_dec(&soc->num_tx_outstanding);
158 }
159 
160 #else //QCA_TX_LIMIT_CHECK
161 static inline bool
162 dp_tx_limit_check(struct dp_vdev *vdev)
163 {
164 	return false;
165 }
166 
167 static inline bool
168 dp_tx_exception_limit_check(struct dp_vdev *vdev)
169 {
170 	return false;
171 }
172 
173 static inline void
174 dp_tx_outstanding_inc(struct dp_pdev *pdev)
175 {
176 	qdf_atomic_inc(&pdev->num_tx_outstanding);
177 }
178 
179 static inline void
180 dp_tx_outstanding_dec(struct dp_pdev *pdev)
181 {
182 	qdf_atomic_dec(&pdev->num_tx_outstanding);
183 }
184 #endif //QCA_TX_LIMIT_CHECK
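/*
 * Note: dp_tx_outstanding_inc() is called when a software Tx descriptor is
 * allocated (dp_tx_prepare_desc_single()/dp_tx_prepare_desc()) and is
 * balanced by dp_tx_outstanding_dec() in dp_tx_desc_release().
 */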
185 
186 #if defined(FEATURE_TSO)
187 /**
188  * dp_tx_tso_unmap_segment() - Unmap TSO segment
189  *
190  * @soc - core txrx main context
191  * @seg_desc - tso segment descriptor
192  * @num_seg_desc - tso number segment descriptor
193  */
194 static void dp_tx_tso_unmap_segment(
195 		struct dp_soc *soc,
196 		struct qdf_tso_seg_elem_t *seg_desc,
197 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
198 {
199 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
200 	if (qdf_unlikely(!seg_desc)) {
201 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
202 			 __func__, __LINE__);
203 		qdf_assert(0);
204 	} else if (qdf_unlikely(!num_seg_desc)) {
205 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
206 			 __func__, __LINE__);
207 		qdf_assert(0);
208 	} else {
209 		bool is_last_seg;
210 		/* no tso segment left to do dma unmap */
211 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
212 			return;
213 
214 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
215 					true : false;
216 		qdf_nbuf_unmap_tso_segment(soc->osdev,
217 					   seg_desc, is_last_seg);
218 		num_seg_desc->num_seg.tso_cmn_num_seg--;
219 	}
220 }
221 
222 /**
223  * dp_tx_tso_desc_release() - Release the tso segment and tso num segment
224  *                            descriptors back to the freelist
225  *
226  * @soc - soc device handle
227  * @tx_desc - Tx software descriptor
228  */
229 static void dp_tx_tso_desc_release(struct dp_soc *soc,
230 				   struct dp_tx_desc_s *tx_desc)
231 {
232 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
233 	if (qdf_unlikely(!tx_desc->tso_desc)) {
234 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
235 			  "%s %d TSO desc is NULL!",
236 			  __func__, __LINE__);
237 		qdf_assert(0);
238 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
239 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
240 			  "%s %d TSO num desc is NULL!",
241 			  __func__, __LINE__);
242 		qdf_assert(0);
243 	} else {
244 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
245 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
246 
247 		/* Add the tso num segment into the free list */
248 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
249 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
250 					    tx_desc->tso_num_desc);
251 			tx_desc->tso_num_desc = NULL;
252 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
253 		}
254 
255 		/* Add the tso segment into the free list*/
256 		dp_tx_tso_desc_free(soc,
257 				    tx_desc->pool_id, tx_desc->tso_desc);
258 		tx_desc->tso_desc = NULL;
259 	}
260 }
261 #else
262 static void dp_tx_tso_unmap_segment(
263 		struct dp_soc *soc,
264 		struct qdf_tso_seg_elem_t *seg_desc,
265 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
266 
267 {
268 }
269 
270 static void dp_tx_tso_desc_release(struct dp_soc *soc,
271 				   struct dp_tx_desc_s *tx_desc)
272 {
273 }
274 #endif
275 /**
276  * dp_tx_desc_release() - Release Tx Descriptor
277  * @tx_desc: Tx Descriptor
278  * @desc_pool_id: Descriptor Pool ID
279  *
280  * Deallocate all resources attached to Tx descriptor and free the Tx
281  * descriptor.
282  *
283  * Return: void
284  */
285 static void
286 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
287 {
288 	struct dp_pdev *pdev = tx_desc->pdev;
289 	struct dp_soc *soc;
290 	uint8_t comp_status = 0;
291 
292 	qdf_assert(pdev);
293 
294 	soc = pdev->soc;
295 
296 	dp_tx_outstanding_dec(pdev);
297 
298 	if (tx_desc->frm_type == dp_tx_frm_tso)
299 		dp_tx_tso_desc_release(soc, tx_desc);
300 
301 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
302 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
303 
304 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
305 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
306 
307 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
308 		qdf_atomic_dec(&soc->num_tx_exception);
309 
310 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
311 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
312 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
313 							     soc->hal_soc);
314 	else
315 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
316 
317 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
318 		"Tx Completion Release desc %d status %d outstanding %d",
319 		tx_desc->id, comp_status,
320 		qdf_atomic_read(&pdev->num_tx_outstanding));
321 
322 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
323 	return;
324 }
325 
326 /**
327  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
328  * @vdev: DP vdev Handle
329  * @nbuf: skb
330  * @msdu_info: msdu_info required to create HTT metadata
331  *
332  * Prepares and fills HTT metadata in the frame pre-header for special frames
333  * that should be transmitted using varying transmit parameters.
334  * There are 2 VDEV modes that currently need this special metadata -
335  *  1) Mesh Mode
336  *  2) DSRC Mode
337  *
338  * Return: HTT metadata size
339  *
340  */
341 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
342 					  struct dp_tx_msdu_info_s *msdu_info)
343 {
344 	uint32_t *meta_data = msdu_info->meta_data;
345 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
346 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
347 
348 	uint8_t htt_desc_size;
349 
350 	/* Size rounded off to a multiple of 8 bytes */
351 	uint8_t htt_desc_size_aligned;
352 
353 	uint8_t *hdr = NULL;
354 
355 	/*
356 	 * Metadata - HTT MSDU Extension header
357 	 */
358 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
359 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
360 
361 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
362 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
363 							   meta_data[0])) {
364 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
365 				 htt_desc_size_aligned)) {
366 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
367 							 htt_desc_size_aligned);
368 			if (!nbuf) {
369 				/*
370 				 * qdf_nbuf_realloc_headroom won't do skb_clone
371 				 * as skb_realloc_headroom does. so, no free is
372 				 * needed here.
373 				 */
374 				DP_STATS_INC(vdev,
375 					     tx_i.dropped.headroom_insufficient,
376 					     1);
377 				qdf_print(" %s[%d] qdf_nbuf_realloc_headroom failed",
378 					  __func__, __LINE__);
379 				return 0;
380 			}
381 		}
382 		/* Fill and add HTT metaheader */
383 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
384 		if (!hdr) {
385 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
386 					"Error in filling HTT metadata");
387 
388 			return 0;
389 		}
390 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
391 
392 	} else if (vdev->opmode == wlan_op_mode_ocb) {
393 		/* Todo - Add support for DSRC */
394 	}
395 
396 	return htt_desc_size_aligned;
397 }
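/*
 * Caller pattern (sketch of dp_tx_prepare_desc_single(), error handling
 * omitted): the alignment pad is pushed first, then this function pushes the
 * HTT metadata, and the sum is recorded as the packet offset:
 *
 *	align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;
 *	qdf_nbuf_push_head(nbuf, align_pad);
 *	htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, msdu_info);
 *	tx_desc->pkt_offset = align_pad + htt_hdr_size;
 */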
398 
399 /**
400  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
401  * @tso_seg: TSO segment to process
402  * @ext_desc: Pointer to MSDU extension descriptor
403  *
404  * Return: void
405  */
406 #if defined(FEATURE_TSO)
407 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
408 		void *ext_desc)
409 {
410 	uint8_t num_frag;
411 	uint32_t tso_flags;
412 
413 	/*
414 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
415 	 * tcp_flag_mask
416 	 *
417 	 * Checksum enable flags are set in TCL descriptor and not in Extension
418 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
419 	 */
420 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
421 
422 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
423 
424 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
425 		tso_seg->tso_flags.ip_len);
426 
427 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
428 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
429 
430 
431 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
432 		uint32_t lo = 0;
433 		uint32_t hi = 0;
434 
435 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
436 				  (tso_seg->tso_frags[num_frag].length));
437 
438 		qdf_dmaaddr_to_32s(
439 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
440 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
441 			tso_seg->tso_frags[num_frag].length);
442 	}
443 
444 	return;
445 }
446 #else
447 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
448 		void *ext_desc)
449 {
450 	return;
451 }
452 #endif
453 
454 #if defined(FEATURE_TSO)
455 /**
456  * dp_tx_free_tso_seg_list() - Loop through the tso segments
457  *                             allocated and free them
458  *
459  * @soc: soc handle
460  * @free_seg: list of tso segments
461  * @msdu_info: msdu descriptor
462  *
463  * Return - void
464  */
465 static void dp_tx_free_tso_seg_list(
466 		struct dp_soc *soc,
467 		struct qdf_tso_seg_elem_t *free_seg,
468 		struct dp_tx_msdu_info_s *msdu_info)
469 {
470 	struct qdf_tso_seg_elem_t *next_seg;
471 
472 	while (free_seg) {
473 		next_seg = free_seg->next;
474 		dp_tx_tso_desc_free(soc,
475 				    msdu_info->tx_queue.desc_pool_id,
476 				    free_seg);
477 		free_seg = next_seg;
478 	}
479 }
480 
481 /**
482  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
483  *                                 allocated and free them
484  *
485  * @soc:  soc handle
486  * @free_num_seg: list of tso number segments
487  * @msdu_info: msdu descriptor
488  * Return - void
489  */
490 static void dp_tx_free_tso_num_seg_list(
491 		struct dp_soc *soc,
492 		struct qdf_tso_num_seg_elem_t *free_num_seg,
493 		struct dp_tx_msdu_info_s *msdu_info)
494 {
495 	struct qdf_tso_num_seg_elem_t *next_num_seg;
496 
497 	while (free_num_seg) {
498 		next_num_seg = free_num_seg->next;
499 		dp_tso_num_seg_free(soc,
500 				    msdu_info->tx_queue.desc_pool_id,
501 				    free_num_seg);
502 		free_num_seg = next_num_seg;
503 	}
504 }
505 
506 /**
507  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
508  *                              do dma unmap for each segment
509  *
510  * @soc: soc handle
511  * @free_seg: list of tso segments
512  * @num_seg_desc: tso number segment descriptor
513  *
514  * Return - void
515  */
516 static void dp_tx_unmap_tso_seg_list(
517 		struct dp_soc *soc,
518 		struct qdf_tso_seg_elem_t *free_seg,
519 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
520 {
521 	struct qdf_tso_seg_elem_t *next_seg;
522 
523 	if (qdf_unlikely(!num_seg_desc)) {
524 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
525 		return;
526 	}
527 
528 	while (free_seg) {
529 		next_seg = free_seg->next;
530 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
531 		free_seg = next_seg;
532 	}
533 }
534 
535 #ifdef FEATURE_TSO_STATS
536 /**
537  * dp_tso_get_stats_idx() - Retrieve the tso packet id
538  * @pdev - pdev handle
539  *
540  * Return: id
541  */
542 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
543 {
544 	uint32_t stats_idx;
545 
546 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
547 						% CDP_MAX_TSO_PACKETS);
548 	return stats_idx;
549 }
550 #else
551 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
552 {
553 	return 0;
554 }
555 #endif /* FEATURE_TSO_STATS */
556 
557 /**
558  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
559  *				     free the tso segments descriptor and
560  *				     tso num segments descriptor
561  *
562  * @soc:  soc handle
563  * @msdu_info: msdu descriptor
564  * @tso_seg_unmap: flag to show if dma unmap is necessary
565  *
566  * Return - void
567  */
568 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
569 					  struct dp_tx_msdu_info_s *msdu_info,
570 					  bool tso_seg_unmap)
571 {
572 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
573 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
574 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
575 					tso_info->tso_num_seg_list;
576 
577 	/* do dma unmap for each segment */
578 	if (tso_seg_unmap)
579 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
580 
581 	/* free all tso num segment descriptors; typically there is only one */
582 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
583 
584 	/* free all tso segment descriptor */
585 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
586 }
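/*
 * Usage note: callers in dp_tx_prepare_tso() pass tso_seg_unmap = false when
 * the segments have not been DMA mapped yet (descriptor allocation failures)
 * and true once qdf_nbuf_get_tso_info() may already have mapped them.
 */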
587 
588 /**
589  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
590  * @vdev: virtual device handle
591  * @msdu: network buffer
592  * @msdu_info: meta data associated with the msdu
593  *
594  * Return: QDF_STATUS_SUCCESS success
595  */
596 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
597 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
598 {
599 	struct qdf_tso_seg_elem_t *tso_seg;
600 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
601 	struct dp_soc *soc = vdev->pdev->soc;
602 	struct dp_pdev *pdev = vdev->pdev;
603 	struct qdf_tso_info_t *tso_info;
604 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
605 	tso_info = &msdu_info->u.tso_info;
606 	tso_info->curr_seg = NULL;
607 	tso_info->tso_seg_list = NULL;
608 	tso_info->num_segs = num_seg;
609 	msdu_info->frm_type = dp_tx_frm_tso;
610 	tso_info->tso_num_seg_list = NULL;
611 
612 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
613 
614 	while (num_seg) {
615 		tso_seg = dp_tx_tso_desc_alloc(
616 				soc, msdu_info->tx_queue.desc_pool_id);
617 		if (tso_seg) {
618 			tso_seg->next = tso_info->tso_seg_list;
619 			tso_info->tso_seg_list = tso_seg;
620 			num_seg--;
621 		} else {
622 			dp_err_rl("Failed to alloc tso seg desc");
623 			DP_STATS_INC_PKT(vdev->pdev,
624 					 tso_stats.tso_no_mem_dropped, 1,
625 					 qdf_nbuf_len(msdu));
626 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
627 
628 			return QDF_STATUS_E_NOMEM;
629 		}
630 	}
631 
632 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
633 
634 	tso_num_seg = dp_tso_num_seg_alloc(soc,
635 			msdu_info->tx_queue.desc_pool_id);
636 
637 	if (tso_num_seg) {
638 		tso_num_seg->next = tso_info->tso_num_seg_list;
639 		tso_info->tso_num_seg_list = tso_num_seg;
640 	} else {
641 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
642 			 __func__);
643 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
644 
645 		return QDF_STATUS_E_NOMEM;
646 	}
647 
648 	msdu_info->num_seg =
649 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
650 
651 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
652 			msdu_info->num_seg);
653 
654 	if (!(msdu_info->num_seg)) {
655 		/*
656 		 * and unmap the segments if DMA mapping has been done.
657 		 * do unmap for segments if dma map has done.
658 		 */
659 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
660 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
661 
662 		return QDF_STATUS_E_INVAL;
663 	}
664 
665 	tso_info->curr_seg = tso_info->tso_seg_list;
666 
667 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
668 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
669 			     msdu, msdu_info->num_seg);
670 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
671 				    tso_info->msdu_stats_idx);
672 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
673 	return QDF_STATUS_SUCCESS;
674 }
675 #else
676 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
677 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
678 {
679 	return QDF_STATUS_E_NOMEM;
680 }
681 #endif
682 
683 /**
684  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
685  * @vdev: DP Vdev handle
686  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
687  * @desc_pool_id: Descriptor Pool ID
688  *
689  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
690  */
691 static
692 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
693 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
694 {
695 	uint8_t i;
696 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
697 	struct dp_tx_seg_info_s *seg_info;
698 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
699 	struct dp_soc *soc = vdev->pdev->soc;
700 
701 	/* Allocate an extension descriptor */
702 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
703 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
704 
705 	if (!msdu_ext_desc) {
706 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
707 		return NULL;
708 	}
709 
710 	if (msdu_info->exception_fw &&
711 			qdf_unlikely(vdev->mesh_vdev)) {
712 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
713 				&msdu_info->meta_data[0],
714 				sizeof(struct htt_tx_msdu_desc_ext2_t));
715 		qdf_atomic_inc(&soc->num_tx_exception);
716 	}
717 
718 	switch (msdu_info->frm_type) {
719 	case dp_tx_frm_sg:
720 	case dp_tx_frm_me:
721 	case dp_tx_frm_raw:
722 		seg_info = msdu_info->u.sg_info.curr_seg;
723 		/* Update the buffer pointers in MSDU Extension Descriptor */
724 		for (i = 0; i < seg_info->frag_cnt; i++) {
725 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
726 				seg_info->frags[i].paddr_lo,
727 				seg_info->frags[i].paddr_hi,
728 				seg_info->frags[i].len);
729 		}
730 
731 		break;
732 
733 	case dp_tx_frm_tso:
734 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
735 				&cached_ext_desc[0]);
736 		break;
737 
738 
739 	default:
740 		break;
741 	}
742 
743 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
744 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
745 
746 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
747 			msdu_ext_desc->vaddr);
748 
749 	return msdu_ext_desc;
750 }
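/*
 * Illustrative call site (sketch of dp_tx_prepare_desc()):
 *
 *	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
 *	if (!msdu_ext_desc)
 *		goto failure;
 *	tx_desc->msdu_ext_desc = msdu_ext_desc;
 *	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
 */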
751 
752 /**
753  * dp_tx_trace_pkt() - Trace TX packet at DP layer
754  *
755  * @skb: skb to be traced
756  * @msdu_id: msdu_id of the packet
757  * @vdev_id: vdev_id of the packet
758  *
759  * Return: None
760  */
761 #ifdef DP_DISABLE_TX_PKT_TRACE
762 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
763 			    uint8_t vdev_id)
764 {
765 }
766 #else
767 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
768 			    uint8_t vdev_id)
769 {
770 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
771 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
772 	DPTRACE(qdf_dp_trace_ptr(skb,
773 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
774 				 QDF_TRACE_DEFAULT_PDEV_ID,
775 				 qdf_nbuf_data_addr(skb),
776 				 sizeof(qdf_nbuf_data(skb)),
777 				 msdu_id, vdev_id));
778 
779 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
780 
781 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
782 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
783 				      msdu_id, QDF_TX));
784 }
785 #endif
786 
787 /**
788  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
789  * @vdev: DP vdev handle
790  * @nbuf: skb
791  * @desc_pool_id: Descriptor pool ID
792  * @msdu_info: MSDU info (holds the HTT metadata for the fw and the tx queue)
793  * @tx_exc_metadata: Handle that holds exception path metadata
794  * Allocate and prepare Tx descriptor with msdu information.
795  *
796  * Return: Pointer to Tx Descriptor on success,
797  *         NULL on failure
798  */
799 static
800 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
801 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
802 		struct dp_tx_msdu_info_s *msdu_info,
803 		struct cdp_tx_exception_metadata *tx_exc_metadata)
804 {
805 	uint8_t align_pad;
806 	uint8_t is_exception = 0;
807 	uint8_t htt_hdr_size;
808 	struct dp_tx_desc_s *tx_desc;
809 	struct dp_pdev *pdev = vdev->pdev;
810 	struct dp_soc *soc = pdev->soc;
811 
812 	if (dp_tx_limit_check(vdev))
813 		return NULL;
814 
815 	/* Allocate software Tx descriptor */
816 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
817 	if (qdf_unlikely(!tx_desc)) {
818 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
819 		return NULL;
820 	}
821 
822 	dp_tx_outstanding_inc(pdev);
823 
824 	/* Initialize the SW tx descriptor */
825 	tx_desc->nbuf = nbuf;
826 	tx_desc->frm_type = dp_tx_frm_std;
827 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
828 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
829 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
830 	tx_desc->vdev = vdev;
831 	tx_desc->pdev = pdev;
832 	tx_desc->msdu_ext_desc = NULL;
833 	tx_desc->pkt_offset = 0;
834 
835 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
836 
837 	if (qdf_unlikely(vdev->multipass_en)) {
838 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
839 			goto failure;
840 	}
841 
842 	/*
843 	 * For special modes (vdev_type == ocb or mesh), data frames should be
844 	 * transmitted using varying transmit parameters (tx spec) which include
845 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
846 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
847 	 * These frames are sent as exception packets to firmware.
848 	 *
849 	 * HW requirement is that the metadata should always point to an
850 	 * 8-byte aligned address. So we add an alignment pad to the start of
851 	 * the buffer. The HTT metadata is kept a multiple of 8 bytes so that,
852 	 * together with align_pad, the start address stays 8-byte aligned.
853 	 *
854 	 *  |-----------------------------|
855 	 *  |                             |
856 	 *  |-----------------------------| <-----Buffer Pointer Address given
857 	 *  |                             |  ^    in HW descriptor (aligned)
858 	 *  |       HTT Metadata          |  |
859 	 *  |                             |  |
860 	 *  |                             |  | Packet Offset given in descriptor
861 	 *  |                             |  |
862 	 *  |-----------------------------|  |
863 	 *  |       Alignment Pad         |  v
864 	 *  |-----------------------------| <----- Actual buffer start address
865 	 *  |        SKB Data             |           (Unaligned)
866 	 *  |                             |
867 	 *  |                             |
868 	 *  |                             |
869 	 *  |                             |
870 	 *  |                             |
871 	 *  |-----------------------------|
872 	 */
873 	if (qdf_unlikely((msdu_info->exception_fw)) ||
874 				(vdev->opmode == wlan_op_mode_ocb) ||
875 				(tx_exc_metadata &&
876 				tx_exc_metadata->is_tx_sniffer)) {
877 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
878 
879 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
880 			DP_STATS_INC(vdev,
881 				     tx_i.dropped.headroom_insufficient, 1);
882 			goto failure;
883 		}
884 
885 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
886 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
887 					"qdf_nbuf_push_head failed");
888 			goto failure;
889 		}
890 
891 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
892 				msdu_info);
893 		if (htt_hdr_size == 0)
894 			goto failure;
895 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
896 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
897 		is_exception = 1;
898 	}
899 
900 #if !TQM_BYPASS_WAR
901 	if (is_exception || tx_exc_metadata)
902 #endif
903 	{
904 		/* Temporary WAR due to TQM VP issues */
905 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
906 		qdf_atomic_inc(&soc->num_tx_exception);
907 	}
908 
909 	return tx_desc;
910 
911 failure:
912 	dp_tx_desc_release(tx_desc, desc_pool_id);
913 	return NULL;
914 }
915 
916 /**
917  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
918  * @vdev: DP vdev handle
919  * @nbuf: skb
920  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
921  * @desc_pool_id : Descriptor Pool ID
922  *
923  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
924  * information. For frames with fragments, allocate and prepare
925  * an MSDU extension descriptor
926  *
927  * Return: Pointer to Tx Descriptor on success,
928  *         NULL on failure
929  */
930 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
931 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
932 		uint8_t desc_pool_id)
933 {
934 	struct dp_tx_desc_s *tx_desc;
935 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
936 	struct dp_pdev *pdev = vdev->pdev;
937 	struct dp_soc *soc = pdev->soc;
938 
939 	if (dp_tx_limit_check(vdev))
940 		return NULL;
941 
942 	/* Allocate software Tx descriptor */
943 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
944 	if (!tx_desc) {
945 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
946 		return NULL;
947 	}
948 
949 	dp_tx_outstanding_inc(pdev);
950 
951 	/* Initialize the SW tx descriptor */
952 	tx_desc->nbuf = nbuf;
953 	tx_desc->frm_type = msdu_info->frm_type;
954 	tx_desc->tx_encap_type = vdev->tx_encap_type;
955 	tx_desc->vdev = vdev;
956 	tx_desc->pdev = pdev;
957 	tx_desc->pkt_offset = 0;
958 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
959 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
960 
961 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
962 
963 	/* Handle scattered frames - TSO/SG/ME */
964 	/* Allocate and prepare an extension descriptor for scattered frames */
965 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
966 	if (!msdu_ext_desc) {
967 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
968 				"%s Tx Extension Descriptor Alloc Fail",
969 				__func__);
970 		goto failure;
971 	}
972 
973 #if TQM_BYPASS_WAR
974 	/* Temporary WAR due to TQM VP issues */
975 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
976 	qdf_atomic_inc(&soc->num_tx_exception);
977 #endif
978 	if (qdf_unlikely(msdu_info->exception_fw))
979 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
980 
981 	tx_desc->msdu_ext_desc = msdu_ext_desc;
982 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
983 
984 	return tx_desc;
985 failure:
986 	dp_tx_desc_release(tx_desc, desc_pool_id);
987 	return NULL;
988 }
989 
990 /**
991  * dp_tx_prepare_raw() - Prepare RAW packet TX
992  * @vdev: DP vdev handle
993  * @nbuf: buffer pointer
994  * @seg_info: Pointer to Segment info Descriptor to be prepared
995  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
996  *     descriptor
997  *
998  * Return: nbuf on success, NULL on failure
999  */
1000 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1001 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1002 {
1003 	qdf_nbuf_t curr_nbuf = NULL;
1004 	uint16_t total_len = 0;
1005 	qdf_dma_addr_t paddr;
1006 	int32_t i;
1007 	int32_t mapped_buf_num = 0;
1008 
1009 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1010 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1011 
1012 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1013 
1014 	/* Continue only if frames are of DATA type */
1015 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1016 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1017 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1018 			  "Pkt. recd is not of data type");
1019 		goto error;
1020 	}
1021 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1022 	if (vdev->raw_mode_war &&
1023 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1024 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1025 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1026 
1027 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1028 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1029 
1030 		if (QDF_STATUS_SUCCESS !=
1031 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1032 						   curr_nbuf,
1033 						   QDF_DMA_TO_DEVICE,
1034 						   curr_nbuf->len)) {
1035 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1036 				"%s dma map error ", __func__);
1037 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1038 			mapped_buf_num = i;
1039 			goto error;
1040 		}
1041 
1042 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1043 		seg_info->frags[i].paddr_lo = paddr;
1044 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1045 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1046 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1047 		total_len += qdf_nbuf_len(curr_nbuf);
1048 	}
1049 
1050 	seg_info->frag_cnt = i;
1051 	seg_info->total_len = total_len;
1052 	seg_info->next = NULL;
1053 
1054 	sg_info->curr_seg = seg_info;
1055 
1056 	msdu_info->frm_type = dp_tx_frm_raw;
1057 	msdu_info->num_seg = 1;
1058 
1059 	return nbuf;
1060 
1061 error:
1062 	i = 0;
1063 	while (nbuf) {
1064 		curr_nbuf = nbuf;
1065 		if (i < mapped_buf_num) {
1066 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1067 						     QDF_DMA_TO_DEVICE,
1068 						     curr_nbuf->len);
1069 			i++;
1070 		}
1071 		nbuf = qdf_nbuf_next(nbuf);
1072 		qdf_nbuf_free(curr_nbuf);
1073 	}
1074 	return NULL;
1075 
1076 }
1077 
1078 /**
1079  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1080  * @soc: DP soc handle
1081  * @nbuf: Buffer pointer
1082  *
1083  * unmap the chain of nbufs that belong to this RAW frame.
1084  *
1085  * Return: None
1086  */
1087 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1088 				    qdf_nbuf_t nbuf)
1089 {
1090 	qdf_nbuf_t cur_nbuf = nbuf;
1091 
1092 	do {
1093 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1094 					     QDF_DMA_TO_DEVICE,
1095 					     cur_nbuf->len);
1096 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1097 	} while (cur_nbuf);
1098 }
1099 
1100 #ifdef VDEV_PEER_PROTOCOL_COUNT
1101 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1102 { \
1103 	qdf_nbuf_t nbuf_local; \
1104 	struct dp_vdev *vdev_local = vdev_hdl; \
1105 	do { \
1106 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1107 			break; \
1108 		nbuf_local = nbuf; \
1109 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1110 			 htt_cmn_pkt_type_raw)) \
1111 			break; \
1112 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1113 			break; \
1114 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1115 			break; \
1116 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1117 						       (nbuf_local), \
1118 						       NULL, 1, 0); \
1119 	} while (0); \
1120 }
1121 #else
1122 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1123 #endif
1124 
1125 /**
1126  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1127  * @soc: DP Soc Handle
1128  * @vdev: DP vdev handle
1129  * @tx_desc: Tx Descriptor Handle
1130  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1131  * @fw_metadata: Metadata to send to Target Firmware along with frame
1132  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1133  * @tx_exc_metadata: Handle that holds exception path meta data
1134  *
1135  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1136  *  from software Tx descriptor
1137  *
1138  * Return: QDF_STATUS_SUCCESS: success
1139  *         QDF_STATUS_E_RESOURCES: Error return
1140  */
1141 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1142 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
1143 				   uint16_t fw_metadata, uint8_t ring_id,
1144 				   struct cdp_tx_exception_metadata
1145 					*tx_exc_metadata)
1146 {
1147 	uint8_t type;
1148 	void *hal_tx_desc;
1149 	uint32_t *hal_tx_desc_cached;
1150 
1151 	/*
1152 	 * Initializing it statically here avoids a jump to memset
1153 	 * via a qdf_mem_set call
1154 	 */
1155 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1156 
1157 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1158 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1159 			tx_exc_metadata->sec_type : vdev->sec_type);
1160 
1161 	/* Return Buffer Manager ID */
1162 	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
1163 
1164 	hal_ring_handle_t hal_ring_hdl = NULL;
1165 
1166 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1167 
1168 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1169 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1170 		return QDF_STATUS_E_RESOURCES;
1171 	}
1172 
1173 	hal_tx_desc_cached = (void *) cached_desc;
1174 
1175 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1176 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1177 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1178 		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
1179 	} else {
1180 		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
1181 					tx_desc->pkt_offset;
1182 		type = HAL_TX_BUF_TYPE_BUFFER;
1183 		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1184 	}
1185 
1186 	qdf_assert_always(tx_desc->dma_addr);
1187 
1188 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
1189 				 tx_desc->dma_addr, bm_id, tx_desc->id,
1190 				 type);
1191 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1192 				vdev->lmac_id);
1193 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1194 				    vdev->search_type);
1195 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1196 				     vdev->bss_ast_idx);
1197 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1198 					  vdev->dscp_tid_map_id);
1199 
1200 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1201 			sec_type_map[sec_type]);
1202 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1203 				      (vdev->bss_ast_hash & 0xF));
1204 
1205 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1206 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1207 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1208 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1209 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1210 					  vdev->hal_desc_addr_search_flags);
1211 
1212 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1213 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1214 
1215 	/* verify checksum offload configuration */
1216 	if (vdev->csum_enabled &&
1217 	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1218 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1219 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1220 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1221 	}
1222 
1223 	if (tid != HTT_TX_EXT_TID_INVALID)
1224 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1225 
1226 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1227 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1228 
1229 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1230 	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
1231 			 soc->wlan_cfg_ctx)))
1232 		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
1233 
1234 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1235 			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
1236 			 tx_desc->pkt_offset, tx_desc->id);
1237 
1238 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1239 
1240 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1241 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1242 			  "%s %d : HAL RING Access Failed -- %pK",
1243 			 __func__, __LINE__, hal_ring_hdl);
1244 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1245 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1246 		return status;
1247 	}
1248 
1249 	/* Sync cached descriptor with HW */
1250 
1251 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1252 	if (qdf_unlikely(!hal_tx_desc)) {
1253 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1254 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1255 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1256 		goto ring_access_fail;
1257 	}
1258 
1259 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1260 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1261 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1262 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1263 	status = QDF_STATUS_SUCCESS;
1264 
1265 ring_access_fail:
1266 	if (hif_pm_runtime_get(soc->hif_handle,
1267 			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
1268 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1269 		hif_pm_runtime_put(soc->hif_handle,
1270 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1271 	} else {
1272 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1273 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1274 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1275 	}
1276 
1277 	return status;
1278 }
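/*
 * Illustrative enqueue call (sketch of dp_tx_send_msdu_single()):
 *
 *	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
 *				  htt_tcl_metadata, tx_q->ring_id,
 *				  tx_exc_metadata);
 *	if (status != QDF_STATUS_SUCCESS)
 *		goto release_desc;
 */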
1279 
1280 
1281 /**
1282  * dp_cce_classify() - Classify the frame based on CCE rules
1283  * @vdev: DP vdev handle
1284  * @nbuf: skb
1285  *
1286  * Classify frames based on CCE rules
1287  * Return: bool (true if classified,
1288  *               else false)
1289  */
1290 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1291 {
1292 	qdf_ether_header_t *eh = NULL;
1293 	uint16_t   ether_type;
1294 	qdf_llc_t *llcHdr;
1295 	qdf_nbuf_t nbuf_clone = NULL;
1296 	qdf_dot3_qosframe_t *qos_wh = NULL;
1297 
1298 	/* for mesh packets don't do any classification */
1299 	if (qdf_unlikely(vdev->mesh_vdev))
1300 		return false;
1301 
1302 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1303 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1304 		ether_type = eh->ether_type;
1305 		llcHdr = (qdf_llc_t *)(nbuf->data +
1306 					sizeof(qdf_ether_header_t));
1307 	} else {
1308 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1309 		/* For encrypted packets don't do any classification */
1310 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1311 			return false;
1312 
1313 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1314 			if (qdf_unlikely(
1315 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1316 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1317 
1318 				ether_type = *(uint16_t *)(nbuf->data
1319 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1320 						+ sizeof(qdf_llc_t)
1321 						- sizeof(ether_type));
1322 				llcHdr = (qdf_llc_t *)(nbuf->data +
1323 						QDF_IEEE80211_4ADDR_HDR_LEN);
1324 			} else {
1325 				ether_type = *(uint16_t *)(nbuf->data
1326 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1327 						+ sizeof(qdf_llc_t)
1328 						- sizeof(ether_type));
1329 				llcHdr = (qdf_llc_t *)(nbuf->data +
1330 					QDF_IEEE80211_3ADDR_HDR_LEN);
1331 			}
1332 
1333 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1334 				&& (ether_type ==
1335 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1336 
1337 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1338 				return true;
1339 			}
1340 		}
1341 
1342 		return false;
1343 	}
1344 
1345 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1346 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1347 				sizeof(*llcHdr));
1348 		nbuf_clone = qdf_nbuf_clone(nbuf);
1349 		if (qdf_unlikely(nbuf_clone)) {
1350 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1351 
1352 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1353 				qdf_nbuf_pull_head(nbuf_clone,
1354 						sizeof(qdf_net_vlanhdr_t));
1355 			}
1356 		}
1357 	} else {
1358 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1359 			nbuf_clone = qdf_nbuf_clone(nbuf);
1360 			if (qdf_unlikely(nbuf_clone)) {
1361 				qdf_nbuf_pull_head(nbuf_clone,
1362 					sizeof(qdf_net_vlanhdr_t));
1363 			}
1364 		}
1365 	}
1366 
1367 	if (qdf_unlikely(nbuf_clone))
1368 		nbuf = nbuf_clone;
1369 
1370 
1371 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1372 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1373 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1374 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1375 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1376 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1377 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1378 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1379 		if (qdf_unlikely(nbuf_clone))
1380 			qdf_nbuf_free(nbuf_clone);
1381 		return true;
1382 	}
1383 
1384 	if (qdf_unlikely(nbuf_clone))
1385 		qdf_nbuf_free(nbuf_clone);
1386 
1387 	return false;
1388 }
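/*
 * Usage note: when soc->cce_disable is set, the Tx path runs this classifier
 * and, on a match, forces DP_VO_TID and the to-FW exception flag (see
 * dp_tx_send_msdu_single() and dp_tx_send_msdu_multiple()).
 */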
1389 
1390 /**
1391  * dp_tx_get_tid() - Obtain TID to be used for this frame
1392  * @vdev: DP vdev handle
1393  * @nbuf: skb
 * @msdu_info: msdu_info structure where the obtained TID is stored
1394  *
1395  * Extract the DSCP or PCP information from the frame and map it into a TID value.
1396  *
1397  * Return: void
1398  */
1399 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1400 			  struct dp_tx_msdu_info_s *msdu_info)
1401 {
1402 	uint8_t tos = 0, dscp_tid_override = 0;
1403 	uint8_t *hdr_ptr, *L3datap;
1404 	uint8_t is_mcast = 0;
1405 	qdf_ether_header_t *eh = NULL;
1406 	qdf_ethervlan_header_t *evh = NULL;
1407 	uint16_t   ether_type;
1408 	qdf_llc_t *llcHdr;
1409 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1410 
1411 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1412 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1413 		eh = (qdf_ether_header_t *)nbuf->data;
1414 		hdr_ptr = eh->ether_dhost;
1415 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1416 	} else {
1417 		qdf_dot3_qosframe_t *qos_wh =
1418 			(qdf_dot3_qosframe_t *) nbuf->data;
1419 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1420 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1421 		return;
1422 	}
1423 
1424 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1425 	ether_type = eh->ether_type;
1426 
1427 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1428 	/*
1429 	 * Check if packet is dot3 or eth2 type.
1430 	 */
1431 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1432 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1433 				sizeof(*llcHdr));
1434 
1435 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1436 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1437 				sizeof(*llcHdr);
1438 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1439 					+ sizeof(*llcHdr) +
1440 					sizeof(qdf_net_vlanhdr_t));
1441 		} else {
1442 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1443 				sizeof(*llcHdr);
1444 		}
1445 	} else {
1446 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1447 			evh = (qdf_ethervlan_header_t *) eh;
1448 			ether_type = evh->ether_type;
1449 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1450 		}
1451 	}
1452 
1453 	/*
1454 	 * Find priority from IP TOS DSCP field
1455 	 */
1456 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1457 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1458 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1459 			/* Only for unicast frames */
1460 			if (!is_mcast) {
1461 				/* send it on VO queue */
1462 				msdu_info->tid = DP_VO_TID;
1463 			}
1464 		} else {
1465 			/*
1466 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1467 			 * from TOS byte.
1468 			 */
1469 			tos = ip->ip_tos;
1470 			dscp_tid_override = 1;
1471 
1472 		}
1473 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1474 		/* TODO
1475 		 * use flowlabel
1476 		 * IGMP/MLD cases to be handled in phase 2
1477 		 */
1478 		unsigned long ver_pri_flowlabel;
1479 		unsigned long pri;
1480 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1481 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1482 			DP_IPV6_PRIORITY_SHIFT;
1483 		tos = pri;
1484 		dscp_tid_override = 1;
1485 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1486 		msdu_info->tid = DP_VO_TID;
1487 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1488 		/* Only for unicast frames */
1489 		if (!is_mcast) {
1490 			/* send ucast arp on VO queue */
1491 			msdu_info->tid = DP_VO_TID;
1492 		}
1493 	}
1494 
1495 	/*
1496 	 * Assign all MCAST packets to BE
1497 	 */
1498 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1499 		if (is_mcast) {
1500 			tos = 0;
1501 			dscp_tid_override = 1;
1502 		}
1503 	}
1504 
1505 	if (dscp_tid_override == 1) {
1506 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1507 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1508 	}
1509 
1510 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1511 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1512 
1513 	return;
1514 }
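/*
 * Worked example (a sketch; assumes DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f, i.e. the 6 DSCP bits of the TOS byte): an IPv4
 * TOS of 0xb8 (DSCP 46, EF) gives tos = (0xb8 >> 2) & 0x3f = 46, and the TID
 * is then read from pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */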
1515 
1516 /**
1517  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1518  * @vdev: DP vdev handle
1519  * @nbuf: skb
 * @msdu_info: msdu_info which will hold the classified TID
1520  *
1521  * Software based TID classification is required when more than 2 DSCP-TID
1522  * mapping tables are needed.
1523  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1524  *
1525  * Return: void
1526  */
1527 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1528 				      struct dp_tx_msdu_info_s *msdu_info)
1529 {
1530 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1531 
1532 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1533 
1534 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1535 		return;
1536 
1537 	/* for mesh packets don't do any classification */
1538 	if (qdf_unlikely(vdev->mesh_vdev))
1539 		return;
1540 
1541 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1542 }
1543 
1544 #ifdef FEATURE_WLAN_TDLS
1545 /**
1546  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1547  * @tx_desc: TX descriptor
1548  *
1549  * Return: None
1550  */
1551 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1552 {
1553 	if (tx_desc->vdev) {
1554 		if (tx_desc->vdev->is_tdls_frame) {
1555 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1556 			tx_desc->vdev->is_tdls_frame = false;
1557 		}
1558 	}
1559 }
1560 
1561 /**
1562  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1563  * @soc: dp_soc handle
1564  * @tx_desc: TX descriptor
1565  * @vdev: datapath vdev handle
1566  *
1567  * Return: None
1568  */
1569 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1570 					 struct dp_tx_desc_s *tx_desc,
1571 					 struct dp_vdev *vdev)
1572 {
1573 	struct hal_tx_completion_status ts = {0};
1574 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1575 
1576 	if (qdf_unlikely(!vdev)) {
1577 		dp_err_rl("vdev is null!");
1578 		goto error;
1579 	}
1580 
1581 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1582 	if (vdev->tx_non_std_data_callback.func) {
1583 		qdf_nbuf_set_next(nbuf, NULL);
1584 		vdev->tx_non_std_data_callback.func(
1585 				vdev->tx_non_std_data_callback.ctxt,
1586 				nbuf, ts.status);
1587 		return;
1588 	} else {
1589 		dp_err_rl("callback func is null");
1590 	}
1591 
1592 error:
1593 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1594 	qdf_nbuf_free(nbuf);
1595 }
1596 
1597 /**
1598  * dp_tx_msdu_single_map() - do nbuf map
1599  * @vdev: DP vdev handle
1600  * @tx_desc: DP TX descriptor pointer
1601  * @nbuf: skb pointer
1602  *
1603  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1604  * operation done in other component.
1605  *
1606  * Return: QDF_STATUS
1607  */
1608 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1609 					       struct dp_tx_desc_s *tx_desc,
1610 					       qdf_nbuf_t nbuf)
1611 {
1612 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1613 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1614 						  nbuf,
1615 						  QDF_DMA_TO_DEVICE,
1616 						  nbuf->len);
1617 	else
1618 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1619 					   QDF_DMA_TO_DEVICE);
1620 }
1621 #else
1622 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1623 {
1624 }
1625 
1626 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1627 						struct dp_tx_desc_s *tx_desc,
1628 						struct dp_vdev *vdev)
1629 {
1630 }
1631 
1632 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1633 					       struct dp_tx_desc_s *tx_desc,
1634 					       qdf_nbuf_t nbuf)
1635 {
1636 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1637 					  nbuf,
1638 					  QDF_DMA_TO_DEVICE,
1639 					  nbuf->len);
1640 }
1641 #endif
1642 
1643 /**
1644  * dp_tx_frame_is_drop() - checks if the packet is loopback
1645  * @vdev: DP vdev handle
1646  * @srcmac: source MAC address
 * @dstmac: destination MAC address
1647  *
1648  * Return: 1 if frame needs to be dropped else 0
1649  */
1650 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1651 {
1652 	struct dp_pdev *pdev = NULL;
1653 	struct dp_ast_entry *src_ast_entry = NULL;
1654 	struct dp_ast_entry *dst_ast_entry = NULL;
1655 	struct dp_soc *soc = NULL;
1656 
1657 	qdf_assert(vdev);
1658 	pdev = vdev->pdev;
1659 	qdf_assert(pdev);
1660 	soc = pdev->soc;
1661 
1662 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1663 				(soc, dstmac, vdev->pdev->pdev_id);
1664 
1665 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1666 				(soc, srcmac, vdev->pdev->pdev_id);
1667 	if (dst_ast_entry && src_ast_entry) {
1668 		if (dst_ast_entry->peer->peer_id ==
1669 				src_ast_entry->peer->peer_id)
1670 			return 1;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 /**
1677  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1678  * @vdev: DP vdev handle
1679  * @nbuf: skb
1680  * @msdu_info: MSDU info which holds the TID, the metadata to the fw and
 *              the Tx queue to be used for this Tx frame
1683  * @peer_id: peer_id of the peer in case of NAWDS frames
1684  * @tx_exc_metadata: Handle that holds exception path metadata
1685  *
1686  * Return: NULL on success,
1687  *         nbuf when it fails to send
1688  */
1689 qdf_nbuf_t
1690 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1691 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1692 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1693 {
1694 	struct dp_pdev *pdev = vdev->pdev;
1695 	struct dp_soc *soc = pdev->soc;
1696 	struct dp_tx_desc_s *tx_desc;
1697 	QDF_STATUS status;
1698 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1699 	uint16_t htt_tcl_metadata = 0;
1700 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1701 	uint8_t tid = msdu_info->tid;
1702 	struct cdp_tid_tx_stats *tid_stats = NULL;
1703 
1704 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1705 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1706 			msdu_info, tx_exc_metadata);
1707 	if (!tx_desc) {
1708 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1709 			  vdev, tx_q->desc_pool_id);
1710 		drop_code = TX_DESC_ERR;
1711 		goto fail_return;
1712 	}
1713 
1714 	if (qdf_unlikely(soc->cce_disable)) {
1715 		if (dp_cce_classify(vdev, nbuf) == true) {
1716 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1717 			tid = DP_VO_TID;
1718 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1719 		}
1720 	}
1721 
1722 	dp_tx_update_tdls_flags(tx_desc);
1723 
1724 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1725 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1726 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1727 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1728 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1729 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1730 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1731 				peer_id);
1732 	} else
1733 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1734 
1735 	if (msdu_info->exception_fw)
1736 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1737 
1738 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
1739 					 !pdev->enhanced_stats_en);
1740 
1741 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
1742 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
1743 		/* Handle failure */
1744 		dp_err("qdf_nbuf_map failed");
1745 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1746 		drop_code = TX_DMA_MAP_ERR;
1747 		goto release_desc;
1748 	}
1749 
1750 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1751 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1752 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1753 
1754 	if (status != QDF_STATUS_SUCCESS) {
1755 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1756 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1757 			  __func__, tx_desc, tx_q->ring_id);
1758 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
1759 					     QDF_DMA_TO_DEVICE,
1760 					     nbuf->len);
1761 		drop_code = TX_HW_ENQUEUE;
1762 		goto release_desc;
1763 	}
1764 
1765 	return NULL;
1766 
1767 release_desc:
1768 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1769 
1770 fail_return:
1771 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1772 	tid_stats = &pdev->stats.tid_stats.
1773 		    tid_tx_stats[tx_q->ring_id][tid];
1774 	tid_stats->swdrop_cnt[drop_code]++;
1775 	return nbuf;
1776 }
1777 
1778 /**
1779  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1780  * @vdev: DP vdev handle
1781  * @nbuf: skb
1782  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1783  *
1784  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1785  *
1786  * Return: NULL on success,
1787  *         nbuf when it fails to send
1788  */
1789 #if QDF_LOCK_STATS
1790 noinline
1792 #endif
1793 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1794 				    struct dp_tx_msdu_info_s *msdu_info)
1795 {
1796 	uint32_t i;
1797 	struct dp_pdev *pdev = vdev->pdev;
1798 	struct dp_soc *soc = pdev->soc;
1799 	struct dp_tx_desc_s *tx_desc;
1800 	bool is_cce_classified = false;
1801 	QDF_STATUS status;
1802 	uint16_t htt_tcl_metadata = 0;
1803 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1804 	struct cdp_tid_tx_stats *tid_stats = NULL;
1805 
1806 	if (qdf_unlikely(soc->cce_disable)) {
1807 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1808 		if (is_cce_classified) {
1809 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1810 			msdu_info->tid = DP_VO_TID;
1811 		}
1812 	}
1813 
1814 	if (msdu_info->frm_type == dp_tx_frm_me)
1815 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1816 
1817 	i = 0;
1818 	/* Print statement to track i and num_seg */
1819 	/*
1820 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1821 	 * descriptors using information in msdu_info
1822 	 */
1823 	while (i < msdu_info->num_seg) {
1824 		/*
1825 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1826 		 * descriptor
1827 		 */
1828 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1829 				tx_q->desc_pool_id);
1830 
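		/*
		 * On descriptor allocation failure: for ME frames, free the
		 * per-client converted buffer and move on to the next
		 * segment; for other frame types, abandon the remaining
		 * segments.
		 */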
1831 		if (!tx_desc) {
1832 			if (msdu_info->frm_type == dp_tx_frm_me) {
1833 				dp_tx_me_free_buf(pdev,
1834 					(void *)(msdu_info->u.sg_info
1835 						.curr_seg->frags[0].vaddr));
1836 				i++;
1837 				continue;
1838 			}
1839 			goto done;
1840 		}
1841 
1842 		if (msdu_info->frm_type == dp_tx_frm_me) {
1843 			tx_desc->me_buffer =
1844 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1845 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1846 		}
1847 
1848 		if (is_cce_classified)
1849 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1850 
1851 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1852 		if (msdu_info->exception_fw) {
1853 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1854 		}
1855 
1856 		/*
1857 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1858 		 */
1859 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1860 			htt_tcl_metadata, tx_q->ring_id, NULL);
1861 
1862 		if (status != QDF_STATUS_SUCCESS) {
1863 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1864 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1865 					__func__, tx_desc, tx_q->ring_id);
1866 
1867 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1868 			tid_stats = &pdev->stats.tid_stats.
1869 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1870 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1871 
1872 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1873 			if (msdu_info->frm_type == dp_tx_frm_me) {
1874 				i++;
1875 				continue;
1876 			}
1877 			goto done;
1878 		}
1879 
1880 		/*
1881 		 * TODO
1882 		 * if tso_info structure can be modified to have curr_seg
1883 		 * as first element, following 2 blocks of code (for TSO and SG)
1884 		 * can be combined into 1
1885 		 */
1886 
1887 		/*
1888 		 * For frames with multiple segments (TSO, ME), jump to next
1889 		 * segment.
1890 		 */
1891 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1892 			if (msdu_info->u.tso_info.curr_seg->next) {
1893 				msdu_info->u.tso_info.curr_seg =
1894 					msdu_info->u.tso_info.curr_seg->next;
1895 
1896 				/*
1897 				 * If this is a jumbo nbuf, then increment the number of
1898 				 * nbuf users for each additional segment of the msdu.
1899 				 * This will ensure that the skb is freed only after
1900 				 * receiving tx completion for all segments of an nbuf
1901 				 */
1902 				qdf_nbuf_inc_users(nbuf);
1903 
1904 				/* Check with MCL if this is needed */
1905 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1906 			}
1907 		}
1908 
1909 		/*
1910 		 * For Multicast-Unicast converted packets,
1911 		 * each converted frame (for a client) is represented as
1912 		 * 1 segment
1913 		 */
1914 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1915 				(msdu_info->frm_type == dp_tx_frm_me)) {
1916 			if (msdu_info->u.sg_info.curr_seg->next) {
1917 				msdu_info->u.sg_info.curr_seg =
1918 					msdu_info->u.sg_info.curr_seg->next;
1919 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1920 			}
1921 		}
1922 		i++;
1923 	}
1924 
1925 	nbuf = NULL;
1926 
1927 done:
1928 	return nbuf;
1929 }
1930 
1931 /**
1932  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1933  *                     for SG frames
1934  * @vdev: DP vdev handle
1935  * @nbuf: skb
1936  * @seg_info: Pointer to Segment info Descriptor to be prepared
1937  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1938  *
1939  * Return: NULL on success,
1940  *         nbuf when it fails to send
1941  */
1942 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1943 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1944 {
1945 	uint32_t cur_frag, nr_frags;
1946 	qdf_dma_addr_t paddr;
1947 	struct dp_tx_sg_info_s *sg_info;
1948 
1949 	sg_info = &msdu_info->u.sg_info;
1950 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1951 
1952 	if (QDF_STATUS_SUCCESS !=
1953 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
1954 					   QDF_DMA_TO_DEVICE, nbuf->len)) {
1955 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1956 				"dma map error");
1957 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1958 
1959 		qdf_nbuf_free(nbuf);
1960 		return NULL;
1961 	}
1962 
1963 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
1964 	seg_info->frags[0].paddr_lo = paddr;
1965 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1966 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1967 	seg_info->frags[0].vaddr = (void *) nbuf;
1968 
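	/*
	 * frags[0] above covers the linear (head) portion of the nbuf;
	 * frags[1..nr_frags] below map each page fragment individually.
	 */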
1969 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1970 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1971 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1972 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1973 					"frag dma map error");
1974 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1975 			qdf_nbuf_free(nbuf);
1976 			return NULL;
1977 		}
1978 
1979 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
1980 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1981 		seg_info->frags[cur_frag + 1].paddr_hi =
1982 			((uint64_t) paddr) >> 32;
1983 		seg_info->frags[cur_frag + 1].len =
1984 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1985 	}
1986 
1987 	seg_info->frag_cnt = (cur_frag + 1);
1988 	seg_info->total_len = qdf_nbuf_len(nbuf);
1989 	seg_info->next = NULL;
1990 
1991 	sg_info->curr_seg = seg_info;
1992 
1993 	msdu_info->frm_type = dp_tx_frm_sg;
1994 	msdu_info->num_seg = 1;
1995 
1996 	return nbuf;
1997 }
1998 
1999 /**
2000  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
2001  * @vdev: DP vdev handle
2002  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2003  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2004  *
2005  * Return: None
2007  */
2008 static
2009 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2010 				    struct dp_tx_msdu_info_s *msdu_info,
2011 				    uint16_t ppdu_cookie)
2012 {
2013 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2014 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2015 
2016 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2017 
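	/*
	 * Mark the frame as standalone and stash the host opaque cookie in
	 * the HTT extension descriptor; the same cookie is expected to be
	 * replayed in the PPDU completion so the host can correlate it.
	 */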
2018 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2019 				(msdu_info->meta_data[5], 1);
2020 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2021 				(msdu_info->meta_data[5], 1);
2022 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2023 				(msdu_info->meta_data[6], ppdu_cookie);
2024 
2025 	msdu_info->exception_fw = 1;
2026 	msdu_info->is_tx_sniffer = 1;
2027 }
2028 
2029 #ifdef MESH_MODE_SUPPORT
2030 
2031 /**
2032  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2033  *				and prepare msdu_info for mesh frames.
2034  * @vdev: DP vdev handle
2035  * @nbuf: skb
2036  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2037  *
2038  * Return: NULL on failure,
2039  *         nbuf when extracted successfully
2040  */
2041 static
2042 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2043 				struct dp_tx_msdu_info_s *msdu_info)
2044 {
2045 	struct meta_hdr_s *mhdr;
2046 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2047 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2048 
2049 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2050 
2051 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2052 		msdu_info->exception_fw = 0;
2053 		goto remove_meta_hdr;
2054 	}
2055 
2056 	msdu_info->exception_fw = 1;
2057 
2058 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2059 
2060 	meta_data->host_tx_desc_pool = 1;
2061 	meta_data->update_peer_cache = 1;
2062 	meta_data->learning_frame = 1;
2063 
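	/*
	 * When auto-rate is not requested in the mesh meta header, copy the
	 * fixed rate parameters (power, MCS/NSS masks, preamble, retry
	 * limit) into the HTT descriptor and mark each field as valid.
	 */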
2064 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2065 		meta_data->power = mhdr->power;
2066 
2067 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2068 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2069 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2070 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2071 
2072 		meta_data->dyn_bw = 1;
2073 
2074 		meta_data->valid_pwr = 1;
2075 		meta_data->valid_mcs_mask = 1;
2076 		meta_data->valid_nss_mask = 1;
2077 		meta_data->valid_preamble_type  = 1;
2078 		meta_data->valid_retries = 1;
2079 		meta_data->valid_bw_info = 1;
2080 	}
2081 
2082 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2083 		meta_data->encrypt_type = 0;
2084 		meta_data->valid_encrypt_type = 1;
2085 		meta_data->learning_frame = 0;
2086 	}
2087 
2088 	meta_data->valid_key_flags = 1;
2089 	meta_data->key_flags = (mhdr->keyix & 0x3);
2090 
2091 remove_meta_hdr:
2092 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2093 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2094 				"qdf_nbuf_pull_head failed");
2095 		qdf_nbuf_free(nbuf);
2096 		return NULL;
2097 	}
2098 
2099 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2100 
2101 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2102 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2103 			" tid %d to_fw %d",
2104 			__func__, msdu_info->meta_data[0],
2105 			msdu_info->meta_data[1],
2106 			msdu_info->meta_data[2],
2107 			msdu_info->meta_data[3],
2108 			msdu_info->meta_data[4],
2109 			msdu_info->meta_data[5],
2110 			msdu_info->tid, msdu_info->exception_fw);
2111 
2112 	return nbuf;
2113 }
2114 #else
2115 static
2116 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2117 				struct dp_tx_msdu_info_s *msdu_info)
2118 {
2119 	return nbuf;
2120 }
2121 
2122 #endif
2123 
2124 /**
2125  * dp_check_exc_metadata() - Checks if parameters are valid
2126  * @tx_exc: holds all exception path parameters
2127  *
2128  * Return: true when all the parameters are valid, else false
2129  *
2130  */
2131 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2132 {
2133 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2134 			    HTT_INVALID_TID);
2135 	bool invalid_encap_type =
2136 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2137 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2138 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2139 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2140 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2141 			       tx_exc->ppdu_cookie == 0);
2142 
2143 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2144 	    invalid_cookie) {
2145 		return false;
2146 	}
2147 
2148 	return true;
2149 }
2150 
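/*
 * Illustrative sketch only (no such caller exists in this file): a
 * hypothetical exception-path caller would fill the metadata roughly as
 * below, which the checks in dp_check_exc_metadata() above treat as
 * valid:
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = HTT_INVALID_PEER;
 *	meta.tid = HTT_INVALID_TID;
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;
 *	nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &meta);
 *
 * A non-NULL return means the frame was not consumed and remains the
 * caller's responsibility.
 */
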
2151 /**
2152  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2153  * @soc: DP soc handle
2154  * @vdev_id: id of DP vdev handle
2155  * @nbuf: skb
2156  * @tx_exc_metadata: Handle that holds exception path meta data
2157  *
2158  * Entry point for Core Tx layer (DP_TX) invoked from
2159  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2160  *
2161  * Return: NULL on success,
2162  *         nbuf when it fails to send
2163  */
2164 qdf_nbuf_t
2165 dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
2166 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2167 {
2168 	qdf_ether_header_t *eh = NULL;
2169 	struct dp_tx_msdu_info_s msdu_info;
2170 	struct dp_vdev *vdev =
2171 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2172 						   vdev_id);
2173 
2174 	if (qdf_unlikely(!vdev))
2175 		goto fail;
2176 
2177 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2178 
2179 	if (!tx_exc_metadata)
2180 		goto fail;
2181 
2182 	msdu_info.tid = tx_exc_metadata->tid;
2183 
2184 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2185 	dp_verbose_debug("skb %pM", nbuf->data);
2186 
2187 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2188 
2189 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2190 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2191 			"Invalid parameters in exception path");
2192 		goto fail;
2193 	}
2194 
2195 	/* Basic sanity checks for unsupported packets */
2196 
2197 	/* MESH mode */
2198 	if (qdf_unlikely(vdev->mesh_vdev)) {
2199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2200 			"Mesh mode is not supported in exception path");
2201 		goto fail;
2202 	}
2203 
2204 	/* TSO or SG */
2205 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2206 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2207 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2208 			  "TSO and SG are not supported in exception path");
2209 
2210 		goto fail;
2211 	}
2212 
2213 	/* RAW */
2214 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2215 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2216 			  "Raw frame is not supported in exception path");
2217 		goto fail;
2218 	}
2219 
2220 
2221 	/* Mcast enhancement*/
2222 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2223 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2224 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2225 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2226 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2227 		}
2228 	}
2229 
2230 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2231 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2232 				 qdf_nbuf_len(nbuf));
2233 
2234 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2235 					       tx_exc_metadata->ppdu_cookie);
2236 	}
2237 
2238 	/*
2239 	 * Get HW Queue to use for this frame.
2240 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2241 	 * dedicated for data and 1 for command.
2242 	 * "queue_id" maps to one hardware ring.
2243 	 *  With each ring, we also associate a unique Tx descriptor pool
2244 	 *  to minimize lock contention for these resources.
2245 	 */
2246 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2247 
2248 	/*
2249 	 * Check exception descriptors
2250 	 */
2251 	if (dp_tx_exception_limit_check(vdev))
2252 		goto fail;
2253 
2254 	/*  Single linear frame */
2255 	/*
2256 	 * If nbuf is a simple linear frame, use send_single function to
2257 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2258 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2259 	 */
2260 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2261 			tx_exc_metadata->peer_id, tx_exc_metadata);
2262 
2263 	return nbuf;
2264 
2265 fail:
2266 	dp_verbose_debug("pkt send failed");
2267 	return nbuf;
2268 }
2269 
2270 /**
2271  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2272  * @soc: DP soc handle
2273  * @vdev_id: id of DP vdev handle
2274  * @nbuf: skb
2275  *
2276  * Entry point for Core Tx layer (DP_TX) invoked from
2277  * hard_start_xmit in OSIF/HDD
2278  *
2279  * Return: NULL on success,
2280  *         nbuf when it fails to send
2281  */
2282 #ifdef MESH_MODE_SUPPORT
2283 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2284 			   qdf_nbuf_t nbuf)
2285 {
2286 	struct meta_hdr_s *mhdr;
2287 	qdf_nbuf_t nbuf_mesh = NULL;
2288 	qdf_nbuf_t nbuf_clone = NULL;
2289 	struct dp_vdev *vdev;
2290 	uint8_t no_enc_frame = 0;
2291 
2292 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2293 	if (!nbuf_mesh) {
2294 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2295 				"qdf_nbuf_unshare failed");
2296 		return nbuf;
2297 	}
2298 
2299 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2300 						  vdev_id);
2301 	if (!vdev) {
2302 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2303 				"vdev is NULL for vdev_id %d", vdev_id);
2304 		return nbuf;
2305 	}
2306 
2307 	nbuf = nbuf_mesh;
2308 
2309 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2310 
2311 	if ((vdev->sec_type != cdp_sec_type_none) &&
2312 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2313 		no_enc_frame = 1;
2314 
2315 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2316 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2317 
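	/*
	 * If the meta header asks for updated per-packet info on a frame
	 * that will be encrypted, send an extra clone tagged with
	 * CB_FTYPE_MESH_TX_INFO (which takes the exception-to-FW route in
	 * dp_tx_extract_mesh_meta_data()) in addition to the original frame.
	 */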
2318 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2319 		       !no_enc_frame) {
2320 		nbuf_clone = qdf_nbuf_clone(nbuf);
2321 		if (!nbuf_clone) {
2322 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2323 				"qdf_nbuf_clone failed");
2324 			return nbuf;
2325 		}
2326 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2327 	}
2328 
2329 	if (nbuf_clone) {
2330 		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
2331 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2332 		} else {
2333 			qdf_nbuf_free(nbuf_clone);
2334 		}
2335 	}
2336 
2337 	if (no_enc_frame)
2338 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2339 	else
2340 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2341 
2342 	nbuf = dp_tx_send(soc, vdev_id, nbuf);
2343 	if ((!nbuf) && no_enc_frame) {
2344 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2345 	}
2346 
2347 	return nbuf;
2348 }
2349 
2350 #else
2351 
2352 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2353 			   qdf_nbuf_t nbuf)
2354 {
2355 	return dp_tx_send(soc, vdev_id, nbuf);
2356 }
2357 
2358 #endif
2359 
2360 /**
2361  * dp_tx_nawds_handler() - NAWDS handler
2362  *
2363  * @soc: DP soc handle
2364  * @vdev: DP vdev handle
2365  * @msdu_info: msdu_info required to create HTT metadata
2366  * @nbuf: skb
2367  *
2368  * This API transfers the multicast frames with the peer id
2369  * on NAWDS enabled peer.
2370  *
2371  * Return: none
2372  */
2373 
2374 static inline
2375 void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
2376 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2377 {
2378 	struct dp_peer *peer = NULL;
2379 	qdf_nbuf_t nbuf_clone = NULL;
2380 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
2381 	uint16_t peer_id = DP_INVALID_PEER;
2382 	struct dp_peer *sa_peer = NULL;
2383 	struct dp_ast_entry *ast_entry = NULL;
2384 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2385 
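	/*
	 * For intra-BSS forwarded frames, look up the source peer from the
	 * AST table so that the multicast copy is not sent back to the peer
	 * it was received from (compared against sa_peer below).
	 */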
2386 	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
2387 		qdf_spin_lock_bh(&dp_soc->ast_lock);
2388 
2389 		ast_entry = dp_peer_ast_hash_find_by_pdevid
2390 					(dp_soc,
2391 					 (uint8_t *)(eh->ether_shost),
2392 					 vdev->pdev->pdev_id);
2393 
2394 		if (ast_entry)
2395 			sa_peer = ast_entry->peer;
2396 		qdf_spin_unlock_bh(&dp_soc->ast_lock);
2397 	}
2398 
2399 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
2400 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2401 		if (!peer->bss_peer && peer->nawds_enabled) {
2402 			peer_id = peer->peer_id;
2403 			/* Multicast packets need to be
2404 			 * dropped in case of intra bss forwarding
2405 			 */
2406 			if (sa_peer == peer) {
2407 				QDF_TRACE(QDF_MODULE_ID_DP,
2408 					  QDF_TRACE_LEVEL_DEBUG,
2409 					  " %s: multicast packet",  __func__);
2410 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2411 				continue;
2412 			}
2413 			nbuf_clone = qdf_nbuf_clone(nbuf);
2414 
2415 			if (!nbuf_clone) {
2416 				QDF_TRACE(QDF_MODULE_ID_DP,
2417 					  QDF_TRACE_LEVEL_ERROR,
2418 					  FL("nbuf clone failed"));
2419 				break;
2420 			}
2421 
2422 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2423 							    msdu_info, peer_id,
2424 							    NULL);
2425 
2426 			if (nbuf_clone) {
2427 				QDF_TRACE(QDF_MODULE_ID_DP,
2428 					  QDF_TRACE_LEVEL_DEBUG,
2429 					  FL("pkt send failed"));
2430 				qdf_nbuf_free(nbuf_clone);
2431 			} else {
2432 				if (peer_id != DP_INVALID_PEER)
2433 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2434 							 1, qdf_nbuf_len(nbuf));
2435 			}
2436 		}
2437 	}
2438 
2439 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
2440 }
2441 
2442 /**
2443  * dp_tx_send() - Transmit a frame on a given VAP
2444  * @soc: DP soc handle
2445  * @vdev_id: id of DP vdev handle
2446  * @nbuf: skb
2447  *
2448  * Entry point for Core Tx layer (DP_TX) invoked from
2449  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2450  * cases
2451  *
2452  * Return: NULL on success,
2453  *         nbuf when it fails to send
2454  */
2455 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
2456 {
2457 	uint16_t peer_id = HTT_INVALID_PEER;
2458 	/*
2459 	 * doing a memzero causes additional function call overhead,
2460 	 * so the on-stack structure is cleared via static initialization
2461 	 */
2462 	struct dp_tx_msdu_info_s msdu_info = {0};
2463 	struct dp_vdev *vdev =
2464 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2465 						   vdev_id);
2466 	if (qdf_unlikely(!vdev))
2467 		return nbuf;
2468 
2469 	dp_verbose_debug("skb %pM", nbuf->data);
2470 
2471 	/*
2472 	 * Set Default Host TID value to invalid TID
2473 	 * (TID override disabled)
2474 	 */
2475 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2476 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2477 
2478 	if (qdf_unlikely(vdev->mesh_vdev)) {
2479 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2480 								&msdu_info);
2481 		if (!nbuf_mesh) {
2482 			dp_verbose_debug("Extracting mesh metadata failed");
2483 			return nbuf;
2484 		}
2485 		nbuf = nbuf_mesh;
2486 	}
2487 
2488 	/*
2489 	 * Get HW Queue to use for this frame.
2490 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2491 	 * dedicated for data and 1 for command.
2492 	 * "queue_id" maps to one hardware ring.
2493 	 *  With each ring, we also associate a unique Tx descriptor pool
2494 	 *  to minimize lock contention for these resources.
2495 	 */
2496 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2497 
2498 	/*
2499 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2500 	 *  Table 1 - Default DSCP-TID mapping table
2501 	 *  Table 2 - 1 DSCP-TID override table
2502 	 *
2503 	 * If we need a different DSCP-TID mapping for this vap,
2504 	 * call tid_classify to extract DSCP/ToS from frame and
2505 	 * map to a TID and store in msdu_info. This is later used
2506 	 * to fill in TCL Input descriptor (per-packet TID override).
2507 	 */
2508 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2509 
2510 	/*
2511 	 * Classify the frame and call corresponding
2512 	 * "prepare" function which extracts the segment (TSO)
2513 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2514 	 * into MSDU_INFO structure which is later used to fill
2515 	 * SW and HW descriptors.
2516 	 */
2517 	if (qdf_nbuf_is_tso(nbuf)) {
2518 		dp_verbose_debug("TSO frame %pK", vdev);
2519 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2520 				 qdf_nbuf_len(nbuf));
2521 
2522 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2523 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2524 					 qdf_nbuf_len(nbuf));
2525 			return nbuf;
2526 		}
2527 
2528 		goto send_multiple;
2529 	}
2530 
2531 	/* SG */
2532 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2533 		struct dp_tx_seg_info_s seg_info = {0};
2534 
2535 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2536 		if (!nbuf)
2537 			return NULL;
2538 
2539 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2540 
2541 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2542 				qdf_nbuf_len(nbuf));
2543 
2544 		goto send_multiple;
2545 	}
2546 
2547 #ifdef ATH_SUPPORT_IQUE
2548 	/* Mcast to Ucast Conversion*/
2549 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2550 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2551 					  qdf_nbuf_data(nbuf);
2552 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2553 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2554 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2555 
2556 			DP_STATS_INC_PKT(vdev,
2557 					tx_i.mcast_en.mcast_pkt, 1,
2558 					qdf_nbuf_len(nbuf));
2559 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2560 					QDF_STATUS_SUCCESS) {
2561 				return NULL;
2562 			}
2563 		}
2564 	}
2565 #endif
2566 
2567 	/* RAW */
2568 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2569 		struct dp_tx_seg_info_s seg_info = {0};
2570 
2571 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2572 		if (!nbuf)
2573 			return NULL;
2574 
2575 		dp_verbose_debug("Raw frame %pK", vdev);
2576 
2577 		goto send_multiple;
2578 
2579 	}
2580 
2581 	if (qdf_unlikely(vdev->nawds_enabled)) {
2582 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2583 					  qdf_nbuf_data(nbuf);
2584 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
2585 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
2586 
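		/*
		 * On a NAWDS-enabled vdev the frame is sent with
		 * DP_INVALID_PEER so that dp_tx_send_msdu_single() marks it
		 * host-inspected in the TCL metadata.
		 */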
2587 		peer_id = DP_INVALID_PEER;
2588 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2589 				 1, qdf_nbuf_len(nbuf));
2590 	}
2591 
2592 	/*  Single linear frame */
2593 	/*
2594 	 * If nbuf is a simple linear frame, use send_single function to
2595 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2596 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2597 	 */
2598 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2599 
2600 	return nbuf;
2601 
2602 send_multiple:
2603 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2604 
2605 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2606 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2607 
2608 	return nbuf;
2609 }
2610 
2611 /**
2612  * dp_tx_reinject_handler() - Tx Reinject Handler
2613  * @tx_desc: software descriptor head pointer
2614  * @status: Tx completion status from HTT descriptor
2615  *
2616  * This function reinjects frames back to Target.
2617  * Todo - Host queue needs to be added
2618  *
2619  * Return: none
2620  */
2621 static
2622 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2623 {
2624 	struct dp_vdev *vdev;
2625 	struct dp_peer *peer = NULL;
2626 	uint32_t peer_id = HTT_INVALID_PEER;
2627 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2628 	qdf_nbuf_t nbuf_copy = NULL;
2629 	struct dp_tx_msdu_info_s msdu_info;
2630 	struct dp_soc *soc = NULL;
2631 #ifdef WDS_VENDOR_EXTENSION
2632 	int is_mcast = 0, is_ucast = 0;
2633 	int num_peers_3addr = 0;
2634 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2635 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2636 #endif
2637 
2638 	vdev = tx_desc->vdev;
2639 	soc = vdev->pdev->soc;
2640 
2641 	qdf_assert(vdev);
2642 
2643 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2644 
2645 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2646 
2647 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2648 			"%s Tx reinject path", __func__);
2649 
2650 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2651 			qdf_nbuf_len(tx_desc->nbuf));
2652 
2653 #ifdef WDS_VENDOR_EXTENSION
2654 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2655 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2656 	} else {
2657 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2658 	}
2659 	is_ucast = !is_mcast;
2660 
2661 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2662 		if (peer->bss_peer)
2663 			continue;
2664 
2665 		/* Detect wds peers that use 3-addr framing for mcast.
2666 		 * if there are any, the bss_peer is used to send
2667 		 * the mcast frame using 3-addr format. all wds enabled
2668 		 * peers that use 4-addr framing for mcast frames will
2669 		 * be duplicated and sent as 4-addr frames below.
2670 		 */
2671 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2672 			num_peers_3addr = 1;
2673 			break;
2674 		}
2675 	}
2676 #endif
2677 
2678 	if (qdf_unlikely(vdev->mesh_vdev)) {
2679 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2680 	} else {
2681 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2682 			if ((peer->peer_id != HTT_INVALID_PEER) &&
2683 #ifdef WDS_VENDOR_EXTENSION
2684 			/*
2685 			 * . if 3-addr STA, then send on BSS Peer
2686 			 * . if Peer WDS enabled and accept 4-addr mcast,
2687 			 * send mcast on that peer only
2688 			 * . if Peer WDS enabled and accept 4-addr ucast,
2689 			 * send ucast on that peer only
2690 			 */
2691 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2692 			 (peer->wds_enabled &&
2693 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2694 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2695 #else
2696 			((peer->bss_peer &&
2697 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
2698 #endif
2699 				peer_id = DP_INVALID_PEER;
2700 
2701 				nbuf_copy = qdf_nbuf_copy(nbuf);
2702 
2703 				if (!nbuf_copy) {
2704 					QDF_TRACE(QDF_MODULE_ID_DP,
2705 						QDF_TRACE_LEVEL_DEBUG,
2706 						FL("nbuf copy failed"));
2707 					break;
2708 				}
2709 
2710 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2711 						nbuf_copy,
2712 						&msdu_info,
2713 						peer_id,
2714 						NULL);
2715 
2716 				if (nbuf_copy) {
2717 					QDF_TRACE(QDF_MODULE_ID_DP,
2718 						QDF_TRACE_LEVEL_DEBUG,
2719 						FL("pkt send failed"));
2720 					qdf_nbuf_free(nbuf_copy);
2721 				} else {
2722 					if (peer_id != DP_INVALID_PEER)
2723 						DP_STATS_INC_PKT(peer,
2724 							tx.nawds_mcast,
2725 							1, qdf_nbuf_len(nbuf));
2726 				}
2727 			}
2728 		}
2729 	}
2730 
2731 	qdf_nbuf_free(nbuf);
2732 
2733 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2734 }
2735 
2736 /**
2737  * dp_tx_inspect_handler() - Tx Inspect Handler
2738  * @tx_desc: software descriptor head pointer
2739  * @status: Tx completion status from HTT descriptor
2740  *
2741  * Handles Tx frames sent back to Host for inspection
2742  * (ProxyARP)
2743  *
2744  * Return: none
2745  */
2746 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2747 {
2748 
2749 	struct dp_soc *soc;
2750 	struct dp_pdev *pdev = tx_desc->pdev;
2751 
2752 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2753 			"%s Tx inspect path",
2754 			__func__);
2755 
2756 	qdf_assert(pdev);
2757 
2758 	soc = pdev->soc;
2759 
2760 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2761 			qdf_nbuf_len(tx_desc->nbuf));
2762 
2763 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2764 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2765 }
2766 
2767 #ifdef FEATURE_PERPKT_INFO
2768 /**
2769  * dp_get_completion_indication_for_stack() - send completion to stack
2770  * @soc : dp_soc handle
2771  * @pdev: dp_pdev handle
2772  * @peer: dp peer handle
2773  * @ts: transmit completion status structure
2774  * @netbuf: Buffer pointer for free
2775  * @time_latency: latency to be captured for the buffer
2776  *
2777  * This function is used to indicate whether the buffer needs to be
2778  * sent to the stack for freeing or not
 *
 * Return: QDF_STATUS
 */
2779 QDF_STATUS
2780 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2781 				       struct dp_pdev *pdev,
2782 				       struct dp_peer *peer,
2783 				       struct hal_tx_completion_status *ts,
2784 				       qdf_nbuf_t netbuf,
2785 				       uint64_t time_latency)
2786 {
2787 	struct tx_capture_hdr *ppdu_hdr;
2788 	uint16_t peer_id = ts->peer_id;
2789 	uint32_t ppdu_id = ts->ppdu_id;
2790 	uint8_t first_msdu = ts->first_msdu;
2791 	uint8_t last_msdu = ts->last_msdu;
2792 	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
2793 
2794 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2795 			 !pdev->latency_capture_enable))
2796 		return QDF_STATUS_E_NOSUPPORT;
2797 
2798 	if (!peer) {
2799 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2800 				FL("Peer Invalid"));
2801 		return QDF_STATUS_E_INVAL;
2802 	}
2803 
2804 	if (pdev->mcopy_mode) {
2805 		/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
2806 		 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
2807 		 * for each MPDU
2808 		 */
2809 		if (pdev->mcopy_mode == M_COPY) {
2810 			if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2811 			    (pdev->m_copy_id.tx_peer_id == peer_id)) {
2812 				return QDF_STATUS_E_INVAL;
2813 			}
2814 		}
2815 
2816 		if (!first_msdu)
2817 			return QDF_STATUS_E_INVAL;
2818 
2819 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2820 		pdev->m_copy_id.tx_peer_id = peer_id;
2821 	}
2822 
2823 	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
2824 		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
2825 		if (!netbuf) {
2826 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2827 				  FL("No headroom"));
2828 			return QDF_STATUS_E_NOMEM;
2829 		}
2830 	}
2831 
2832 	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
2833 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2834 				FL("No headroom"));
2835 		return QDF_STATUS_E_NOMEM;
2836 	}
2837 
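	/*
	 * Fill the prepended tx_capture_hdr with TA (vdev mac), RA (peer
	 * mac), ppdu/peer ids and the msdu position flags so the receiver
	 * of this completion can correlate the buffer with its PPDU.
	 */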
2838 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2839 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2840 		     QDF_MAC_ADDR_SIZE);
2841 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2842 		     QDF_MAC_ADDR_SIZE);
2843 	ppdu_hdr->ppdu_id = ppdu_id;
2844 	ppdu_hdr->peer_id = peer_id;
2845 	ppdu_hdr->first_msdu = first_msdu;
2846 	ppdu_hdr->last_msdu = last_msdu;
2847 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2848 		ppdu_hdr->tsf = ts->tsf;
2849 		ppdu_hdr->time_latency = time_latency;
2850 	}
2851 
2852 	return QDF_STATUS_SUCCESS;
2853 }
2854 
2855 
2856 /**
2857  * dp_send_completion_to_stack() - send completion to stack
2858  * @soc :  dp_soc handle
2859  * @pdev:  dp_pdev handle
2860  * @peer_id: peer_id of the peer for which completion came
2861  * @ppdu_id: ppdu_id
2862  * @netbuf: Buffer pointer for free
2863  *
2864  * This function is used to send completion to stack
2865  * to free the buffer
 *
 * Return: none
 */
2867 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2868 					uint16_t peer_id, uint32_t ppdu_id,
2869 					qdf_nbuf_t netbuf)
2870 {
2871 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2872 				netbuf, peer_id,
2873 				WDI_NO_VAL, pdev->pdev_id);
2874 }
2875 #else
2876 static QDF_STATUS
2877 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2878 				       struct dp_pdev *pdev,
2879 				       struct dp_peer *peer,
2880 				       struct hal_tx_completion_status *ts,
2881 				       qdf_nbuf_t netbuf,
2882 				       uint64_t time_latency)
2883 {
2884 	return QDF_STATUS_E_NOSUPPORT;
2885 }
2886 
2887 static void
2888 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2889 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2890 {
2891 }
2892 #endif
2893 
2894 /**
2895  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2896  * @soc: Soc handle
2897  * @desc: software Tx descriptor to be processed
2898  *
2899  * Return: none
2900  */
2901 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2902 				       struct dp_tx_desc_s *desc)
2903 {
2904 	struct dp_vdev *vdev = desc->vdev;
2905 	qdf_nbuf_t nbuf = desc->nbuf;
2906 
2907 	/* nbuf already freed in vdev detach path */
2908 	if (!nbuf)
2909 		return;
2910 
2911 	/* If it is TDLS mgmt, don't unmap or free the frame */
2912 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2913 		return dp_non_std_tx_comp_free_buff(soc, desc, vdev);
2914 
2915 	/* 0 : MSDU buffer, 1 : MLE */
2916 	if (desc->msdu_ext_desc) {
2917 		/* TSO free */
2918 		if (hal_tx_ext_desc_get_tso_enable(
2919 					desc->msdu_ext_desc->vaddr)) {
2920 			/* unmap each TSO seg before freeing the nbuf */
2921 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2922 						desc->tso_num_desc);
2923 			qdf_nbuf_free(nbuf);
2924 			return;
2925 		}
2926 	}
2927 
2928 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2929 				     QDF_DMA_TO_DEVICE, nbuf->len);
2930 
2931 	if (qdf_unlikely(!vdev)) {
2932 		qdf_nbuf_free(nbuf);
2933 		return;
2934 	}
2935 
2936 	if (qdf_likely(!vdev->mesh_vdev))
2937 		qdf_nbuf_free(nbuf);
2938 	else {
2939 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2940 			qdf_nbuf_free(nbuf);
2941 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2942 		} else
2943 			vdev->osif_tx_free_ext((nbuf));
2944 	}
2945 }
2946 
2947 #ifdef MESH_MODE_SUPPORT
2948 /**
2949  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2950  *                                         in mesh meta header
2951  * @tx_desc: software descriptor head pointer
2952  * @ts: pointer to tx completion stats
2953  * Return: none
2954  */
2955 static
2956 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2957 		struct hal_tx_completion_status *ts)
2958 {
2959 	struct meta_hdr_s *mhdr;
2960 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2961 
2962 	if (!tx_desc->msdu_ext_desc) {
2963 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2964 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2965 				"netbuf %pK offset %d",
2966 				netbuf, tx_desc->pkt_offset);
2967 			return;
2968 		}
2969 	}
2970 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2971 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2972 			"netbuf %pK offset %lu", netbuf,
2973 			sizeof(struct meta_hdr_s));
2974 		return;
2975 	}
2976 
2977 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2978 	mhdr->rssi = ts->ack_frame_rssi;
2979 	mhdr->band = tx_desc->pdev->operating_channel.band;
2980 	mhdr->channel = tx_desc->pdev->operating_channel.num;
2981 }
2982 
2983 #else
2984 static
2985 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2986 		struct hal_tx_completion_status *ts)
2987 {
2988 }
2989 
2990 #endif
2991 
2992 #ifdef QCA_PEER_EXT_STATS
2993 /*
2994  * dp_tx_compute_tid_delay() - Compute per TID delay
2995  * @stats: Per TID delay stats
2996  * @tx_desc: Software Tx descriptor
2997  *
2998  * Compute the software enqueue and hw enqueue delays and
2999  * update the respective histograms
3000  *
3001  * Return: void
3002  */
3003 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3004 				    struct dp_tx_desc_s *tx_desc)
3005 {
3006 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
3007 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3008 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3009 
3010 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3011 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3012 	timestamp_hw_enqueue = tx_desc->timestamp;
3013 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3014 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3015 					 timestamp_hw_enqueue);
3016 
3017 	/*
3018 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
3019 	 */
3020 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3021 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3022 }
3023 
3024 /*
3025  * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3026  * @peer: DP peer context
3027  * @tx_desc: Tx software descriptor
3028  * @tid: Transmission ID
3029  * @ring_id: Tx completion ring number/CPU context ID
3030  *
3031  * Update the peer extended stats. These are enhanced
3032  * per-MSDU delay stats.
3033  *
3034  * Return: void
3035  */
3036 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3037 					struct dp_tx_desc_s *tx_desc,
3038 					uint8_t tid, uint8_t ring_id)
3039 {
3040 	struct dp_pdev *pdev = peer->vdev->pdev;
3041 	struct dp_soc *soc = NULL;
3042 	struct cdp_peer_ext_stats *pext_stats = NULL;
3043 
3044 	soc = pdev->soc;
3045 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3046 		return;
3047 
3048 	pext_stats = peer->pext_stats;
3049 
3050 	qdf_assert(pext_stats);
3051 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3052 
3053 	/*
3054 	 * For non-TID packets use the TID 9
3055 	 */
3056 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3057 		tid = CDP_MAX_DATA_TIDS - 1;
3058 
3059 	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3060 				tx_desc);
3061 }
3062 #else
3063 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3064 					       struct dp_tx_desc_s *tx_desc,
3065 					       uint8_t tid, uint8_t ring_id)
3066 {
3067 }
3068 #endif
3069 
3070 /**
3071  * dp_tx_compute_delay() - Compute and fill in all timestamps
3072  *				to pass in correct fields
3073  *
3074  * @vdev: vdev handle
3075  * @tx_desc: tx descriptor
3076  * @tid: tid value
3077  * @ring_id: TCL or WBM ring number for transmit path
3078  * Return: none
3079  */
3080 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3081 				struct dp_tx_desc_s *tx_desc,
3082 				uint8_t tid, uint8_t ring_id)
3083 {
3084 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3085 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3086 
3087 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
3088 		return;
3089 
3090 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3091 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3092 	timestamp_hw_enqueue = tx_desc->timestamp;
3093 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3094 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
3095 					 timestamp_hw_enqueue);
3096 	interframe_delay = (uint32_t)(timestamp_ingress -
3097 				      vdev->prev_tx_enq_tstamp);
3098 
3099 	/*
3100 	 * Delay in software enqueue
3101 	 */
3102 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3103 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3104 	/*
3105 	 * Delay between packet enqueued to HW and Tx completion
3106 	 */
3107 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3108 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3109 
3110 	/*
3111 	 * Update interframe delay stats calculated at hardstart receive point.
3112 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3113 	 * interframe delay will not be calculated correctly for the 1st frame.
3114 	 * On the other hand, this helps avoid an extra per-packet check
3115 	 * of !vdev->prev_tx_enq_tstamp.
3116 	 */
3117 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3118 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3119 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3120 }
3121 
3122 #ifdef DISABLE_DP_STATS
3123 static
3124 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3125 {
3126 }
3127 #else
3128 static
3129 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3130 {
3131 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3132 
3133 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3134 	if (subtype != QDF_PROTO_INVALID)
3135 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3136 }
3137 #endif
3138 
3139 /**
3140  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3141  *				per wbm ring
3142  *
3143  * @tx_desc: software descriptor head pointer
3144  * @ts: Tx completion status
3145  * @peer: peer handle
3146  * @ring_id: ring number
3147  *
3148  * Return: None
3149  */
3150 static inline void
3151 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3152 			struct hal_tx_completion_status *ts,
3153 			struct dp_peer *peer, uint8_t ring_id)
3154 {
3155 	struct dp_pdev *pdev = peer->vdev->pdev;
3156 	struct dp_soc *soc = NULL;
3157 	uint8_t mcs, pkt_type;
3158 	uint8_t tid = ts->tid;
3159 	uint32_t length;
3160 	struct cdp_tid_tx_stats *tid_stats;
3161 
3162 	if (!pdev)
3163 		return;
3164 
3165 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3166 		tid = CDP_MAX_DATA_TIDS - 1;
3167 
3168 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3169 	soc = pdev->soc;
3170 
3171 	mcs = ts->mcs;
3172 	pkt_type = ts->pkt_type;
3173 
3174 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3175 		dp_err("Release source is not from TQM");
3176 		return;
3177 	}
3178 
3179 	length = qdf_nbuf_len(tx_desc->nbuf);
3180 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3181 
3182 	if (qdf_unlikely(pdev->delay_stats_flag))
3183 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3184 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3185 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3186 
3187 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3188 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3189 
3190 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3191 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3192 
3193 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3194 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3195 
3196 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3197 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3198 
3199 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3200 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3201 
3202 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3203 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3204 
3205 	/*
3206 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3207 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3208 	 * are no completions for failed cases. Hence updating tx_failed from
3209 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3210 	 * then this has to be removed
3211 	 */
3212 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3213 				peer->stats.tx.dropped.fw_rem_notx +
3214 				peer->stats.tx.dropped.fw_rem_tx +
3215 				peer->stats.tx.dropped.age_out +
3216 				peer->stats.tx.dropped.fw_reason1 +
3217 				peer->stats.tx.dropped.fw_reason2 +
3218 				peer->stats.tx.dropped.fw_reason3;
3219 
3220 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3221 		tid_stats->tqm_status_cnt[ts->status]++;
3222 	}
3223 
3224 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3225 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3226 		return;
3227 	}
3228 
3229 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3230 
3231 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3232 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3233 
3234 	/*
3235 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3236 	 * Return from here if HTT PPDU events are enabled.
3237 	 */
3238 	if (!(soc->process_tx_status))
3239 		return;
3240 
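	/*
	 * Per-preamble MCS accounting: an MCS at or above the maximum for
	 * the given preamble type is counted in the last (invalid) bucket,
	 * otherwise in its own MCS bucket.
	 */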
3241 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3242 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3243 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3244 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3245 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3246 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3247 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3248 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3249 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3250 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3251 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3252 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3253 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3254 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3255 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3256 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3257 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3258 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3259 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3260 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3261 
3262 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3263 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3264 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3265 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3266 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3267 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3268 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3269 
3270 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3271 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3272 			     &peer->stats, ts->peer_id,
3273 			     UPDATE_PEER_STATS, pdev->pdev_id);
3274 #endif
3275 }
3276 
3277 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3278 /**
3279  * dp_tx_flow_pool_lock() - take flow pool lock
3280  * @soc: core txrx main context
3281  * @tx_desc: tx desc
3282  *
3283  * Return: None
3284  */
3285 static inline
3286 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3287 			  struct dp_tx_desc_s *tx_desc)
3288 {
3289 	struct dp_tx_desc_pool_s *pool;
3290 	uint8_t desc_pool_id;
3291 
3292 	desc_pool_id = tx_desc->pool_id;
3293 	pool = &soc->tx_desc[desc_pool_id];
3294 
3295 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3296 }
3297 
3298 /**
3299  * dp_tx_flow_pool_unlock() - release flow pool lock
3300  * @soc: core txrx main context
3301  * @tx_desc: tx desc
3302  *
3303  * Return: None
3304  */
3305 static inline
3306 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3307 			    struct dp_tx_desc_s *tx_desc)
3308 {
3309 	struct dp_tx_desc_pool_s *pool;
3310 	uint8_t desc_pool_id;
3311 
3312 	desc_pool_id = tx_desc->pool_id;
3313 	pool = &soc->tx_desc[desc_pool_id];
3314 
3315 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3316 }
3317 #else
3318 static inline
3319 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3320 {
3321 }
3322 
3323 static inline
3324 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3325 {
3326 }
3327 #endif
3328 
3329 /**
3330  * dp_tx_notify_completion() - Notify tx completion for this desc
3331  * @soc: core txrx main context
3332  * @tx_desc: tx desc
3333  * @netbuf:  buffer
3334  * @status: tx status
3335  *
3336  * Return: none
3337  */
3338 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3339 					   struct dp_tx_desc_s *tx_desc,
3340 					   qdf_nbuf_t netbuf,
3341 					   uint8_t status)
3342 {
3343 	void *osif_dev;
3344 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3345 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
3346 
3347 	qdf_assert(tx_desc);
3348 
3349 	dp_tx_flow_pool_lock(soc, tx_desc);
3350 
3351 	if (!tx_desc->vdev ||
3352 	    !tx_desc->vdev->osif_vdev) {
3353 		dp_tx_flow_pool_unlock(soc, tx_desc);
3354 		return;
3355 	}
3356 
3357 	osif_dev = tx_desc->vdev->osif_vdev;
3358 	tx_compl_cbk = tx_desc->vdev->tx_comp;
3359 	dp_tx_flow_pool_unlock(soc, tx_desc);
3360 
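	/*
	 * DOWNLOAD_SUCC is always reported; the OK bit is added only when
	 * TQM acked the frame.
	 */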
3361 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3362 		flag |= BIT(QDF_TX_RX_STATUS_OK);
3363 
3364 	if (tx_compl_cbk)
3365 		tx_compl_cbk(netbuf, osif_dev, flag);
3366 }
3367 
3368 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
3370  * @tid: tid value
3371  * @txdesc_ts: timestamp from txdesc
3372  * @ppdu_id: ppdu id
3373  *
3374  * Return: none
3375  */
3376 #ifdef FEATURE_PERPKT_INFO
3377 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3378 					       struct dp_peer *peer,
3379 					       uint8_t tid,
3380 					       uint64_t txdesc_ts,
3381 					       uint32_t ppdu_id)
3382 {
3383 	uint64_t delta_ms;
3384 	struct cdp_tx_sojourn_stats *sojourn_stats;
3385 
3386 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3387 		return;
3388 
3389 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3390 			 tid >= CDP_DATA_TID_MAX))
3391 		return;
3392 
3393 	if (qdf_unlikely(!pdev->sojourn_buf))
3394 		return;
3395 
3396 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3397 		qdf_nbuf_data(pdev->sojourn_buf);
3398 
3399 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
3400 
3401 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3402 				txdesc_ts;
3403 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3404 			    delta_ms);
3405 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3406 	sojourn_stats->num_msdus[tid] = 1;
3407 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3408 		peer->avg_sojourn_msdu[tid].internal;
3409 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3410 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3411 			     WDI_NO_VAL, pdev->pdev_id);
3412 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3413 	sojourn_stats->num_msdus[tid] = 0;
3414 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3415 }
3416 #else
3417 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3418 					       struct dp_peer *peer,
3419 					       uint8_t tid,
3420 					       uint64_t txdesc_ts,
3421 					       uint32_t ppdu_id)
3422 {
3423 }
3424 #endif
3425 
3426 /**
3427  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3428  * @soc: DP Soc handle
3429  * @tx_desc: software Tx descriptor
3430  * @ts : Tx completion status from HAL/HTT descriptor
3431  *
3432  * Return: none
3433  */
3434 static inline void
3435 dp_tx_comp_process_desc(struct dp_soc *soc,
3436 			struct dp_tx_desc_s *desc,
3437 			struct hal_tx_completion_status *ts,
3438 			struct dp_peer *peer)
3439 {
3440 	uint64_t time_latency = 0;
3441 	/*
3442 	 * m_copy/tx_capture modes are not supported for
3443 	 * scatter gather packets
3444 	 */
3445 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3446 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
3447 				desc->timestamp);
3448 	}
3449 	if (!(desc->msdu_ext_desc)) {
3450 		if (QDF_STATUS_SUCCESS ==
3451 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3452 			return;
3453 		}
3454 
3455 		if (QDF_STATUS_SUCCESS ==
3456 		    dp_get_completion_indication_for_stack(soc,
3457 							   desc->pdev,
3458 							   peer, ts,
3459 							   desc->nbuf,
3460 							   time_latency)) {
3461 			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
3462 						     QDF_DMA_TO_DEVICE,
3463 						     desc->nbuf->len);
3464 			dp_send_completion_to_stack(soc,
3465 						    desc->pdev,
3466 						    ts->peer_id,
3467 						    ts->ppdu_id,
3468 						    desc->nbuf);
3469 			return;
3470 		}
3471 	}
3472 
3473 	dp_tx_comp_free_buf(soc, desc);
3474 }
3475 
3476 #ifdef DISABLE_DP_STATS
3477 /**
3478  * dp_tx_update_connectivity_stats() - update tx connectivity stats
3479  * @soc: core txrx main context
3480  * @tx_desc: tx desc
3481  * @status: tx status
3482  *
3483  * Return: none
3484  */
3485 static inline
3486 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3487 				     struct dp_tx_desc_s *tx_desc,
3488 				     uint8_t status)
3489 {
3490 }
3491 #else
3492 static inline
3493 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3494 				     struct dp_tx_desc_s *tx_desc,
3495 				     uint8_t status)
3496 {
3497 	void *osif_dev;
3498 	ol_txrx_stats_rx_fp stats_cbk;
3499 	uint8_t pkt_type;
3500 
3501 	qdf_assert(tx_desc);
3502 
3503 	if (!tx_desc->vdev ||
3504 	    !tx_desc->vdev->osif_vdev ||
3505 	    !tx_desc->vdev->stats_cb)
3506 		return;
3507 
3508 	osif_dev = tx_desc->vdev->osif_vdev;
3509 	stats_cbk = tx_desc->vdev->stats_cb;
3510 
3511 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
3512 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3513 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
3514 			  &pkt_type);
3515 }
3516 #endif
3517 
3518 /**
3519  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3520  * @soc: DP soc handle
3521  * @tx_desc: software descriptor head pointer
3522  * @ts: Tx completion status
3523  * @peer: peer handle
3524  * @ring_id: ring number
3525  *
3526  * Return: none
3527  */
3528 static inline
3529 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
3530 				  struct dp_tx_desc_s *tx_desc,
3531 				  struct hal_tx_completion_status *ts,
3532 				  struct dp_peer *peer, uint8_t ring_id)
3533 {
3534 	uint32_t length;
3535 	qdf_ether_header_t *eh;
3536 	struct dp_vdev *vdev = tx_desc->vdev;
3537 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3538 	uint8_t dp_status;
3539 
3540 	if (!vdev || !nbuf) {
3541 		dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
3542 		goto out;
3543 	}
3544 
3545 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3546 	length = qdf_nbuf_len(nbuf);
3547 
3548 	dp_status = qdf_dp_get_status_from_htt(ts->status);
3549 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3550 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3551 				 QDF_TRACE_DEFAULT_PDEV_ID,
3552 				 qdf_nbuf_data_addr(nbuf),
3553 				 sizeof(qdf_nbuf_data(nbuf)),
3554 				 tx_desc->id,
3555 				 dp_status));
3556 
3557 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3558 				"-------------------- \n"
3559 				"Tx Completion Stats: \n"
3560 				"-------------------- \n"
3561 				"ack_frame_rssi = %d \n"
3562 				"first_msdu = %d \n"
3563 				"last_msdu = %d \n"
3564 				"msdu_part_of_amsdu = %d \n"
3565 				"rate_stats valid = %d \n"
3566 				"bw = %d \n"
3567 				"pkt_type = %d \n"
3568 				"stbc = %d \n"
3569 				"ldpc = %d \n"
3570 				"sgi = %d \n"
3571 				"mcs = %d \n"
3572 				"ofdma = %d \n"
3573 				"tones_in_ru = %d \n"
3574 				"tsf = %d \n"
3575 				"ppdu_id = %d \n"
3576 				"transmit_cnt = %d \n"
3577 				"tid = %d \n"
3578 				"peer_id = %d\n",
3579 				ts->ack_frame_rssi, ts->first_msdu,
3580 				ts->last_msdu, ts->msdu_part_of_amsdu,
3581 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3582 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3583 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3584 				ts->transmit_cnt, ts->tid, ts->peer_id);
3585 
3586 	/* Update SoC level stats */
3587 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3588 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3589 
3590 	if (!peer) {
3591 		dp_err_rl("peer is null or deletion in progress");
3592 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3593 		goto out;
3594 	}
3595 
3596 	dp_tx_update_connectivity_stats(soc, tx_desc, ts->status);
3597 
3598 	/* Update per-packet stats for mesh mode */
3599 	if (qdf_unlikely(vdev->mesh_vdev) &&
3600 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3601 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3602 
3603 	/* Update peer level stats */
3604 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
3605 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3606 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3607 
3608 			if ((peer->vdev->tx_encap_type ==
3609 				htt_cmn_pkt_type_ethernet) &&
3610 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3611 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3612 			}
3613 		}
3614 	} else {
3615 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3616 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
3617 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3618 			if (qdf_unlikely(peer->in_twt)) {
3619 				DP_STATS_INC_PKT(peer,
3620 						 tx.tx_success_twt,
3621 						 1, length);
3622 			}
3623 		}
3624 	}
3625 
3626 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
3627 	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
3628 
3629 #ifdef QCA_SUPPORT_RDK_STATS
3630 	if (soc->wlanstats_enabled)
3631 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3632 					    tx_desc->timestamp,
3633 					    ts->ppdu_id);
3634 #endif
3635 
3636 out:
3637 	return;
3638 }
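
/*
 * Illustrative sketch (not part of the upstream file): the peer-stats
 * classification applied in dp_tx_comp_process_tx_status() above,
 * expressed as a stand-alone helper. Completions on a bss peer of an AP
 * vdev are counted under mcast (the driver additionally counts them
 * under bcast when the ethernet DA is broadcast); everything else is
 * counted as ucast. The enum and helper names are hypothetical.
 */
enum dp_example_pkt_class {
	DP_EXAMPLE_PKT_UCAST,
	DP_EXAMPLE_PKT_MCAST,
	DP_EXAMPLE_PKT_BCAST,
};

static inline enum dp_example_pkt_class
dp_example_classify_tx_comp(struct dp_peer *peer, struct dp_vdev *vdev,
			    qdf_ether_header_t *eh)
{
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
		/* Broadcast is reported as the most specific class here */
		if ((vdev->tx_encap_type == htt_cmn_pkt_type_ethernet) &&
		    QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
			return DP_EXAMPLE_PKT_BCAST;
		return DP_EXAMPLE_PKT_MCAST;
	}
	return DP_EXAMPLE_PKT_UCAST;
}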
3639 /**
3640  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3641  * @soc: core txrx main context
3642  * @comp_head: software descriptor head pointer
3643  * @ring_id: ring number
3644  *
3645  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
3646  * and release the software descriptors after processing is complete
3647  *
3648  * Return: none
3649  */
3650 static void
3651 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3652 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
3653 {
3654 	struct dp_tx_desc_s *desc;
3655 	struct dp_tx_desc_s *next;
3656 	struct hal_tx_completion_status ts;
3657 	struct dp_peer *peer;
3658 	qdf_nbuf_t netbuf;
3659 
3660 	desc = comp_head;
3661 
3662 	while (desc) {
3663 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
3664 			struct dp_pdev *pdev = desc->pdev;
3665 
3666 			peer = dp_peer_find_by_id(soc, desc->peer_id);
3667 			if (qdf_likely(peer)) {
3668 				/*
3669 				 * Increment peer statistics
3670 				 * Minimal statistics update done here
3671 				 */
3672 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
3673 						 desc->length);
3674 
3675 				if (desc->tx_status !=
3676 						HAL_TX_TQM_RR_FRAME_ACKED)
3677 					peer->stats.tx.tx_failed++;
3678 
3679 				dp_peer_unref_del_find_by_id(peer);
3680 			}
3681 
3682 			qdf_assert(pdev);
3683 			dp_tx_outstanding_dec(pdev);
3684 
3685 			/*
3686 			 * Calling a QDF wrapper here has a significant
3687 			 * performance impact, so the wrapper call is avoided here
3688 			 */
3689 			next = desc->next;
3690 			qdf_mem_unmap_nbytes_single(soc->osdev,
3691 						    desc->dma_addr,
3692 						    QDF_DMA_TO_DEVICE,
3693 						    desc->length);
3694 			qdf_nbuf_free(desc->nbuf);
3695 			dp_tx_desc_free(soc, desc, desc->pool_id);
3696 			desc = next;
3697 			continue;
3698 		}
3699 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3700 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3701 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
3702 
3703 		netbuf = desc->nbuf;
3704 		/* check tx complete notification */
3705 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3706 			dp_tx_notify_completion(soc, desc, netbuf, ts.status);
3707 
3708 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3709 
3710 		if (peer)
3711 			dp_peer_unref_del_find_by_id(peer);
3712 
3713 		next = desc->next;
3714 
3715 		dp_tx_desc_release(desc, desc->pool_id);
3716 		desc = next;
3717 	}
3718 
3719 }
3720 
3721 /**
3722  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3723  * @tx_desc: software descriptor head pointer
3724  * @status : Tx completion status from HTT descriptor
3725  * @ring_id: ring number
3726  *
3727  * This function will process HTT Tx indication messages from Target
3728  *
3729  * Return: none
3730  */
3731 static
3732 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
3733 				  uint8_t ring_id)
3734 {
3735 	uint8_t tx_status;
3736 	struct dp_pdev *pdev;
3737 	struct dp_vdev *vdev;
3738 	struct dp_soc *soc;
3739 	struct hal_tx_completion_status ts = {0};
3740 	uint32_t *htt_desc = (uint32_t *)status;
3741 	struct dp_peer *peer;
3742 	struct cdp_tid_tx_stats *tid_stats = NULL;
3743 	struct htt_soc *htt_handle;
3744 
3745 	/*
3746 	 * If the descriptor is already freed in vdev_detach,
3747 	 * continue to next descriptor
3748 	 */
3749 	if (!tx_desc->vdev && !tx_desc->flags) {
3750 		QDF_TRACE(QDF_MODULE_ID_DP,
3751 			  QDF_TRACE_LEVEL_INFO,
3752 			  "Descriptor freed in vdev_detach %d",
3753 			  tx_desc->id);
3754 		return;
3755 	}
3756 
3757 	pdev = tx_desc->pdev;
3758 	soc = pdev->soc;
3759 
3760 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3761 		QDF_TRACE(QDF_MODULE_ID_DP,
3762 			  QDF_TRACE_LEVEL_INFO,
3763 			  "pdev in down state %d",
3764 			  tx_desc->id);
3765 		dp_tx_comp_free_buf(soc, tx_desc);
3766 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3767 		return;
3768 	}
3769 
3770 	qdf_assert(tx_desc->pdev);
3771 
3772 	vdev = tx_desc->vdev;
3773 
3774 	if (!vdev)
3775 		return;
3776 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3777 	htt_handle = (struct htt_soc *)soc->htt_handle;
3778 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
3779 
3780 	switch (tx_status) {
3781 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3782 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3783 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3784 	{
3785 		uint8_t tid;
3786 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3787 			ts.peer_id =
3788 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3789 						htt_desc[2]);
3790 			ts.tid =
3791 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3792 						htt_desc[2]);
3793 		} else {
3794 			ts.peer_id = HTT_INVALID_PEER;
3795 			ts.tid = HTT_INVALID_TID;
3796 		}
3797 		ts.ppdu_id =
3798 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3799 					htt_desc[1]);
3800 		ts.ack_frame_rssi =
3801 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3802 					htt_desc[1]);
3803 
3804 		ts.tsf = htt_desc[3];
3805 		ts.first_msdu = 1;
3806 		ts.last_msdu = 1;
3807 		tid = ts.tid;
3808 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3809 			tid = CDP_MAX_DATA_TIDS - 1;
3810 
3811 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3812 
3813 		if (qdf_unlikely(pdev->delay_stats_flag))
3814 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
3815 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
3816 			tid_stats->htt_status_cnt[tx_status]++;
3817 		}
3818 
3819 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3820 		if (qdf_likely(peer))
3821 			dp_peer_unref_del_find_by_id(peer);
3822 
3823 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
3824 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3825 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3826 
3827 		break;
3828 	}
3829 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3830 	{
3831 		dp_tx_reinject_handler(tx_desc, status);
3832 		break;
3833 	}
3834 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3835 	{
3836 		dp_tx_inspect_handler(tx_desc, status);
3837 		break;
3838 	}
3839 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3840 	{
3841 		dp_tx_mec_handler(vdev, status);
3842 		break;
3843 	}
3844 	default:
3845 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3846 			  "%s Invalid HTT tx_status %d\n",
3847 			  __func__, tx_status);
3848 		break;
3849 	}
3850 }
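
/*
 * Illustrative sketch (not part of the upstream file): how the four HTT
 * WBM completion words consumed above map onto
 * struct hal_tx_completion_status. Word 0 carries the FW2WBM tx status,
 * word 1 the sched cmd (ppdu) id and ack-frame RSSI, word 2 the valid
 * bit, sw peer id and TID, and word 3 the TSF. The helper name is
 * hypothetical; the macros are the same ones used above.
 */
static inline void
dp_example_htt_desc_to_ts(uint32_t *htt_desc,
			  struct hal_tx_completion_status *ts)
{
	if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
		ts->peer_id =
			HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(htt_desc[2]);
		ts->tid = HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(htt_desc[2]);
	} else {
		ts->peer_id = HTT_INVALID_PEER;
		ts->tid = HTT_INVALID_TID;
	}
	ts->ppdu_id = HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(htt_desc[1]);
	ts->ack_frame_rssi =
		HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(htt_desc[1]);
	ts->tsf = htt_desc[3];
	ts->first_msdu = 1;
	ts->last_msdu = 1;
}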
3851 
3852 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3853 static inline
3854 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3855 {
3856 	bool limit_hit = false;
3857 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3858 
3859 	limit_hit =
3860 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
3861 
3862 	if (limit_hit)
3863 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3864 
3865 	return limit_hit;
3866 }
3867 
3868 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3869 {
3870 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3871 }
3872 #else
3873 static inline
3874 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3875 {
3876 	return false;
3877 }
3878 
3879 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3880 {
3881 	return false;
3882 }
3883 #endif
3884 
3885 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3886 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
3887 			    uint32_t quota)
3888 {
3889 	void *tx_comp_hal_desc;
3890 	uint8_t buffer_src;
3891 	uint8_t pool_id;
3892 	uint32_t tx_desc_id;
3893 	struct dp_tx_desc_s *tx_desc = NULL;
3894 	struct dp_tx_desc_s *head_desc = NULL;
3895 	struct dp_tx_desc_s *tail_desc = NULL;
3896 	uint32_t num_processed = 0;
3897 	uint32_t count;
3898 	uint32_t num_avail_for_reap = 0;
3899 	bool force_break = false;
3900 
3901 	DP_HIST_INIT();
3902 
3903 more_data:
3904 	/* Re-initialize local variables to be re-used */
3905 	head_desc = NULL;
3906 	tail_desc = NULL;
3907 	count = 0;
3908 
3909 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
3910 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
3911 		return 0;
3912 	}
3913 
3914 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
3915 
3916 	if (num_avail_for_reap >= quota)
3917 		num_avail_for_reap = quota;
3918 
3919 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
3920 
3921 	/* Find head descriptor from completion ring */
3922 	while (qdf_likely(num_avail_for_reap)) {
3923 
3924 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
3925 		if (qdf_unlikely(!tx_comp_hal_desc))
3926 			break;
3927 
3928 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3929 
3930 		/* If this buffer was not released by TQM or FW, then it is not
3931 		 * a Tx completion indication; handle the error and continue */
3932 		if (qdf_unlikely(buffer_src !=
3933 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3934 				 (qdf_unlikely(buffer_src !=
3935 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
3936 			uint8_t wbm_internal_error;
3937 
3938 			dp_err_rl(
3939 				"Tx comp release_src != TQM | FW but from %d",
3940 				buffer_src);
3941 			hal_dump_comp_desc(tx_comp_hal_desc);
3942 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3943 
3944 			/* When WBM sees NULL buffer_addr_info in any of
3945 			 * ingress rings it sends an error indication,
3946 			 * with wbm_internal_error=1, to a specific ring.
3947 			 * The WBM2SW ring used to indicate these errors is
3948 			 * fixed in HW, and that ring is being used as Tx
3949 			 * completion ring. These errors are not related to
3950 			 * Tx completions, and should just be ignored
3951 			 */
3952 			wbm_internal_error = hal_get_wbm_internal_error(
3953 							soc->hal_soc,
3954 							tx_comp_hal_desc);
3955 
3956 			if (wbm_internal_error) {
3957 				dp_err_rl("Tx comp wbm_internal_error!!");
3958 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
3959 
3960 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
3961 								buffer_src)
3962 					dp_handle_wbm_internal_error(
3963 						soc,
3964 						tx_comp_hal_desc,
3965 						hal_tx_comp_get_buffer_type(
3966 							tx_comp_hal_desc));
3967 
3968 			} else {
3969 				dp_err_rl("Tx comp wbm_internal_error false");
3970 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
3971 			}
3972 			continue;
3973 		}
3974 
3975 		/* Get descriptor id */
3976 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3977 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3978 			DP_TX_DESC_ID_POOL_OS;
3979 
3980 		/* Find Tx descriptor */
3981 		tx_desc = dp_tx_desc_find(soc, pool_id,
3982 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3983 				DP_TX_DESC_ID_PAGE_OS,
3984 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3985 				DP_TX_DESC_ID_OFFSET_OS);
3986 
3987 		/*
3988 		 * If the release source is FW, process the HTT status
3989 		 */
3990 		if (qdf_unlikely(buffer_src ==
3991 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3992 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3993 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3994 					htt_tx_status);
3995 			dp_tx_process_htt_completion(tx_desc,
3996 					htt_tx_status, ring_id);
3997 		} else {
3998 			/*
3999 			 * If fast completion mode is enabled, the extended
4000 			 * metadata is not copied from the descriptor
4001 			 */
4002 			if (qdf_likely(tx_desc->flags &
4003 						DP_TX_DESC_FLAG_SIMPLE)) {
4004 				tx_desc->peer_id =
4005 					hal_tx_comp_get_peer_id(tx_comp_hal_desc);
4006 				tx_desc->tx_status =
4007 					hal_tx_comp_get_tx_status(tx_comp_hal_desc);
4008 				goto add_to_pool;
4009 			}
4010 
4011 			/*
4012 			 * If the descriptor is already freed in vdev_detach,
4013 			 * continue to next descriptor
4014 			 */
4015 			if (qdf_unlikely(!tx_desc->vdev) &&
4016 					 qdf_unlikely(!tx_desc->flags)) {
4017 				QDF_TRACE(QDF_MODULE_ID_DP,
4018 					  QDF_TRACE_LEVEL_INFO,
4019 					  "Descriptor freed in vdev_detach %d",
4020 					  tx_desc_id);
4021 				continue;
4022 			}
4023 
4024 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
4025 				QDF_TRACE(QDF_MODULE_ID_DP,
4026 					  QDF_TRACE_LEVEL_INFO,
4027 					  "pdev in down state %d",
4028 					  tx_desc_id);
4029 
4030 				dp_tx_comp_free_buf(soc, tx_desc);
4031 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
4032 				goto next_desc;
4033 			}
4034 
4035 			/* Pool id is not matching. Error */
4036 			if (tx_desc->pool_id != pool_id) {
4037 				QDF_TRACE(QDF_MODULE_ID_DP,
4038 					QDF_TRACE_LEVEL_FATAL,
4039 					"Tx Comp pool id %d not matched %d",
4040 					pool_id, tx_desc->pool_id);
4041 
4042 				qdf_assert_always(0);
4043 			}
4044 
4045 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
4046 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
4047 				QDF_TRACE(QDF_MODULE_ID_DP,
4048 					  QDF_TRACE_LEVEL_FATAL,
4049 					  "Txdesc invalid, flgs = %x,id = %d",
4050 					  "Txdesc invalid, flgs = %x, id = %d",
4051 				qdf_assert_always(0);
4052 			}
4053 
4054 			/* Collect hw completion contents */
4055 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
4056 					      &tx_desc->comp, 1);
4057 add_to_pool:
4058 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
4059 
4060 			/* First ring descriptor on the cycle */
4061 			if (!head_desc) {
4062 				head_desc = tx_desc;
4063 				tail_desc = tx_desc;
4064 			}
4065 
4066 			tail_desc->next = tx_desc;
4067 			tx_desc->next = NULL;
4068 			tail_desc = tx_desc;
4069 		}
4070 next_desc:
4071 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4072 
4073 		/*
4074 		 * Stop processing once the processed packet count
4075 		 * reaches the per-loop packet limit
4076 		 */
4077 
4078 		count++;
4079 
4080 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
4081 			break;
4082 	}
4083 
4084 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
4085 
4086 	/* Process the reaped descriptors */
4087 	if (head_desc)
4088 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
4089 
4090 	if (dp_tx_comp_enable_eol_data_check(soc)) {
4091 
4092 		if (num_processed >= quota)
4093 			force_break = true;
4094 
4095 		if (!force_break &&
4096 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
4097 						  hal_ring_hdl)) {
4098 			DP_STATS_INC(soc, tx.hp_oos2, 1);
4099 			if (!hif_exec_should_yield(soc->hif_handle,
4100 						   int_ctx->dp_intr_id))
4101 				goto more_data;
4102 		}
4103 	}
4104 	DP_TX_HIST_STATS_PER_PDEV();
4105 
4106 	return num_processed;
4107 }
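
/*
 * Illustrative sketch (not part of the upstream file): how an interrupt
 * service routine might drain the Tx completion rings with a bounded
 * budget using dp_tx_comp_handler(). The real service loop lives outside
 * this file; the helper name, the ring-mask handling and the budget
 * bookkeeping are only an assumption about how a caller could look, and
 * the dp_soc fields num_tcl_data_rings and tx_comp_ring[] are assumed
 * from dp_types.h.
 */
static inline uint32_t
dp_example_service_tx_comp_rings(struct dp_intr *int_ctx, struct dp_soc *soc,
				 uint8_t tx_ring_mask, uint32_t budget)
{
	uint32_t work_done = 0;
	uint8_t ring;

	for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
		if (!(tx_ring_mask & (1 << ring)))
			continue;

		/* Hand the remaining budget to each enabled ring in turn */
		work_done += dp_tx_comp_handler(int_ctx, soc,
						soc->tx_comp_ring[ring].hal_srng,
						ring, budget - work_done);
		if (work_done >= budget)
			break;
	}

	return work_done;
}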
4108 
4109 #ifdef FEATURE_WLAN_TDLS
4110 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4111 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4112 {
4113 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4114 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
4115 
4116 	if (!vdev) {
4117 		dp_err("vdev handle for id %d is NULL", vdev_id);
4118 		return NULL;
4119 	}
4120 
4121 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4122 		vdev->is_tdls_frame = true;
4123 
4124 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4125 }
4126 #endif
4127 
4128 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4129 {
4130 	struct wlan_cfg_dp_soc_ctxt *cfg;
4131 
4132 	struct dp_soc *soc;
4133 
4134 	soc = vdev->pdev->soc;
4135 	if (!soc)
4136 		return;
4137 
4138 	cfg = soc->wlan_cfg_ctx;
4139 	if (!cfg)
4140 		return;
4141 
4142 	if (vdev->opmode == wlan_op_mode_ndi)
4143 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4144 	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
4145 		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
4146 		 (vdev->subtype == wlan_op_subtype_p2p_go))
4147 		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
4148 	else
4149 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4150 }
4151 
4152 /**
4153  * dp_tx_vdev_attach() - attach vdev to dp tx
4154  * @vdev: virtual device instance
4155  *
4156  * Return: QDF_STATUS_SUCCESS: success
4157  *         QDF_STATUS_E_RESOURCES: Error return
4158  */
4159 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4160 {
4161 	int pdev_id;
4162 	/*
4163 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4164 	 */
4165 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4166 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
4167 
4168 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4169 			vdev->vdev_id);
4170 
4171 	pdev_id =
4172 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4173 						       vdev->pdev->pdev_id);
4174 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4175 
4176 	/*
4177 	 * Set HTT Extension Valid bit to 0 by default
4178 	 */
4179 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4180 
4181 	dp_tx_vdev_update_search_flags(vdev);
4182 
4183 	dp_tx_vdev_update_feature_flags(vdev);
4184 
4185 	return QDF_STATUS_SUCCESS;
4186 }
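
/*
 * Illustrative sketch (not part of the upstream file): the
 * htt_tcl_metadata word programmed in dp_tx_vdev_attach() above, packed
 * for a hypothetical vdev 2 whose target pdev id resolves to 1. The
 * values and the helper name are made up; only the SET macros are the
 * real HTT interface used above, and the uint16_t width is assumed to
 * match the vdev field.
 */
static inline uint16_t dp_example_build_tcl_metadata(void)
{
	uint16_t tcl_metadata = 0;

	HTT_TX_TCL_METADATA_TYPE_SET(tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);
	HTT_TX_TCL_METADATA_VDEV_ID_SET(tcl_metadata, 2);
	HTT_TX_TCL_METADATA_PDEV_ID_SET(tcl_metadata, 1);
	/* HTT extension descriptor is not used by default */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(tcl_metadata, 0);

	return tcl_metadata;
}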
4187 
4188 #ifndef FEATURE_WDS
4189 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4190 {
4191 	return false;
4192 }
4193 #endif
4194 
4195 /**
4196  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4197  * @vdev: virtual device instance
4198  *
4199  * Return: void
4200  *
4201  */
4202 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4203 {
4204 	struct dp_soc *soc = vdev->pdev->soc;
4205 
4206 	/*
4207 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
4208 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
4209 	 *
4210 	 * Enable AddrY (SA based search) only for non-WDS STA and
4211 	 * ProxySTA VAP (in HKv1) modes.
4212 	 *
4213 	 * In all other VAP modes, only DA based search should be
4214 	 * enabled
4215 	 */
4216 	if (vdev->opmode == wlan_op_mode_sta &&
4217 	    vdev->tdls_link_connected)
4218 		vdev->hal_desc_addr_search_flags =
4219 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4220 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4221 		 !dp_tx_da_search_override(vdev))
4222 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4223 	else
4224 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4225 
4226 	/* Set search type only when peer map v2 messaging is enabled
4227 	 * as we will have the search index (AST hash) only when v2 is
4228 	 * enabled
4229 	 */
4230 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4231 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4232 	else
4233 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4234 }
4235 
4236 static inline bool
4237 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4238 			  struct dp_vdev *vdev,
4239 			  struct dp_tx_desc_s *tx_desc)
4240 {
4241 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4242 		return false;
4243 
4244 	/*
4245 	 * If a vdev is given, only check whether the desc's
4246 	 * vdev matches. If vdev is NULL, check whether the
4247 	 * desc's pdev matches.
4248 	 */
4249 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
4250 }
4251 
4252 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4253 /**
4254  * dp_tx_desc_flush() - release resources associated
4255  *                      with TX Desc
4256  *
4257  * @pdev: Handle to DP pdev structure
4258  * @vdev: virtual device instance
4259  * NULL: no specific Vdev is required; check all allocated TX desc
4260  * on this pdev.
4261  * Non-NULL: only check the allocated TX Desc associated with this Vdev.
4262  *
4263  * @force_free:
4264  * true: flush the TX desc.
4265  * false: only reset the Vdev in each allocated TX desc
4266  * associated with the current Vdev.
4267  *
4268  * This function walks the TX desc pool to flush
4269  * the outstanding TX data or to reset the Vdev to NULL in the
4270  * associated TX Desc.
4271  */
4272 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4273 		      bool force_free)
4274 {
4275 	uint8_t i;
4276 	uint32_t j;
4277 	uint32_t num_desc, page_id, offset;
4278 	uint16_t num_desc_per_page;
4279 	struct dp_soc *soc = pdev->soc;
4280 	struct dp_tx_desc_s *tx_desc = NULL;
4281 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4282 
4283 	if (!vdev && !force_free) {
4284 		dp_err("Reset TX desc vdev, Vdev param is required!");
4285 		return;
4286 	}
4287 
4288 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4289 		tx_desc_pool = &soc->tx_desc[i];
4290 		if (!(tx_desc_pool->pool_size) ||
4291 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4292 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4293 			continue;
4294 
4295 		/*
4296 		 * Take the flow pool lock in case the pool is freed
4297 		 * because all tx_desc get recycled while handling TX completion.
4298 		 * This is not necessary for a force flush because:
4299 		 * a. a double lock would occur if dp_tx_desc_release is
4300 		 *    also trying to acquire it.
4301 		 * b. dp interrupts have been disabled before the force TX desc
4302 		 *    flush in dp_pdev_deinit().
4303 		 */
4304 		if (!force_free)
4305 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4306 		num_desc = tx_desc_pool->pool_size;
4307 		num_desc_per_page =
4308 			tx_desc_pool->desc_pages.num_element_per_page;
4309 		for (j = 0; j < num_desc; j++) {
4310 			page_id = j / num_desc_per_page;
4311 			offset = j % num_desc_per_page;
4312 
4313 			if (qdf_unlikely(!(tx_desc_pool->
4314 					 desc_pages.cacheable_pages)))
4315 				break;
4316 
4317 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4318 
4319 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4320 				/*
4321 				 * Free TX desc if force free is
4322 				 * required, otherwise only reset vdev
4323 				 * in this TX desc.
4324 				 */
4325 				if (force_free) {
4326 					dp_tx_comp_free_buf(soc, tx_desc);
4327 					dp_tx_desc_release(tx_desc, i);
4328 				} else {
4329 					tx_desc->vdev = NULL;
4330 				}
4331 			}
4332 		}
4333 		if (!force_free)
4334 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
4335 	}
4336 }
4337 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4338 /**
4339  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
4340  *
4341  * @soc: Handle to DP soc structure
4342  * @tx_desc: pointer of one TX desc
4343  * @desc_pool_id: TX Desc pool id
4344  */
4345 static inline void
4346 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
4347 		      uint8_t desc_pool_id)
4348 {
4349 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
4350 
4351 	tx_desc->vdev = NULL;
4352 
4353 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
4354 }
4355 
4356 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4357 		      bool force_free)
4358 {
4359 	uint8_t i, num_pool;
4360 	uint32_t j;
4361 	uint32_t num_desc, page_id, offset;
4362 	uint16_t num_desc_per_page;
4363 	struct dp_soc *soc = pdev->soc;
4364 	struct dp_tx_desc_s *tx_desc = NULL;
4365 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4366 
4367 	if (!vdev && !force_free) {
4368 		dp_err("Reset TX desc vdev, Vdev param is required!");
4369 		return;
4370 	}
4371 
4372 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4373 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4374 
4375 	for (i = 0; i < num_pool; i++) {
4376 		tx_desc_pool = &soc->tx_desc[i];
4377 		if (!tx_desc_pool->desc_pages.cacheable_pages)
4378 			continue;
4379 
4380 		num_desc_per_page =
4381 			tx_desc_pool->desc_pages.num_element_per_page;
4382 		for (j = 0; j < num_desc; j++) {
4383 			page_id = j / num_desc_per_page;
4384 			offset = j % num_desc_per_page;
4385 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4386 
4387 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4388 				if (force_free) {
4389 					dp_tx_comp_free_buf(soc, tx_desc);
4390 					dp_tx_desc_release(tx_desc, i);
4391 				} else {
4392 					dp_tx_desc_reset_vdev(soc, tx_desc,
4393 							      i);
4394 				}
4395 			}
4396 		}
4397 	}
4398 }
4399 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
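
/*
 * Illustrative sketch (not part of the upstream file): the page/offset
 * arithmetic the flush loops above use to address one descriptor inside
 * a multi-page pool before calling dp_tx_desc_find(). For example, with
 * num_element_per_page = 64, index j = 130 maps to page 2, offset 2.
 * The helper name is hypothetical.
 */
static inline void
dp_example_desc_index_to_page(uint32_t j, uint16_t num_desc_per_page,
			      uint32_t *page_id, uint32_t *offset)
{
	*page_id = j / num_desc_per_page;	/* e.g. 130 / 64 = 2 */
	*offset = j % num_desc_per_page;	/* e.g. 130 % 64 = 2 */
}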
4400 
4401 /**
4402  * dp_tx_vdev_detach() - detach vdev from dp tx
4403  * @vdev: virtual device instance
4404  *
4405  * Return: QDF_STATUS_SUCCESS: success
4406  *         QDF_STATUS_E_RESOURCES: Error return
4407  */
4408 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
4409 {
4410 	struct dp_pdev *pdev = vdev->pdev;
4411 
4412 	/* Reset TX desc associated to this Vdev as NULL */
4413 	dp_tx_desc_flush(pdev, vdev, false);
4414 	dp_tx_vdev_multipass_deinit(vdev);
4415 
4416 	return QDF_STATUS_SUCCESS;
4417 }
4418 
4419 /**
4420  * dp_tx_pdev_attach() - attach pdev to dp tx
4421  * @pdev: physical device instance
4422  *
4423  * Return: QDF_STATUS_SUCCESS: success
4424  *         QDF_STATUS_E_RESOURCES: Error return
4425  */
4426 QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
4427 {
4428 	struct dp_soc *soc = pdev->soc;
4429 
4430 	/* Initialize Flow control counters */
4431 	qdf_atomic_init(&pdev->num_tx_outstanding);
4432 
4433 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4434 		/* Initialize descriptors in TCL Ring */
4435 		hal_tx_init_data_ring(soc->hal_soc,
4436 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
4437 	}
4438 
4439 	return QDF_STATUS_SUCCESS;
4440 }
4441 
4442 /**
4443  * dp_tx_pdev_detach() - detach pdev from dp tx
4444  * @pdev: physical device instance
4445  *
4446  * Return: QDF_STATUS_SUCCESS: success
4447  *         QDF_STATUS_E_RESOURCES: Error return
4448  */
4449 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
4450 {
4451 	/* flush TX outstanding data per pdev */
4452 	dp_tx_desc_flush(pdev, NULL, true);
4453 	dp_tx_me_exit(pdev);
4454 	return QDF_STATUS_SUCCESS;
4455 }
4456 
4457 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4458 /* Pools will be allocated dynamically */
4459 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4460 					   int num_desc)
4461 {
4462 	uint8_t i;
4463 
4464 	for (i = 0; i < num_pool; i++) {
4465 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
4466 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
4467 	}
4468 
4469 	return QDF_STATUS_SUCCESS;
4470 }
4471 
4472 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4473 					  int num_desc)
4474 {
4475 	return QDF_STATUS_SUCCESS;
4476 }
4477 
4478 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4479 {
4480 }
4481 
4482 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4483 {
4484 	uint8_t i;
4485 
4486 	for (i = 0; i < num_pool; i++)
4487 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
4488 }
4489 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4490 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4491 					   int num_desc)
4492 {
4493 	uint8_t i, count;
4494 
4495 	/* Allocate software Tx descriptor pools */
4496 	for (i = 0; i < num_pool; i++) {
4497 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
4498 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4499 				  FL("Tx Desc Pool alloc %d failed %pK"),
4500 				  i, soc);
4501 			goto fail;
4502 		}
4503 	}
4504 	return QDF_STATUS_SUCCESS;
4505 
4506 fail:
4507 	for (count = 0; count < i; count++)
4508 		dp_tx_desc_pool_free(soc, count);
4509 
4510 	return QDF_STATUS_E_NOMEM;
4511 }
4512 
4513 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4514 					  int num_desc)
4515 {
4516 	uint8_t i;
4517 	for (i = 0; i < num_pool; i++) {
4518 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
4519 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4520 				  FL("Tx Desc Pool init %d failed %pK"),
4521 				  i, soc);
4522 			return QDF_STATUS_E_NOMEM;
4523 		}
4524 	}
4525 	return QDF_STATUS_SUCCESS;
4526 }
4527 
4528 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4529 {
4530 	uint8_t i;
4531 
4532 	for (i = 0; i < num_pool; i++)
4533 		dp_tx_desc_pool_deinit(soc, i);
4534 }
4535 
4536 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4537 {
4538 	uint8_t i;
4539 
4540 	for (i = 0; i < num_pool; i++)
4541 		dp_tx_desc_pool_free(soc, i);
4542 }
4543 
4544 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4545 
4546 /**
4547  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
4548  * @soc: core txrx main context
4549  * @num_pool: number of pools
4550  *
4551  */
4552 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
4553 {
4554 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
4555 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
4556 }
4557 
4558 /**
4559  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
4560  * @soc: core txrx main context
4561  * @num_pool: number of pools
4562  *
4563  */
4564 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
4565 {
4566 	dp_tx_tso_desc_pool_free(soc, num_pool);
4567 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
4568 }
4569 
4570 /**
4571  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
4572  * @soc: core txrx main context
4573  *
4574  * This function frees all tx related descriptors as below
4575  * 1. Regular TX descriptors (static pools)
4576  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4577  * 3. TSO descriptors
4578  *
4579  */
4580 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
4581 {
4582 	uint8_t num_pool;
4583 
4584 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4585 
4586 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4587 	dp_tx_ext_desc_pool_free(soc, num_pool);
4588 	dp_tx_delete_static_pools(soc, num_pool);
4589 }
4590 
4591 /**
4592  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
4593  * @soc: core txrx main context
4594  *
4595  * This function de-initializes all tx related descriptors as below
4596  * 1. Regular TX descriptors (static pools)
4597  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4598  * 3. TSO descriptors
4599  *
4600  */
4601 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
4602 {
4603 	uint8_t num_pool;
4604 
4605 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4606 
4607 	dp_tx_flow_control_deinit(soc);
4608 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4609 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4610 	dp_tx_deinit_static_pools(soc, num_pool);
4611 }
4612 
4613 /**
4614  * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
4615  * @soc: DP soc handle
4616  * @num_pool: Number of pools
4617  * @num_desc: Number of TSO descriptors to reserve per pool
4618  *
4619  * Return: QDF_STATUS_E_FAILURE on failure or
4620  * QDF_STATUS_SUCCESS on success
4621  */
4622 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
4623 					 uint8_t num_pool,
4624 					 uint16_t num_desc)
4625 {
4626 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
4627 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
4628 		return QDF_STATUS_E_FAILURE;
4629 	}
4630 
4631 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
4632 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
4633 		       num_pool, soc);
4634 		return QDF_STATUS_E_FAILURE;
4635 	}
4636 	return QDF_STATUS_SUCCESS;
4637 }
4638 
4639 /**
4640  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
4641  * @soc: DP soc handle
4642  * @num_pool: Number of pools
4643  * @num_desc: Number of descriptors
4644  *
4645  * Initialize TSO descriptor pools
4646  *
4647  * Return: QDF_STATUS_E_FAILURE on failure or
4648  * QDF_STATUS_SUCCESS on success
4649  */
4650 
4651 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
4652 					uint8_t num_pool,
4653 					uint16_t num_desc)
4654 {
4655 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
4656 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
4657 		return QDF_STATUS_E_FAILURE;
4658 	}
4659 
4660 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
4661 		dp_err("TSO Num of seg Pool init %d failed %pK",
4662 		       num_pool, soc);
4663 		return QDF_STATUS_E_FAILURE;
4664 	}
4665 	return QDF_STATUS_SUCCESS;
4666 }
4667 
4668 /**
4669  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
4670  * @soc: core txrx main context
4671  *
4672  * This function allocates memory for following descriptor pools
4673  * 1. regular sw tx descriptor pools (static pools)
4674  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4675  * 3. TSO descriptor pools
4676  *
4677  * Return: QDF_STATUS_SUCCESS: success
4678  *         QDF_STATUS_E_RESOURCES: Error return
4679  */
4680 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
4681 {
4682 	uint8_t num_pool;
4683 	uint32_t num_desc;
4684 	uint32_t num_ext_desc;
4685 
4686 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4687 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4688 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4689 
4690 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4691 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
4692 		  __func__, num_pool, num_desc);
4693 
4694 	if ((num_pool > MAX_TXDESC_POOLS) ||
4695 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
4696 		goto fail1;
4697 
4698 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4699 		goto fail1;
4700 
4701 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
4702 		goto fail2;
4703 
4704 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4705 		return QDF_STATUS_SUCCESS;
4706 
4707 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4708 		goto fail3;
4709 
4710 	return QDF_STATUS_SUCCESS;
4711 
4712 fail3:
4713 	dp_tx_ext_desc_pool_free(soc, num_pool);
4714 fail2:
4715 	dp_tx_delete_static_pools(soc, num_pool);
4716 fail1:
4717 	return QDF_STATUS_E_RESOURCES;
4718 }
4719 
4720 /**
4721  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
4722  * @soc: core txrx main context
4723  *
4724  * This function initializes the following TX descriptor pools
4725  * 1. regular sw tx descriptor pools (static pools)
4726  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4727  * 3. TSO descriptor pools
4728  *
4729  * Return: QDF_STATUS_SUCCESS: success
4730  *	   QDF_STATUS_E_RESOURCES: Error return
4731  */
4732 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
4733 {
4734 	uint8_t num_pool;
4735 	uint32_t num_desc;
4736 	uint32_t num_ext_desc;
4737 
4738 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4739 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4740 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4741 
4742 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
4743 		goto fail1;
4744 
4745 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
4746 		goto fail2;
4747 
4748 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4749 		return QDF_STATUS_SUCCESS;
4750 
4751 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4752 		goto fail3;
4753 
4754 	dp_tx_flow_control_init(soc);
4755 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4756 	return QDF_STATUS_SUCCESS;
4757 
4758 fail3:
4759 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4760 fail2:
4761 	dp_tx_deinit_static_pools(soc, num_pool);
4762 fail1:
4763 	return QDF_STATUS_E_RESOURCES;
4764 }
4765 
4766 /**
4767  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
4768  * @txrx_soc: dp soc handle
4769  *
4770  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4771  *			QDF_STATUS_E_FAILURE
4772  */
4773 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
4774 {
4775 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4776 	uint8_t num_pool;
4777 	uint32_t num_desc;
4778 	uint32_t num_ext_desc;
4779 
4780 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4781 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4782 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4783 
4784 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4785 		return QDF_STATUS_E_FAILURE;
4786 
4787 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4788 		return QDF_STATUS_E_FAILURE;
4789 
4790 	return QDF_STATUS_SUCCESS;
4791 }
4792 
4793 /**
4794  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
4795  * @txrx_soc: dp soc handle
4796  *
4797  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4798  */
4799 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
4800 {
4801 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4802 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4803 
4804 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4805 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4806 
4807 	return QDF_STATUS_SUCCESS;
4808 }
4809 
4810