xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
32 #include "if_meta_hdr.h"
33 #endif
34 #include "enet.h"
35 #include "dp_internal.h"
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 
43 
44 /* TODO Add support in TSO */
45 #define DP_DESC_NUM_FRAG(x) 0
46 
47 /* disable TQM_BYPASS */
48 #define TQM_BYPASS_WAR 0
49 
50 /* invalid peer id for reinject */
51 #define DP_INVALID_PEER 0XFFFE
52 
53 /* mapping between hal encrypt type and cdp_sec_type */
54 #define MAX_CDP_SEC_TYPE 12
55 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
56 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
57 					HAL_TX_ENCRYPT_TYPE_WEP_128,
58 					HAL_TX_ENCRYPT_TYPE_WEP_104,
59 					HAL_TX_ENCRYPT_TYPE_WEP_40,
60 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
61 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
62 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
63 					HAL_TX_ENCRYPT_TYPE_WAPI,
64 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
65 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
66 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
67 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
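/*
 * Illustrative sketch only (not part of the original code flow): sec_type_map
 * is indexed with a cdp_sec_type value to pick the HAL encrypt type that is
 * programmed into the TCL descriptor, e.g.
 *
 *	enum cdp_sec_type sec_type = vdev->sec_type;
 *	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *				     sec_type_map[sec_type]);
 *
 * See dp_tx_hw_enqueue() below for the actual usage.
 */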
68 
69 #ifdef QCA_TX_LIMIT_CHECK
70 /**
71  * dp_tx_limit_check - Check if allocated tx descriptors reached
72  * soc max limit and pdev max limit
73  * @vdev: DP vdev handle
74  *
75  * Return: true if allocated tx descriptors reached max configured value, else
76  * false
77  */
78 static inline bool
79 dp_tx_limit_check(struct dp_vdev *vdev)
80 {
81 	struct dp_pdev *pdev = vdev->pdev;
82 	struct dp_soc *soc = pdev->soc;
83 
84 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
85 			soc->num_tx_allowed) {
86 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
87 			  "%s: queued packets are more than max tx, drop the frame",
88 			  __func__);
89 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
90 		return true;
91 	}
92 
93 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
94 			pdev->num_tx_allowed) {
95 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
96 			  "%s: queued packets are more than max tx, drop the frame",
97 			  __func__);
98 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
99 		return true;
100 	}
101 	return false;
102 }
103 
104 /**
105  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
106  * @pdev: DP pdev handle
107  *
108  * Return: void
109  */
110 static inline void
111 dp_tx_outstanding_inc(struct dp_pdev *pdev)
112 {
113 	struct dp_soc *soc = pdev->soc;
114 
115 	qdf_atomic_inc(&pdev->num_tx_outstanding);
116 	qdf_atomic_inc(&soc->num_tx_outstanding);
117 }
118 
119 /**
120  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
121  * @pdev: DP pdev handle
122  *
123  * Return: void
124  */
125 static inline void
126 dp_tx_outstanding_dec(struct dp_pdev *pdev)
127 {
128 	struct dp_soc *soc = pdev->soc;
129 
130 	qdf_atomic_dec(&pdev->num_tx_outstanding);
131 	qdf_atomic_dec(&soc->num_tx_outstanding);
132 }
133 
134 #else //QCA_TX_LIMIT_CHECK
135 static inline bool
136 dp_tx_limit_check(struct dp_vdev *vdev)
137 {
138 	return false;
139 }
140 
141 static inline void
142 dp_tx_outstanding_inc(struct dp_pdev *pdev)
143 {
144 	qdf_atomic_inc(&pdev->num_tx_outstanding);
145 }
146 
147 static inline void
148 dp_tx_outstanding_dec(struct dp_pdev *pdev)
149 {
150 	qdf_atomic_dec(&pdev->num_tx_outstanding);
151 }
152 #endif //QCA_TX_LIMIT_CHECK
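/*
 * Usage sketch (illustrative only): the descriptor-allocation paths below
 * pair these helpers as follows - check the limit before allocating, bump
 * the outstanding count once allocation succeeds, and drop it again when
 * the descriptor is freed in dp_tx_desc_release():
 *
 *	if (dp_tx_limit_check(vdev))
 *		return NULL;
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (!tx_desc)
 *		return NULL;
 *	dp_tx_outstanding_inc(pdev);
 *	...
 *	dp_tx_outstanding_dec(pdev);	(on release)
 */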
153 
154 #if defined(FEATURE_TSO)
155 /**
156  * dp_tx_tso_unmap_segment() - Unmap TSO segment
157  *
158  * @soc - core txrx main context
159  * @seg_desc - tso segment descriptor
160  * @num_seg_desc - tso number segment descriptor
161  */
162 static void dp_tx_tso_unmap_segment(
163 		struct dp_soc *soc,
164 		struct qdf_tso_seg_elem_t *seg_desc,
165 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
166 {
167 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
168 	if (qdf_unlikely(!seg_desc)) {
169 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
170 			 __func__, __LINE__);
171 		qdf_assert(0);
172 	} else if (qdf_unlikely(!num_seg_desc)) {
173 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
174 			 __func__, __LINE__);
175 		qdf_assert(0);
176 	} else {
177 		bool is_last_seg;
178 		/* no tso segment left to do dma unmap */
179 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
180 			return;
181 
182 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
183 					true : false;
184 		qdf_nbuf_unmap_tso_segment(soc->osdev,
185 					   seg_desc, is_last_seg);
186 		num_seg_desc->num_seg.tso_cmn_num_seg--;
187 	}
188 }
189 
190 /**
191  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
192  *                            back to the freelist
193  *
194  * @soc - soc device handle
195  * @tx_desc - Tx software descriptor
196  */
197 static void dp_tx_tso_desc_release(struct dp_soc *soc,
198 				   struct dp_tx_desc_s *tx_desc)
199 {
200 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
201 	if (qdf_unlikely(!tx_desc->tso_desc)) {
202 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
203 			  "%s %d TSO desc is NULL!",
204 			  __func__, __LINE__);
205 		qdf_assert(0);
206 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
207 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
208 			  "%s %d TSO num desc is NULL!",
209 			  __func__, __LINE__);
210 		qdf_assert(0);
211 	} else {
212 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
213 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
214 
215 		/* Add the tso num segment into the free list */
216 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
217 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
218 					    tx_desc->tso_num_desc);
219 			tx_desc->tso_num_desc = NULL;
220 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
221 		}
222 
223 		/* Add the tso segment into the free list */
224 		dp_tx_tso_desc_free(soc,
225 				    tx_desc->pool_id, tx_desc->tso_desc);
226 		tx_desc->tso_desc = NULL;
227 	}
228 }
229 #else
230 static void dp_tx_tso_unmap_segment(
231 		struct dp_soc *soc,
232 		struct qdf_tso_seg_elem_t *seg_desc,
233 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
234 
235 {
236 }
237 
238 static void dp_tx_tso_desc_release(struct dp_soc *soc,
239 				   struct dp_tx_desc_s *tx_desc)
240 {
241 }
242 #endif
243 /**
244  * dp_tx_desc_release() - Release Tx Descriptor
245  * @tx_desc: Tx Descriptor
246  * @desc_pool_id: Descriptor Pool ID
247  *
248  * Deallocate all resources attached to Tx descriptor and free the Tx
249  * descriptor.
250  *
251  * Return: none
252  */
253 static void
254 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
255 {
256 	struct dp_pdev *pdev = tx_desc->pdev;
257 	struct dp_soc *soc;
258 	uint8_t comp_status = 0;
259 
260 	qdf_assert(pdev);
261 
262 	soc = pdev->soc;
263 
264 	if (tx_desc->frm_type == dp_tx_frm_tso)
265 		dp_tx_tso_desc_release(soc, tx_desc);
266 
267 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
268 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
269 
270 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
271 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
272 
273 	dp_tx_outstanding_dec(pdev);
274 
275 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
276 		qdf_atomic_dec(&pdev->num_tx_exception);
277 
278 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
279 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
280 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
281 							     soc->hal_soc);
282 	else
283 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
284 
285 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
286 		"Tx Completion Release desc %d status %d outstanding %d",
287 		tx_desc->id, comp_status,
288 		qdf_atomic_read(&pdev->num_tx_outstanding));
289 
290 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
291 	return;
292 }
293 
294 /**
295  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
296  * @vdev: DP vdev Handle
297  * @nbuf: skb
298  * @msdu_info: msdu_info required to create HTT metadata
299  *
300  * Prepares and fills HTT metadata in the frame pre-header for special frames
301  * that should be transmitted using varying transmit parameters.
302  * There are 2 VDEV modes that currently need this special metadata -
303  *  1) Mesh Mode
304  *  2) DSRC Mode
305  *
306  * Return: HTT metadata size
307  *
308  */
309 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
310 					  struct dp_tx_msdu_info_s *msdu_info)
311 {
312 	uint32_t *meta_data = msdu_info->meta_data;
313 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
314 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
315 
316 	uint8_t htt_desc_size;
317 
318 	/* Size rounded up to a multiple of 8 bytes */
319 	uint8_t htt_desc_size_aligned;
320 
321 	uint8_t *hdr = NULL;
322 
323 	/*
324 	 * Metadata - HTT MSDU Extension header
325 	 */
326 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
327 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
328 
329 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
330 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
331 							   meta_data[0])) {
332 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
333 				 htt_desc_size_aligned)) {
334 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
335 							 htt_desc_size_aligned);
336 			if (!nbuf) {
337 				/*
338 				 * qdf_nbuf_realloc_headroom won't do skb_clone
339 				 * as skb_realloc_headroom does. so, no free is
340 				 * needed here.
341 				 */
342 				DP_STATS_INC(vdev,
343 					     tx_i.dropped.headroom_insufficient,
344 					     1);
345 				qdf_print(" %s[%d] skb_realloc_headroom failed",
346 					  __func__, __LINE__);
347 				return 0;
348 			}
349 		}
350 		/* Fill and add HTT metaheader */
351 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
352 		if (!hdr) {
353 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
354 					"Error in filling HTT metadata");
355 
356 			return 0;
357 		}
358 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
359 
360 	} else if (vdev->opmode == wlan_op_mode_ocb) {
361 		/* Todo - Add support for DSRC */
362 	}
363 
364 	return htt_desc_size_aligned;
365 }
366 
367 /**
368  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
369  * @tso_seg: TSO segment to process
370  * @ext_desc: Pointer to MSDU extension descriptor
371  *
372  * Return: void
373  */
374 #if defined(FEATURE_TSO)
375 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
376 		void *ext_desc)
377 {
378 	uint8_t num_frag;
379 	uint32_t tso_flags;
380 
381 	/*
382 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
383 	 * tcp_flag_mask
384 	 *
385 	 * Checksum enable flags are set in TCL descriptor and not in Extension
386 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
387 	 */
388 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
389 
390 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
391 
392 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
393 		tso_seg->tso_flags.ip_len);
394 
395 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
396 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
397 
398 
399 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
400 		uint32_t lo = 0;
401 		uint32_t hi = 0;
402 
403 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
404 				  (tso_seg->tso_frags[num_frag].length));
405 
406 		qdf_dmaaddr_to_32s(
407 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
408 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
409 			tso_seg->tso_frags[num_frag].length);
410 	}
411 
412 	return;
413 }
414 #else
415 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
416 		void *ext_desc)
417 {
418 	return;
419 }
420 #endif
421 
422 #if defined(FEATURE_TSO)
423 /**
424  * dp_tx_free_tso_seg_list() - Loop through the tso segments
425  *                             allocated and free them
426  *
427  * @soc: soc handle
428  * @free_seg: list of tso segments
429  * @msdu_info: msdu descriptor
430  *
431  * Return: void
432  */
433 static void dp_tx_free_tso_seg_list(
434 		struct dp_soc *soc,
435 		struct qdf_tso_seg_elem_t *free_seg,
436 		struct dp_tx_msdu_info_s *msdu_info)
437 {
438 	struct qdf_tso_seg_elem_t *next_seg;
439 
440 	while (free_seg) {
441 		next_seg = free_seg->next;
442 		dp_tx_tso_desc_free(soc,
443 				    msdu_info->tx_queue.desc_pool_id,
444 				    free_seg);
445 		free_seg = next_seg;
446 	}
447 }
448 
449 /**
450  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
451  *                                 allocated and free them
452  *
453  * @soc:  soc handle
454  * @free_num_seg: list of tso number segments
455  * @msdu_info: msdu descriptor
456  * Return: void
457  */
458 static void dp_tx_free_tso_num_seg_list(
459 		struct dp_soc *soc,
460 		struct qdf_tso_num_seg_elem_t *free_num_seg,
461 		struct dp_tx_msdu_info_s *msdu_info)
462 {
463 	struct qdf_tso_num_seg_elem_t *next_num_seg;
464 
465 	while (free_num_seg) {
466 		next_num_seg = free_num_seg->next;
467 		dp_tso_num_seg_free(soc,
468 				    msdu_info->tx_queue.desc_pool_id,
469 				    free_num_seg);
470 		free_num_seg = next_num_seg;
471 	}
472 }
473 
474 /**
475  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
476  *                              do dma unmap for each segment
477  *
478  * @soc: soc handle
479  * @free_seg: list of tso segments
480  * @num_seg_desc: tso number segment descriptor
481  *
482  * Return: void
483  */
484 static void dp_tx_unmap_tso_seg_list(
485 		struct dp_soc *soc,
486 		struct qdf_tso_seg_elem_t *free_seg,
487 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
488 {
489 	struct qdf_tso_seg_elem_t *next_seg;
490 
491 	if (qdf_unlikely(!num_seg_desc)) {
492 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
493 		return;
494 	}
495 
496 	while (free_seg) {
497 		next_seg = free_seg->next;
498 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
499 		free_seg = next_seg;
500 	}
501 }
502 
503 #ifdef FEATURE_TSO_STATS
504 /**
505  * dp_tso_get_stats_idx() - Retrieve the tso packet id
506  * @pdev: pdev handle
507  *
508  * Return: id
509  */
510 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
511 {
512 	uint32_t stats_idx;
513 
514 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
515 						% CDP_MAX_TSO_PACKETS);
516 	return stats_idx;
517 }
518 #else
519 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
520 {
521 	return 0;
522 }
523 #endif /* FEATURE_TSO_STATS */
524 
525 /**
526  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
527  *				     free the tso segments descriptor and
528  *				     tso num segments descriptor
529  *
530  * @soc:  soc handle
531  * @msdu_info: msdu descriptor
532  * @tso_seg_unmap: flag to show if dma unmap is necessary
533  *
534  * Return: void
535  */
536 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
537 					  struct dp_tx_msdu_info_s *msdu_info,
538 					  bool tso_seg_unmap)
539 {
540 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
541 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
542 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
543 					tso_info->tso_num_seg_list;
544 
545 	/* do dma unmap for each segment */
546 	if (tso_seg_unmap)
547 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
548 
549 	/* free all tso num seg descriptors (the list typically has only one) */
550 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
551 
552 	/* free all tso segment descriptor */
553 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
554 }
555 
556 /**
557  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
558  * @vdev: virtual device handle
559  * @msdu: network buffer
560  * @msdu_info: meta data associated with the msdu
561  *
562  * Return: QDF_STATUS_SUCCESS success
563  */
564 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
565 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
566 {
567 	struct qdf_tso_seg_elem_t *tso_seg;
568 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
569 	struct dp_soc *soc = vdev->pdev->soc;
570 	struct dp_pdev *pdev = vdev->pdev;
571 	struct qdf_tso_info_t *tso_info;
572 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
573 	tso_info = &msdu_info->u.tso_info;
574 	tso_info->curr_seg = NULL;
575 	tso_info->tso_seg_list = NULL;
576 	tso_info->num_segs = num_seg;
577 	msdu_info->frm_type = dp_tx_frm_tso;
578 	tso_info->tso_num_seg_list = NULL;
579 
580 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
581 
582 	while (num_seg) {
583 		tso_seg = dp_tx_tso_desc_alloc(
584 				soc, msdu_info->tx_queue.desc_pool_id);
585 		if (tso_seg) {
586 			tso_seg->next = tso_info->tso_seg_list;
587 			tso_info->tso_seg_list = tso_seg;
588 			num_seg--;
589 		} else {
590 			dp_err_rl("Failed to alloc tso seg desc");
591 			DP_STATS_INC_PKT(vdev->pdev,
592 					 tso_stats.tso_no_mem_dropped, 1,
593 					 qdf_nbuf_len(msdu));
594 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
595 
596 			return QDF_STATUS_E_NOMEM;
597 		}
598 	}
599 
600 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
601 
602 	tso_num_seg = dp_tso_num_seg_alloc(soc,
603 			msdu_info->tx_queue.desc_pool_id);
604 
605 	if (tso_num_seg) {
606 		tso_num_seg->next = tso_info->tso_num_seg_list;
607 		tso_info->tso_num_seg_list = tso_num_seg;
608 	} else {
609 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
610 			 __func__);
611 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
612 
613 		return QDF_STATUS_E_NOMEM;
614 	}
615 
616 	msdu_info->num_seg =
617 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
618 
619 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
620 			msdu_info->num_seg);
621 
622 	if (!(msdu_info->num_seg)) {
623 		/*
624 		 * Free allocated TSO seg desc and number seg desc,
625 		 * do unmap for segments if dma map has been done.
626 		 */
627 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
628 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
629 
630 		return QDF_STATUS_E_INVAL;
631 	}
632 
633 	tso_info->curr_seg = tso_info->tso_seg_list;
634 
635 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
636 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
637 			     msdu, msdu_info->num_seg);
638 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
639 				    tso_info->msdu_stats_idx);
640 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
641 	return QDF_STATUS_SUCCESS;
642 }
643 #else
644 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
645 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
646 {
647 	return QDF_STATUS_E_NOMEM;
648 }
649 #endif
650 
651 /**
652  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
653  * @vdev: DP Vdev handle
654  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
655  * @desc_pool_id: Descriptor Pool ID
656  *
657  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
658  */
659 static
660 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
661 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
662 {
663 	uint8_t i;
664 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
665 	struct dp_tx_seg_info_s *seg_info;
666 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
667 	struct dp_soc *soc = vdev->pdev->soc;
668 
669 	/* Allocate an extension descriptor */
670 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
671 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
672 
673 	if (!msdu_ext_desc) {
674 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
675 		return NULL;
676 	}
677 
678 	if (msdu_info->exception_fw &&
679 			qdf_unlikely(vdev->mesh_vdev)) {
680 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
681 				&msdu_info->meta_data[0],
682 				sizeof(struct htt_tx_msdu_desc_ext2_t));
683 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
684 	}
685 
686 	switch (msdu_info->frm_type) {
687 	case dp_tx_frm_sg:
688 	case dp_tx_frm_me:
689 	case dp_tx_frm_raw:
690 		seg_info = msdu_info->u.sg_info.curr_seg;
691 		/* Update the buffer pointers in MSDU Extension Descriptor */
692 		for (i = 0; i < seg_info->frag_cnt; i++) {
693 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
694 				seg_info->frags[i].paddr_lo,
695 				seg_info->frags[i].paddr_hi,
696 				seg_info->frags[i].len);
697 		}
698 
699 		break;
700 
701 	case dp_tx_frm_tso:
702 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
703 				&cached_ext_desc[0]);
704 		break;
705 
706 
707 	default:
708 		break;
709 	}
710 
711 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
712 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
713 
714 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
715 			msdu_ext_desc->vaddr);
716 
717 	return msdu_ext_desc;
718 }
719 
720 /**
721  * dp_tx_trace_pkt() - Trace TX packet at DP layer
722  *
723  * @skb: skb to be traced
724  * @msdu_id: msdu_id of the packet
725  * @vdev_id: vdev_id of the packet
726  *
727  * Return: None
728  */
729 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
730 			    uint8_t vdev_id)
731 {
732 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
733 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
734 	DPTRACE(qdf_dp_trace_ptr(skb,
735 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
736 				 QDF_TRACE_DEFAULT_PDEV_ID,
737 				 qdf_nbuf_data_addr(skb),
738 				 sizeof(qdf_nbuf_data(skb)),
739 				 msdu_id, vdev_id));
740 
741 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
742 
743 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
744 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
745 				      msdu_id, QDF_TX));
746 }
747 
748 /**
749  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
750  * @vdev: DP vdev handle
751  * @nbuf: skb
752  * @desc_pool_id: Descriptor pool ID
753  * @msdu_info: MSDU information (including metadata to the fw)
754  * @tx_exc_metadata: Handle that holds exception path metadata
755  * Allocate and prepare Tx descriptor with msdu information.
756  *
757  * Return: Pointer to Tx Descriptor on success,
758  *         NULL on failure
759  */
760 static
761 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
762 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
763 		struct dp_tx_msdu_info_s *msdu_info,
764 		struct cdp_tx_exception_metadata *tx_exc_metadata)
765 {
766 	uint8_t align_pad;
767 	uint8_t is_exception = 0;
768 	uint8_t htt_hdr_size;
769 	qdf_ether_header_t *eh;
770 	struct dp_tx_desc_s *tx_desc;
771 	struct dp_pdev *pdev = vdev->pdev;
772 	struct dp_soc *soc = pdev->soc;
773 
774 	if (dp_tx_limit_check(vdev))
775 		return NULL;
776 
777 	/* Allocate software Tx descriptor */
778 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
779 	if (qdf_unlikely(!tx_desc)) {
780 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
781 		return NULL;
782 	}
783 
784 	dp_tx_outstanding_inc(pdev);
785 
786 	/* Initialize the SW tx descriptor */
787 	tx_desc->nbuf = nbuf;
788 	tx_desc->frm_type = dp_tx_frm_std;
789 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
790 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
791 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
792 	tx_desc->vdev = vdev;
793 	tx_desc->pdev = pdev;
794 	tx_desc->msdu_ext_desc = NULL;
795 	tx_desc->pkt_offset = 0;
796 
797 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
798 
799 	if (qdf_unlikely(vdev->multipass_en)) {
800 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
801 			goto failure;
802 	}
803 
804 	/*
805 	 * For special modes (vdev_type == ocb or mesh), data frames should be
806 	 * transmitted using varying transmit parameters (tx spec) which include
807 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
808 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
809 	 * These frames are sent as exception packets to firmware.
810 	 *
811 	 * HW requirement is that metadata should always point to a
812 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
813 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
814 	 *  to get 8-byte aligned start address along with align_pad added
815 	 *
816 	 *  |-----------------------------|
817 	 *  |                             |
818 	 *  |-----------------------------| <-----Buffer Pointer Address given
819 	 *  |                             |  ^    in HW descriptor (aligned)
820 	 *  |       HTT Metadata          |  |
821 	 *  |                             |  |
822 	 *  |                             |  | Packet Offset given in descriptor
823 	 *  |                             |  |
824 	 *  |-----------------------------|  |
825 	 *  |       Alignment Pad         |  v
826 	 *  |-----------------------------| <----- Actual buffer start address
827 	 *  |        SKB Data             |           (Unaligned)
828 	 *  |                             |
829 	 *  |                             |
830 	 *  |                             |
831 	 *  |                             |
832 	 *  |                             |
833 	 *  |-----------------------------|
834 	 */
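	/*
	 * Worked example of the layout above (illustrative numbers only):
	 * if qdf_nbuf_data(nbuf) ends in 0x...1003, then
	 * align_pad = 0x1003 & 0x7 = 3, so 3 pad bytes are pushed first,
	 * followed by the 8-byte aligned HTT metadata, and
	 * tx_desc->pkt_offset = align_pad + htt_hdr_size tells HW where the
	 * actual payload starts.
	 */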
835 	if (qdf_unlikely((msdu_info->exception_fw)) ||
836 				(vdev->opmode == wlan_op_mode_ocb) ||
837 				(tx_exc_metadata &&
838 				tx_exc_metadata->is_tx_sniffer)) {
839 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
840 
841 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
842 			DP_STATS_INC(vdev,
843 				     tx_i.dropped.headroom_insufficient, 1);
844 			goto failure;
845 		}
846 
847 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
848 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
849 					"qdf_nbuf_push_head failed");
850 			goto failure;
851 		}
852 
853 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
854 				msdu_info);
855 		if (htt_hdr_size == 0)
856 			goto failure;
857 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
858 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
859 		is_exception = 1;
860 	}
861 
862 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
863 				qdf_nbuf_map(soc->osdev, nbuf,
864 					QDF_DMA_TO_DEVICE))) {
865 		/* Handle failure */
866 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
867 				"qdf_nbuf_map failed");
868 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
869 		goto failure;
870 	}
871 
872 	if (qdf_unlikely(vdev->nawds_enabled)) {
873 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
874 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
875 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
876 			is_exception = 1;
877 		}
878 	}
879 
880 #if !TQM_BYPASS_WAR
881 	if (is_exception || tx_exc_metadata)
882 #endif
883 	{
884 		/* Temporary WAR due to TQM VP issues */
885 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
886 		qdf_atomic_inc(&pdev->num_tx_exception);
887 	}
888 
889 	return tx_desc;
890 
891 failure:
892 	dp_tx_desc_release(tx_desc, desc_pool_id);
893 	return NULL;
894 }
895 
896 /**
897  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
898  * @vdev: DP vdev handle
899  * @nbuf: skb
900  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
901  * @desc_pool_id: Descriptor Pool ID
902  *
903  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
904  * information. For frames with fragments, allocate and prepare
905  * an MSDU extension descriptor
906  *
907  * Return: Pointer to Tx Descriptor on success,
908  *         NULL on failure
909  */
910 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
911 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
912 		uint8_t desc_pool_id)
913 {
914 	struct dp_tx_desc_s *tx_desc;
915 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
916 	struct dp_pdev *pdev = vdev->pdev;
917 	struct dp_soc *soc = pdev->soc;
918 
919 	if (dp_tx_limit_check(vdev))
920 		return NULL;
921 
922 	/* Allocate software Tx descriptor */
923 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
924 	if (!tx_desc) {
925 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
926 		return NULL;
927 	}
928 
929 	dp_tx_outstanding_inc(pdev);
930 
931 	/* Initialize the SW tx descriptor */
932 	tx_desc->nbuf = nbuf;
933 	tx_desc->frm_type = msdu_info->frm_type;
934 	tx_desc->tx_encap_type = vdev->tx_encap_type;
935 	tx_desc->vdev = vdev;
936 	tx_desc->pdev = pdev;
937 	tx_desc->pkt_offset = 0;
938 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
939 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
940 
941 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
942 
943 	/* Handle scattered frames - TSO/SG/ME */
944 	/* Allocate and prepare an extension descriptor for scattered frames */
945 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
946 	if (!msdu_ext_desc) {
947 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
948 				"%s Tx Extension Descriptor Alloc Fail",
949 				__func__);
950 		goto failure;
951 	}
952 
953 #if TQM_BYPASS_WAR
954 	/* Temporary WAR due to TQM VP issues */
955 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
956 	qdf_atomic_inc(&pdev->num_tx_exception);
957 #endif
958 	if (qdf_unlikely(msdu_info->exception_fw))
959 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
960 
961 	tx_desc->msdu_ext_desc = msdu_ext_desc;
962 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
963 
964 	return tx_desc;
965 failure:
966 	dp_tx_desc_release(tx_desc, desc_pool_id);
967 	return NULL;
968 }
969 
970 /**
971  * dp_tx_prepare_raw() - Prepare RAW packet TX
972  * @vdev: DP vdev handle
973  * @nbuf: buffer pointer
974  * @seg_info: Pointer to Segment info Descriptor to be prepared
975  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
976  *     descriptor
977  *
978  * Return: nbuf on success, NULL on failure
979  */
980 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
981 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
982 {
983 	qdf_nbuf_t curr_nbuf = NULL;
984 	uint16_t total_len = 0;
985 	qdf_dma_addr_t paddr;
986 	int32_t i;
987 	int32_t mapped_buf_num = 0;
988 
989 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
990 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
991 
992 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
993 
994 	/* Continue only if frames are of DATA type */
995 	if (!DP_FRAME_IS_DATA(qos_wh)) {
996 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
997 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
998 			  "Pkt. recd is not of data type");
999 		goto error;
1000 	}
1001 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1002 	if (vdev->raw_mode_war &&
1003 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1004 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1005 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1006 
1007 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1008 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1009 
1010 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
1011 					QDF_DMA_TO_DEVICE)) {
1012 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1013 				"%s dma map error ", __func__);
1014 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1015 			mapped_buf_num = i;
1016 			goto error;
1017 		}
1018 
1019 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1020 		seg_info->frags[i].paddr_lo = paddr;
1021 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1022 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1023 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1024 		total_len += qdf_nbuf_len(curr_nbuf);
1025 	}
1026 
1027 	seg_info->frag_cnt = i;
1028 	seg_info->total_len = total_len;
1029 	seg_info->next = NULL;
1030 
1031 	sg_info->curr_seg = seg_info;
1032 
1033 	msdu_info->frm_type = dp_tx_frm_raw;
1034 	msdu_info->num_seg = 1;
1035 
1036 	return nbuf;
1037 
1038 error:
1039 	i = 0;
1040 	while (nbuf) {
1041 		curr_nbuf = nbuf;
1042 		if (i < mapped_buf_num) {
1043 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
1044 			i++;
1045 		}
1046 		nbuf = qdf_nbuf_next(nbuf);
1047 		qdf_nbuf_free(curr_nbuf);
1048 	}
1049 	return NULL;
1050 
1051 }
1052 
1053 /**
1054  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1055  * @soc: DP soc handle
1056  * @nbuf: Buffer pointer
1057  *
1058  * unmap the chain of nbufs that belong to this RAW frame.
1059  *
1060  * Return: None
1061  */
1062 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1063 				    qdf_nbuf_t nbuf)
1064 {
1065 	qdf_nbuf_t cur_nbuf = nbuf;
1066 
1067 	do {
1068 		qdf_nbuf_unmap(soc->osdev, cur_nbuf, QDF_DMA_TO_DEVICE);
1069 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1070 	} while (cur_nbuf);
1071 }
1072 
1073 #ifdef VDEV_PEER_PROTOCOL_COUNT
1074 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1075 { \
1076 	qdf_nbuf_t nbuf_local; \
1077 	struct dp_vdev *vdev_local = vdev_hdl; \
1078 	do { \
1079 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1080 			break; \
1081 		nbuf_local = nbuf; \
1082 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1083 			 htt_cmn_pkt_type_raw)) \
1084 			break; \
1085 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1086 			break; \
1087 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1088 			break; \
1089 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1090 						       (nbuf_local), \
1091 						       NULL, 1, 0); \
1092 	} while (0); \
1093 }
1094 #else
1095 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1096 #endif
1097 
1098 /**
1099  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1100  * @soc: DP Soc Handle
1101  * @vdev: DP vdev handle
1102  * @tx_desc: Tx Descriptor Handle
1103  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1104  * @fw_metadata: Metadata to send to Target Firmware along with frame
1105  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1106  * @tx_exc_metadata: Handle that holds exception path meta data
1107  *
1108  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1109  *  from software Tx descriptor
1110  *
1111  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
1112  */
1113 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1114 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
1115 				   uint16_t fw_metadata, uint8_t ring_id,
1116 				   struct cdp_tx_exception_metadata
1117 					*tx_exc_metadata)
1118 {
1119 	uint8_t type;
1120 	uint16_t length;
1121 	void *hal_tx_desc, *hal_tx_desc_cached;
1122 	qdf_dma_addr_t dma_addr;
1123 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
1124 
1125 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1126 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1127 			tx_exc_metadata->sec_type : vdev->sec_type);
1128 
1129 	/* Return Buffer Manager ID */
1130 	uint8_t bm_id = ring_id;
1131 	hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng;
1132 
1133 	hal_tx_desc_cached = (void *) cached_desc;
1134 	qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
1135 
1136 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1137 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
1138 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1139 		dma_addr = tx_desc->msdu_ext_desc->paddr;
1140 	} else {
1141 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
1142 		type = HAL_TX_BUF_TYPE_BUFFER;
1143 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1144 	}
1145 
1146 	qdf_assert_always(dma_addr);
1147 
1148 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1149 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
1150 					dma_addr, bm_id, tx_desc->id,
1151 					type, soc->hal_soc);
1152 
1153 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
1154 		return QDF_STATUS_E_RESOURCES;
1155 
1156 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
1157 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1158 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1159 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1160 				vdev->pdev->lmac_id);
1161 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1162 				    vdev->search_type);
1163 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1164 				     vdev->bss_ast_idx);
1165 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1166 					  vdev->dscp_tid_map_id);
1167 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1168 			sec_type_map[sec_type]);
1169 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1170 				      (vdev->bss_ast_hash & 0xF));
1171 
1172 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1173 			 length, type, (uint64_t)dma_addr,
1174 			 tx_desc->pkt_offset, tx_desc->id);
1175 
1176 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1177 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1178 
1179 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1180 			vdev->hal_desc_addr_search_flags);
1181 
1182 	/* verify checksum offload configuration*/
1183 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1184 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1185 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1186 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1187 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1188 	}
1189 
1190 	if (tid != HTT_TX_EXT_TID_INVALID)
1191 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1192 
1193 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1194 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1195 
1196 
1197 	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
1198 	/* Sync cached descriptor with HW */
1199 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1200 
1201 	if (!hal_tx_desc) {
1202 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1203 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1204 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1205 		return QDF_STATUS_E_RESOURCES;
1206 	}
1207 
1208 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1209 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1210 
1211 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1212 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1213 
1214 	return QDF_STATUS_SUCCESS;
1215 }
1216 
1217 
1218 /**
1219  * dp_cce_classify() - Classify the frame based on CCE rules
1220  * @vdev: DP vdev handle
1221  * @nbuf: skb
1222  *
1223  * Classify frames based on CCE rules
1224  * Return: true if classified,
1225  *         false otherwise
1226  */
1227 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1228 {
1229 	qdf_ether_header_t *eh = NULL;
1230 	uint16_t   ether_type;
1231 	qdf_llc_t *llcHdr;
1232 	qdf_nbuf_t nbuf_clone = NULL;
1233 	qdf_dot3_qosframe_t *qos_wh = NULL;
1234 
1235 	/* for mesh packets don't do any classification */
1236 	if (qdf_unlikely(vdev->mesh_vdev))
1237 		return false;
1238 
1239 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1240 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1241 		ether_type = eh->ether_type;
1242 		llcHdr = (qdf_llc_t *)(nbuf->data +
1243 					sizeof(qdf_ether_header_t));
1244 	} else {
1245 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1246 		/* For encrypted packets don't do any classification */
1247 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1248 			return false;
1249 
1250 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1251 			if (qdf_unlikely(
1252 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1253 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1254 
1255 				ether_type = *(uint16_t *)(nbuf->data
1256 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1257 						+ sizeof(qdf_llc_t)
1258 						- sizeof(ether_type));
1259 				llcHdr = (qdf_llc_t *)(nbuf->data +
1260 						QDF_IEEE80211_4ADDR_HDR_LEN);
1261 			} else {
1262 				ether_type = *(uint16_t *)(nbuf->data
1263 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1264 						+ sizeof(qdf_llc_t)
1265 						- sizeof(ether_type));
1266 				llcHdr = (qdf_llc_t *)(nbuf->data +
1267 					QDF_IEEE80211_3ADDR_HDR_LEN);
1268 			}
1269 
1270 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1271 				&& (ether_type ==
1272 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1273 
1274 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1275 				return true;
1276 			}
1277 		}
1278 
1279 		return false;
1280 	}
1281 
1282 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1283 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1284 				sizeof(*llcHdr));
1285 		nbuf_clone = qdf_nbuf_clone(nbuf);
1286 		if (qdf_unlikely(nbuf_clone)) {
1287 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1288 
1289 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1290 				qdf_nbuf_pull_head(nbuf_clone,
1291 						sizeof(qdf_net_vlanhdr_t));
1292 			}
1293 		}
1294 	} else {
1295 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1296 			nbuf_clone = qdf_nbuf_clone(nbuf);
1297 			if (qdf_unlikely(nbuf_clone)) {
1298 				qdf_nbuf_pull_head(nbuf_clone,
1299 					sizeof(qdf_net_vlanhdr_t));
1300 			}
1301 		}
1302 	}
1303 
1304 	if (qdf_unlikely(nbuf_clone))
1305 		nbuf = nbuf_clone;
1306 
1307 
1308 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1309 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1310 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1311 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1312 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1313 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1314 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1315 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1316 		if (qdf_unlikely(nbuf_clone))
1317 			qdf_nbuf_free(nbuf_clone);
1318 		return true;
1319 	}
1320 
1321 	if (qdf_unlikely(nbuf_clone))
1322 		qdf_nbuf_free(nbuf_clone);
1323 
1324 	return false;
1325 }
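/*
 * Example (illustrative only): an EAPOL frame (ether type 0x888e) in
 * ethernet encap is caught by the qdf_nbuf_is_ipv4_eapol_pkt() check above,
 * so dp_cce_classify() returns true; when CCE is disabled in HW, the callers
 * dp_tx_send_msdu_single()/dp_tx_send_msdu_multiple() then force such frames
 * to DP_VO_TID and mark them with DP_TX_DESC_FLAG_TO_FW.
 */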
1326 
1327 /**
1328  * dp_tx_get_tid() - Obtain TID to be used for this frame
1329  * @vdev: DP vdev handle
1330  * @nbuf: skb
1331  * @msdu_info: msdu_info structure in which the extracted TID is stored
1332  * Extract the DSCP or PCP information from frame and map into TID value.
1333  *
1334  * Return: void
1335  */
1336 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1337 			  struct dp_tx_msdu_info_s *msdu_info)
1338 {
1339 	uint8_t tos = 0, dscp_tid_override = 0;
1340 	uint8_t *hdr_ptr, *L3datap;
1341 	uint8_t is_mcast = 0;
1342 	qdf_ether_header_t *eh = NULL;
1343 	qdf_ethervlan_header_t *evh = NULL;
1344 	uint16_t   ether_type;
1345 	qdf_llc_t *llcHdr;
1346 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1347 
1348 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1349 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1350 		eh = (qdf_ether_header_t *)nbuf->data;
1351 		hdr_ptr = eh->ether_dhost;
1352 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1353 	} else {
1354 		qdf_dot3_qosframe_t *qos_wh =
1355 			(qdf_dot3_qosframe_t *) nbuf->data;
1356 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1357 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1358 		return;
1359 	}
1360 
1361 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1362 	ether_type = eh->ether_type;
1363 
1364 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1365 	/*
1366 	 * Check if packet is dot3 or eth2 type.
1367 	 */
1368 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1369 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1370 				sizeof(*llcHdr));
1371 
1372 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1373 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1374 				sizeof(*llcHdr);
1375 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1376 					+ sizeof(*llcHdr) +
1377 					sizeof(qdf_net_vlanhdr_t));
1378 		} else {
1379 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1380 				sizeof(*llcHdr);
1381 		}
1382 	} else {
1383 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1384 			evh = (qdf_ethervlan_header_t *) eh;
1385 			ether_type = evh->ether_type;
1386 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1387 		}
1388 	}
1389 
1390 	/*
1391 	 * Find priority from IP TOS DSCP field
1392 	 */
1393 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1394 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1395 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1396 			/* Only for unicast frames */
1397 			if (!is_mcast) {
1398 				/* send it on VO queue */
1399 				msdu_info->tid = DP_VO_TID;
1400 			}
1401 		} else {
1402 			/*
1403 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1404 			 * from TOS byte.
1405 			 */
1406 			tos = ip->ip_tos;
1407 			dscp_tid_override = 1;
1408 
1409 		}
1410 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1411 		/* TODO
1412 		 * use flowlabel
1413 		 * igmpmld cases to be handled in phase 2
1414 		 */
1415 		unsigned long ver_pri_flowlabel;
1416 		unsigned long pri;
1417 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1418 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1419 			DP_IPV6_PRIORITY_SHIFT;
1420 		tos = pri;
1421 		dscp_tid_override = 1;
1422 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1423 		msdu_info->tid = DP_VO_TID;
1424 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1425 		/* Only for unicast frames */
1426 		if (!is_mcast) {
1427 			/* send ucast arp on VO queue */
1428 			msdu_info->tid = DP_VO_TID;
1429 		}
1430 	}
1431 
1432 	/*
1433 	 * Assign all MCAST packets to BE
1434 	 */
1435 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1436 		if (is_mcast) {
1437 			tos = 0;
1438 			dscp_tid_override = 1;
1439 		}
1440 	}
1441 
1442 	if (dscp_tid_override == 1) {
1443 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1444 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1445 	}
1446 
1447 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1448 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1449 
1450 	return;
1451 }
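/*
 * Worked example for the DSCP path above, assuming the usual definitions
 * DP_IP_DSCP_SHIFT == 2 and DP_IP_DSCP_MASK == 0x3f from the DP headers
 * (those values are not visible in this file): an IPv4 TOS byte of 0xb8
 * (DSCP 46, EF) gives tos = (0xb8 >> 2) & 0x3f = 46, and the TID is then
 * looked up as pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */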
1452 
1453 /**
1454  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1455  * @vdev: DP vdev handle
1456  * @nbuf: skb
1457  * @msdu_info: msdu_info structure updated with the classified TID
1458  * Software based TID classification is required when more than 2 DSCP-TID
1459  * mapping tables are needed.
1460  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1461  *
1462  * Return: void
1463  */
1464 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1465 			       struct dp_tx_msdu_info_s *msdu_info)
1466 {
1467 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1468 
1469 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1470 
1471 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1472 		return;
1473 
1474 	/* for mesh packets don't do any classification */
1475 	if (qdf_unlikely(vdev->mesh_vdev))
1476 		return;
1477 
1478 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1479 }
1480 
1481 #ifdef FEATURE_WLAN_TDLS
1482 /**
1483  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1484  * @tx_desc: TX descriptor
1485  *
1486  * Return: None
1487  */
1488 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1489 {
1490 	if (tx_desc->vdev) {
1491 		if (tx_desc->vdev->is_tdls_frame) {
1492 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1493 			tx_desc->vdev->is_tdls_frame = false;
1494 		}
1495 	}
1496 }
1497 
1498 /**
1499  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1500  * @tx_desc: TX descriptor
1501  * @vdev: datapath vdev handle
1502  *
1503  * Return: None
1504  */
1505 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1506 					 struct dp_vdev *vdev)
1507 {
1508 	struct hal_tx_completion_status ts = {0};
1509 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1510 
1511 	if (qdf_unlikely(!vdev)) {
1512 		dp_err("vdev is null!");
1513 		return;
1514 	}
1515 
1516 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1517 	if (vdev->tx_non_std_data_callback.func) {
1518 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1519 		vdev->tx_non_std_data_callback.func(
1520 				vdev->tx_non_std_data_callback.ctxt,
1521 				nbuf, ts.status);
1522 		return;
1523 	}
1524 }
1525 #else
1526 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1527 {
1528 }
1529 
1530 static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1531 						struct dp_vdev *vdev)
1532 {
1533 }
1534 #endif
1535 
1536 /**
1537  * dp_tx_frame_is_drop() - checks if the packet is loopback
1538  * @vdev: DP vdev handle
1539  * @srcmac: source MAC address of the frame
1540  * @dstmac: destination MAC address of the frame
1541  * Return: 1 if frame needs to be dropped else 0
1542  */
1543 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1544 {
1545 	struct dp_pdev *pdev = NULL;
1546 	struct dp_ast_entry *src_ast_entry = NULL;
1547 	struct dp_ast_entry *dst_ast_entry = NULL;
1548 	struct dp_soc *soc = NULL;
1549 
1550 	qdf_assert(vdev);
1551 	pdev = vdev->pdev;
1552 	qdf_assert(pdev);
1553 	soc = pdev->soc;
1554 
1555 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1556 				(soc, dstmac, vdev->pdev->pdev_id);
1557 
1558 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1559 				(soc, srcmac, vdev->pdev->pdev_id);
1560 	if (dst_ast_entry && src_ast_entry) {
1561 		if (dst_ast_entry->peer->peer_ids[0] ==
1562 				src_ast_entry->peer->peer_ids[0])
1563 			return 1;
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 /**
1570  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1571  * @vdev: DP vdev handle
1572  * @nbuf: skb
1573  * @msdu_info: MSDU information (TID for overriding the default DSCP-TID
1574  *	       mapping, metadata to the fw and the Tx queue to be used for
1575  *	       this Tx frame)
1576  * @peer_id: peer_id of the peer in case of NAWDS frames
1577  * @tx_exc_metadata: Handle that holds exception path metadata
1578  *
1579  * Return: NULL on success,
1580  *         nbuf when it fails to send
1581  */
1582 qdf_nbuf_t
1583 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1584 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1585 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1586 {
1587 	struct dp_pdev *pdev = vdev->pdev;
1588 	struct dp_soc *soc = pdev->soc;
1589 	struct dp_tx_desc_s *tx_desc;
1590 	QDF_STATUS status;
1591 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1592 	hal_ring_handle_t hal_ring_hdl =
1593 				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1594 	uint16_t htt_tcl_metadata = 0;
1595 	uint8_t tid = msdu_info->tid;
1596 	struct cdp_tid_tx_stats *tid_stats = NULL;
1597 
1598 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1599 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1600 			msdu_info, tx_exc_metadata);
1601 	if (!tx_desc) {
1602 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1603 			  vdev, tx_q->desc_pool_id);
1604 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1605 		tid_stats = &pdev->stats.tid_stats.
1606 			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1607 		tid_stats->swdrop_cnt[TX_DESC_ERR]++;
1608 		return nbuf;
1609 	}
1610 
1611 	if (qdf_unlikely(soc->cce_disable)) {
1612 		if (dp_cce_classify(vdev, nbuf) == true) {
1613 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1614 			tid = DP_VO_TID;
1615 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1616 		}
1617 	}
1618 
1619 	dp_tx_update_tdls_flags(tx_desc);
1620 
1621 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
1622 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1623 				"%s %d : HAL RING Access Failed -- %pK",
1624 				__func__, __LINE__, hal_ring_hdl);
1625 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1626 		tid_stats = &pdev->stats.tid_stats.
1627 			    tid_tx_stats[tx_q->ring_id][tid];
1628 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1629 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1630 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1631 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1632 		goto fail_return;
1633 	}
1634 
1635 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1636 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1637 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1638 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1639 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1640 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1641 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1642 				peer_id);
1643 	} else
1644 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1645 
1646 
1647 	if (msdu_info->exception_fw) {
1648 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1649 	}
1650 
1651 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1652 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1653 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1654 
1655 	if (status != QDF_STATUS_SUCCESS) {
1656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1657 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1658 			  __func__, tx_desc, tx_q->ring_id);
1659 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1660 		tid_stats = &pdev->stats.tid_stats.
1661 			    tid_tx_stats[tx_q->ring_id][tid];
1662 		tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1663 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1664 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1665 		goto fail_return;
1666 	}
1667 
1668 	nbuf = NULL;
1669 
1670 fail_return:
1671 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1672 		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1673 		hif_pm_runtime_put(soc->hif_handle);
1674 	} else {
1675 		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1676 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1677 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1678 	}
1679 
1680 	return nbuf;
1681 }
1682 
1683 /**
1684  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1685  * @vdev: DP vdev handle
1686  * @nbuf: skb
1687  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1688  *
1689  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1690  *
1691  * Return: NULL on success,
1692  *         nbuf when it fails to send
1693  */
1694 #if QDF_LOCK_STATS
1695 noinline
1696 #else
1697 #endif
1698 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1699 				    struct dp_tx_msdu_info_s *msdu_info)
1700 {
1701 	uint8_t i;
1702 	struct dp_pdev *pdev = vdev->pdev;
1703 	struct dp_soc *soc = pdev->soc;
1704 	struct dp_tx_desc_s *tx_desc;
1705 	bool is_cce_classified = false;
1706 	QDF_STATUS status;
1707 	uint16_t htt_tcl_metadata = 0;
1708 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1709 	hal_ring_handle_t hal_ring_hdl =
1710 				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1711 	struct cdp_tid_tx_stats *tid_stats = NULL;
1712 
1713 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
1714 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1715 				"%s %d : HAL RING Access Failed -- %pK",
1716 				__func__, __LINE__, hal_ring_hdl);
1717 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1718 		tid_stats = &pdev->stats.tid_stats.
1719 			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1720 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1721 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1722 		return nbuf;
1723 	}
1724 
1725 	if (qdf_unlikely(soc->cce_disable)) {
1726 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1727 		if (is_cce_classified) {
1728 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1729 			msdu_info->tid = DP_VO_TID;
1730 		}
1731 	}
1732 
1733 	if (msdu_info->frm_type == dp_tx_frm_me)
1734 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1735 
1736 	i = 0;
1737 	/* Print statement to track i and num_seg */
1738 	/*
1739 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1740 	 * descriptors using information in msdu_info
1741 	 */
1742 	while (i < msdu_info->num_seg) {
1743 		/*
1744 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1745 		 * descriptor
1746 		 */
1747 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1748 				tx_q->desc_pool_id);
1749 
1750 		if (!tx_desc) {
1751 			if (msdu_info->frm_type == dp_tx_frm_me) {
1752 				dp_tx_me_free_buf(pdev,
1753 					(void *)(msdu_info->u.sg_info
1754 						.curr_seg->frags[0].vaddr));
1755 				i++;
1756 				continue;
1757 			}
1758 			goto done;
1759 		}
1760 
1761 		if (msdu_info->frm_type == dp_tx_frm_me) {
1762 			tx_desc->me_buffer =
1763 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1764 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1765 		}
1766 
1767 		if (is_cce_classified)
1768 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1769 
1770 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1771 		if (msdu_info->exception_fw) {
1772 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1773 		}
1774 
1775 		/*
1776 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1777 		 */
1778 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1779 			htt_tcl_metadata, tx_q->ring_id, NULL);
1780 
1781 		if (status != QDF_STATUS_SUCCESS) {
1782 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1783 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1784 					__func__, tx_desc, tx_q->ring_id);
1785 
1786 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1787 			tid_stats = &pdev->stats.tid_stats.
1788 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1789 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1790 
1791 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1792 			if (msdu_info->frm_type == dp_tx_frm_me) {
1793 				i++;
1794 				continue;
1795 			}
1796 			goto done;
1797 		}
1798 
1799 		/*
1800 		 * TODO
1801 		 * if tso_info structure can be modified to have curr_seg
1802 		 * as first element, following 2 blocks of code (for TSO and SG)
1803 		 * can be combined into 1
1804 		 */
1805 
1806 		/*
1807 		 * For frames with multiple segments (TSO, ME), jump to next
1808 		 * segment.
1809 		 */
1810 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1811 			if (msdu_info->u.tso_info.curr_seg->next) {
1812 				msdu_info->u.tso_info.curr_seg =
1813 					msdu_info->u.tso_info.curr_seg->next;
1814 
1815 				/*
1816 				 * If this is a jumbo nbuf, then increment the number of
1817 				 * nbuf users for each additional segment of the msdu.
1818 				 * This will ensure that the skb is freed only after
1819 				 * receiving tx completion for all segments of an nbuf
1820 				 */
1821 				qdf_nbuf_inc_users(nbuf);
1822 
1823 				/* Check with MCL if this is needed */
1824 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1825 			}
1826 		}
1827 
1828 		/*
1829 		 * For Multicast-Unicast converted packets,
1830 		 * each converted frame (for a client) is represented as
1831 		 * 1 segment
1832 		 */
1833 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1834 				(msdu_info->frm_type == dp_tx_frm_me)) {
1835 			if (msdu_info->u.sg_info.curr_seg->next) {
1836 				msdu_info->u.sg_info.curr_seg =
1837 					msdu_info->u.sg_info.curr_seg->next;
1838 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1839 			}
1840 		}
1841 		i++;
1842 	}
1843 
1844 	nbuf = NULL;
1845 
1846 done:
1847 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1848 		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1849 		hif_pm_runtime_put(soc->hif_handle);
1850 	} else {
1851 		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1852 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1853 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1854 	}
1855 
1856 	return nbuf;
1857 }
1858 
1859 /**
1860  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1861  *                     for SG frames
1862  * @vdev: DP vdev handle
1863  * @nbuf: skb
1864  * @seg_info: Pointer to Segment info Descriptor to be prepared
1865  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1866  *
1867  * Return: NULL on success,
1868  *         nbuf when it fails to send
1869  */
1870 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1871 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1872 {
1873 	uint32_t cur_frag, nr_frags;
1874 	qdf_dma_addr_t paddr;
1875 	struct dp_tx_sg_info_s *sg_info;
1876 
1877 	sg_info = &msdu_info->u.sg_info;
1878 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1879 
1880 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1881 				QDF_DMA_TO_DEVICE)) {
1882 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1883 				"dma map error");
1884 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1885 
1886 		qdf_nbuf_free(nbuf);
1887 		return NULL;
1888 	}
1889 
1890 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1891 	seg_info->frags[0].paddr_lo = paddr;
1892 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1893 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1894 	seg_info->frags[0].vaddr = (void *) nbuf;
1895 
1896 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1897 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1898 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1899 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1900 					"frag dma map error");
1901 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1902 			qdf_nbuf_free(nbuf);
1903 			return NULL;
1904 		}
1905 
1906 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1907 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1908 		seg_info->frags[cur_frag + 1].paddr_hi =
1909 			((uint64_t) paddr) >> 32;
1910 		seg_info->frags[cur_frag + 1].len =
1911 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1912 	}
1913 
1914 	seg_info->frag_cnt = (cur_frag + 1);
1915 	seg_info->total_len = qdf_nbuf_len(nbuf);
1916 	seg_info->next = NULL;
1917 
1918 	sg_info->curr_seg = seg_info;
1919 
1920 	msdu_info->frm_type = dp_tx_frm_sg;
1921 	msdu_info->num_seg = 1;
1922 
1923 	return nbuf;
1924 }
1925 
1926 /**
1927  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
1928  * @vdev: DP vdev handle
1929  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1930  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
1931  *
 * Return: None
1934  */
1935 static
1936 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
1937 				    struct dp_tx_msdu_info_s *msdu_info,
1938 				    uint16_t ppdu_cookie)
1939 {
1940 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1941 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1942 
1943 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1944 
1945 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
1946 				(msdu_info->meta_data[5], 1);
1947 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
1948 				(msdu_info->meta_data[5], 1);
1949 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
1950 				(msdu_info->meta_data[6], ppdu_cookie);
1951 
1952 	msdu_info->exception_fw = 1;
1953 	msdu_info->is_tx_sniffer = 1;
1954 }
1955 
1956 #ifdef MESH_MODE_SUPPORT
1957 
1958 /**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *				and prepare msdu_info for mesh frames.
1961  * @vdev: DP vdev handle
1962  * @nbuf: skb
1963  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1964  *
1965  * Return: NULL on failure,
1966  *         nbuf when extracted successfully
1967  */
1968 static
1969 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1970 				struct dp_tx_msdu_info_s *msdu_info)
1971 {
1972 	struct meta_hdr_s *mhdr;
1973 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1974 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1975 
1976 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1977 
1978 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1979 		msdu_info->exception_fw = 0;
1980 		goto remove_meta_hdr;
1981 	}
1982 
1983 	msdu_info->exception_fw = 1;
1984 
1985 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1986 
1987 	meta_data->host_tx_desc_pool = 1;
1988 	meta_data->update_peer_cache = 1;
1989 	meta_data->learning_frame = 1;
1990 
1991 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1992 		meta_data->power = mhdr->power;
1993 
1994 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1995 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1996 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1997 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1998 
1999 		meta_data->dyn_bw = 1;
2000 
2001 		meta_data->valid_pwr = 1;
2002 		meta_data->valid_mcs_mask = 1;
2003 		meta_data->valid_nss_mask = 1;
2004 		meta_data->valid_preamble_type  = 1;
2005 		meta_data->valid_retries = 1;
2006 		meta_data->valid_bw_info = 1;
2007 	}
2008 
2009 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2010 		meta_data->encrypt_type = 0;
2011 		meta_data->valid_encrypt_type = 1;
2012 		meta_data->learning_frame = 0;
2013 	}
2014 
2015 	meta_data->valid_key_flags = 1;
2016 	meta_data->key_flags = (mhdr->keyix & 0x3);
2017 
2018 remove_meta_hdr:
2019 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2020 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2021 				"qdf_nbuf_pull_head failed");
2022 		qdf_nbuf_free(nbuf);
2023 		return NULL;
2024 	}
2025 
2026 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2027 
2028 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2029 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2030 			" tid %d to_fw %d",
2031 			__func__, msdu_info->meta_data[0],
2032 			msdu_info->meta_data[1],
2033 			msdu_info->meta_data[2],
2034 			msdu_info->meta_data[3],
2035 			msdu_info->meta_data[4],
2036 			msdu_info->meta_data[5],
2037 			msdu_info->tid, msdu_info->exception_fw);
2038 
2039 	return nbuf;
2040 }
2041 #else
2042 static
2043 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2044 				struct dp_tx_msdu_info_s *msdu_info)
2045 {
2046 	return nbuf;
2047 }
2048 
2049 #endif
2050 
2051 /**
2052  * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
2056  *
2057  */
2058 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2059 {
2060 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2061 			    HTT_INVALID_TID);
2062 	bool invalid_encap_type =
2063 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2064 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2065 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2066 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2067 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2068 			       tx_exc->ppdu_cookie == 0);
2069 
2070 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2071 	    invalid_cookie) {
2072 		return false;
2073 	}
2074 
2075 	return true;
2076 }
2077 
2078 /**
2079  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2080  * @soc: DP soc handle
2081  * @vdev_id: id of DP vdev handle
2082  * @nbuf: skb
2083  * @tx_exc_metadata: Handle that holds exception path meta data
2084  *
2085  * Entry point for Core Tx layer (DP_TX) invoked from
2086  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2087  *
2088  * Return: NULL on success,
2089  *         nbuf when it fails to send
2090  */
2091 qdf_nbuf_t
2092 dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
2093 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2094 {
2095 	qdf_ether_header_t *eh = NULL;
2096 	struct dp_tx_msdu_info_s msdu_info;
2097 	struct dp_vdev *vdev =
2098 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2099 						   vdev_id);
2100 
2101 	if (qdf_unlikely(!vdev))
2102 		goto fail;
2103 
2104 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2105 
2106 	if (!tx_exc_metadata)
2107 		goto fail;
2108 
2109 	msdu_info.tid = tx_exc_metadata->tid;
2110 
2111 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2112 	dp_verbose_debug("skb %pM", nbuf->data);
2113 
2114 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2115 
2116 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2117 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2118 			"Invalid parameters in exception path");
2119 		goto fail;
2120 	}
2121 
2122 	/* Basic sanity checks for unsupported packets */
2123 
2124 	/* MESH mode */
2125 	if (qdf_unlikely(vdev->mesh_vdev)) {
2126 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2127 			"Mesh mode is not supported in exception path");
2128 		goto fail;
2129 	}
2130 
2131 	/* TSO or SG */
2132 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2133 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2135 			  "TSO and SG are not supported in exception path");
2136 
2137 		goto fail;
2138 	}
2139 
2140 	/* RAW */
2141 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2142 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2143 			  "Raw frame is not supported in exception path");
2144 		goto fail;
2145 	}
2146 
2147 
2148 	/* Mcast enhancement*/
2149 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2150 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2151 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2152 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2153 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2154 		}
2155 	}
2156 
2157 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2158 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2159 				 qdf_nbuf_len(nbuf));
2160 
2161 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2162 					       tx_exc_metadata->ppdu_cookie);
2163 	}
2164 
2165 	/*
2166 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2168 	 * dedicated for data and 1 for command.
2169 	 * "queue_id" maps to one hardware ring.
2170 	 *  With each ring, we also associate a unique Tx descriptor pool
2171 	 *  to minimize lock contention for these resources.
2172 	 */
2173 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2174 
2175 	/*  Single linear frame */
2176 	/*
2177 	 * If nbuf is a simple linear frame, use send_single function to
2178 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2179 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2180 	 */
2181 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2182 			tx_exc_metadata->peer_id, tx_exc_metadata);
2183 
2184 	return nbuf;
2185 
2186 fail:
2187 	dp_verbose_debug("pkt send failed");
2188 	return nbuf;
2189 }
2190 
2191 /**
2192  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2193  * @soc: DP soc handle
2194  * @vdev_id: DP vdev handle
2195  * @nbuf: skb
2196  *
2197  * Entry point for Core Tx layer (DP_TX) invoked from
2198  * hard_start_xmit in OSIF/HDD
2199  *
2200  * Return: NULL on success,
2201  *         nbuf when it fails to send
2202  */
2203 #ifdef MESH_MODE_SUPPORT
2204 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2205 			   qdf_nbuf_t nbuf)
2206 {
2207 	struct meta_hdr_s *mhdr;
2208 	qdf_nbuf_t nbuf_mesh = NULL;
2209 	qdf_nbuf_t nbuf_clone = NULL;
2210 	struct dp_vdev *vdev;
2211 	uint8_t no_enc_frame = 0;
2212 
2213 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2214 	if (!nbuf_mesh) {
2215 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2216 				"qdf_nbuf_unshare failed");
2217 		return nbuf;
2218 	}
2219 
2220 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2221 						  vdev_id);
2222 	if (!vdev) {
2223 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2224 				"vdev is NULL for vdev_id %d", vdev_id);
2225 		return nbuf;
2226 	}
2227 
2228 	nbuf = nbuf_mesh;
2229 
2230 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2231 
2232 	if ((vdev->sec_type != cdp_sec_type_none) &&
2233 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2234 		no_enc_frame = 1;
2235 
2236 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2237 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2238 
2239 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2240 		       !no_enc_frame) {
2241 		nbuf_clone = qdf_nbuf_clone(nbuf);
2242 		if (!nbuf_clone) {
2243 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2244 				"qdf_nbuf_clone failed");
2245 			return nbuf;
2246 		}
2247 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2248 	}
2249 
2250 	if (nbuf_clone) {
2251 		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
2252 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2253 		} else {
2254 			qdf_nbuf_free(nbuf_clone);
2255 		}
2256 	}
2257 
2258 	if (no_enc_frame)
2259 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2260 	else
2261 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2262 
2263 	nbuf = dp_tx_send(soc, vdev_id, nbuf);
2264 	if ((!nbuf) && no_enc_frame) {
2265 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2266 	}
2267 
2268 	return nbuf;
2269 }
2270 
2271 #else
2272 
2273 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2274 			   qdf_nbuf_t nbuf)
2275 {
2276 	return dp_tx_send(soc, vdev_id, nbuf);
2277 }
2278 
2279 #endif
2280 
2281 /**
2282  * dp_tx_send() - Transmit a frame on a given VAP
2283  * @soc: DP soc handle
2284  * @vdev_id: id of DP vdev handle
2285  * @nbuf: skb
2286  *
2287  * Entry point for Core Tx layer (DP_TX) invoked from
2288  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2289  * cases
2290  *
2291  * Return: NULL on success,
2292  *         nbuf when it fails to send
2293  */
2294 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
2295 {
2296 	qdf_ether_header_t *eh = NULL;
2297 	struct dp_tx_msdu_info_s msdu_info;
2298 	struct dp_tx_seg_info_s seg_info;
2299 	uint16_t peer_id = HTT_INVALID_PEER;
2300 	qdf_nbuf_t nbuf_mesh = NULL;
2301 	struct dp_vdev *vdev =
2302 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2303 						   vdev_id);
2304 
2305 	if (qdf_unlikely(!vdev))
2306 		return nbuf;
2307 
2308 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2309 	qdf_mem_zero(&seg_info, sizeof(seg_info));
2310 
2311 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2312 
2313 	dp_verbose_debug("skb %pM", nbuf->data);
2314 
2315 	/*
2316 	 * Set Default Host TID value to invalid TID
2317 	 * (TID override disabled)
2318 	 */
2319 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2320 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2321 
2322 	if (qdf_unlikely(vdev->mesh_vdev)) {
2323 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2324 								&msdu_info);
2325 		if (!nbuf_mesh) {
2326 			dp_verbose_debug("Extracting mesh metadata failed");
2327 			return nbuf;
2328 		}
2329 		nbuf = nbuf_mesh;
2330 	}
2331 
2332 	/*
2333 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2335 	 * dedicated for data and 1 for command.
2336 	 * "queue_id" maps to one hardware ring.
2337 	 *  With each ring, we also associate a unique Tx descriptor pool
2338 	 *  to minimize lock contention for these resources.
2339 	 */
2340 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
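
	/*
	 * Illustrative sketch only, not the actual dp_tx_get_queue() logic:
	 * the essential property is that the selected ring and descriptor
	 * pool stay paired, e.g.
	 *
	 *	tx_queue.ring_id      = some_selector(nbuf) % num_data_rings;
	 *	tx_queue.desc_pool_id = tx_queue.ring_id;
	 *
	 * (some_selector and num_data_rings are placeholders), so that
	 * descriptors completing on a ring always return to the pool
	 * dedicated to that ring, minimizing cross-ring lock contention.
	 */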
2341 
2342 	/*
2343 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2344 	 *  Table 1 - Default DSCP-TID mapping table
2345 	 *  Table 2 - 1 DSCP-TID override table
2346 	 *
2347 	 * If we need a different DSCP-TID mapping for this vap,
2348 	 * call tid_classify to extract DSCP/ToS from frame and
2349 	 * map to a TID and store in msdu_info. This is later used
2350 	 * to fill in TCL Input descriptor (per-packet TID override).
2351 	 */
2352 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
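
	/*
	 * Example (typical WMM-style mapping, for illustration only): with a
	 * DSCP-TID override table in use, a frame marked DSCP EF (0x2E)
	 * would usually classify to TID 6 (voice), while best-effort traffic
	 * (DSCP 0) maps to TID 0; the actual mapping comes from the table
	 * configured for this vdev.
	 */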
2353 
2354 	/*
2355 	 * Classify the frame and call corresponding
2356 	 * "prepare" function which extracts the segment (TSO)
2357 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2358 	 * into MSDU_INFO structure which is later used to fill
2359 	 * SW and HW descriptors.
2360 	 */
2361 	if (qdf_nbuf_is_tso(nbuf)) {
2362 		dp_verbose_debug("TSO frame %pK", vdev);
2363 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2364 				 qdf_nbuf_len(nbuf));
2365 
2366 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2367 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2368 					 qdf_nbuf_len(nbuf));
2369 			return nbuf;
2370 		}
2371 
2372 		goto send_multiple;
2373 	}
2374 
2375 	/* SG */
2376 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2377 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2378 
2379 		if (!nbuf)
2380 			return NULL;
2381 
2382 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2383 
2384 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2385 				qdf_nbuf_len(nbuf));
2386 
2387 		goto send_multiple;
2388 	}
2389 
2390 #ifdef ATH_SUPPORT_IQUE
2391 	/* Mcast to Ucast Conversion*/
2392 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2393 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2394 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2395 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2396 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2397 
2398 			DP_STATS_INC_PKT(vdev,
2399 					tx_i.mcast_en.mcast_pkt, 1,
2400 					qdf_nbuf_len(nbuf));
2401 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2402 					QDF_STATUS_SUCCESS) {
2403 				return NULL;
2404 			}
2405 		}
2406 	}
2407 #endif
2408 
2409 	/* RAW */
2410 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2411 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2412 		if (!nbuf)
2413 			return NULL;
2414 
2415 		dp_verbose_debug("Raw frame %pK", vdev);
2416 
2417 		goto send_multiple;
2418 
2419 	}
2420 
2421 	/*  Single linear frame */
2422 	/*
2423 	 * If nbuf is a simple linear frame, use send_single function to
2424 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2425 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2426 	 */
2427 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2428 
2429 	return nbuf;
2430 
2431 send_multiple:
2432 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2433 
2434 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2435 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2436 
2437 	return nbuf;
2438 }
2439 
2440 /**
2441  * dp_tx_reinject_handler() - Tx Reinject Handler
2442  * @tx_desc: software descriptor head pointer
2443  * @status : Tx completion status from HTT descriptor
2444  *
2445  * This function reinjects frames back to Target.
2446  * Todo - Host queue needs to be added
2447  *
2448  * Return: none
2449  */
2450 static
2451 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2452 {
2453 	struct dp_vdev *vdev;
2454 	struct dp_peer *peer = NULL;
2455 	uint32_t peer_id = HTT_INVALID_PEER;
2456 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2457 	qdf_nbuf_t nbuf_copy = NULL;
2458 	struct dp_tx_msdu_info_s msdu_info;
2459 	struct dp_peer *sa_peer = NULL;
2460 	struct dp_ast_entry *ast_entry = NULL;
2461 	struct dp_soc *soc = NULL;
2462 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2463 #ifdef WDS_VENDOR_EXTENSION
2464 	int is_mcast = 0, is_ucast = 0;
2465 	int num_peers_3addr = 0;
2466 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2467 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2468 #endif
2469 
2470 	vdev = tx_desc->vdev;
2471 	soc = vdev->pdev->soc;
2472 
2473 	qdf_assert(vdev);
2474 
2475 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2476 
2477 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2478 
2479 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2480 			"%s Tx reinject path", __func__);
2481 
2482 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2483 			qdf_nbuf_len(tx_desc->nbuf));
2484 
2485 	qdf_spin_lock_bh(&(soc->ast_lock));
2486 
2487 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2488 				(soc,
2489 				 (uint8_t *)(eh->ether_shost),
2490 				 vdev->pdev->pdev_id);
2491 
2492 	if (ast_entry)
2493 		sa_peer = ast_entry->peer;
2494 
2495 	qdf_spin_unlock_bh(&(soc->ast_lock));
2496 
2497 #ifdef WDS_VENDOR_EXTENSION
2498 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2499 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2500 	} else {
2501 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2502 	}
2503 	is_ucast = !is_mcast;
2504 
2505 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2506 		if (peer->bss_peer)
2507 			continue;
2508 
2509 		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
2512 		 * peers that use 4-addr framing for mcast frames will
2513 		 * be duplicated and sent as 4-addr frames below.
2514 		 */
2515 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2516 			num_peers_3addr = 1;
2517 			break;
2518 		}
2519 	}
2520 #endif
2521 
2522 	if (qdf_unlikely(vdev->mesh_vdev)) {
2523 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2524 	} else {
2525 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2526 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2527 #ifdef WDS_VENDOR_EXTENSION
2528 			/*
			 * - if 3-addr STA, then send on BSS Peer
			 * - if Peer WDS enabled and it accepts 4-addr mcast,
			 *   send mcast on that peer only
			 * - if Peer WDS enabled and it accepts 4-addr ucast,
			 *   send ucast on that peer only
2534 			 */
2535 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2536 			 (peer->wds_enabled &&
2537 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2538 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2539 #else
2540 			((peer->bss_peer &&
2541 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2542 				 peer->nawds_enabled)) {
2543 #endif
2544 				peer_id = DP_INVALID_PEER;
2545 
2546 				if (peer->nawds_enabled) {
2547 					peer_id = peer->peer_ids[0];
2548 					if (sa_peer == peer) {
2549 						QDF_TRACE(
2550 							QDF_MODULE_ID_DP,
2551 							QDF_TRACE_LEVEL_DEBUG,
2552 							" %s: multicast packet",
2553 							__func__);
2554 						DP_STATS_INC(peer,
2555 							tx.nawds_mcast_drop, 1);
2556 						continue;
2557 					}
2558 				}
2559 
2560 				nbuf_copy = qdf_nbuf_copy(nbuf);
2561 
2562 				if (!nbuf_copy) {
2563 					QDF_TRACE(QDF_MODULE_ID_DP,
2564 						QDF_TRACE_LEVEL_DEBUG,
2565 						FL("nbuf copy failed"));
2566 					break;
2567 				}
2568 
2569 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2570 						nbuf_copy,
2571 						&msdu_info,
2572 						peer_id,
2573 						NULL);
2574 
2575 				if (nbuf_copy) {
2576 					QDF_TRACE(QDF_MODULE_ID_DP,
2577 						QDF_TRACE_LEVEL_DEBUG,
2578 						FL("pkt send failed"));
2579 					qdf_nbuf_free(nbuf_copy);
2580 				} else {
2581 					if (peer_id != DP_INVALID_PEER)
2582 						DP_STATS_INC_PKT(peer,
2583 							tx.nawds_mcast,
2584 							1, qdf_nbuf_len(nbuf));
2585 				}
2586 			}
2587 		}
2588 	}
2589 
2590 	if (vdev->nawds_enabled) {
2591 		peer_id = DP_INVALID_PEER;
2592 
2593 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2594 					1, qdf_nbuf_len(nbuf));
2595 
2596 		nbuf = dp_tx_send_msdu_single(vdev,
2597 				nbuf,
2598 				&msdu_info,
2599 				peer_id, NULL);
2600 
2601 		if (nbuf) {
2602 			QDF_TRACE(QDF_MODULE_ID_DP,
2603 				QDF_TRACE_LEVEL_DEBUG,
2604 				FL("pkt send failed"));
2605 			qdf_nbuf_free(nbuf);
2606 		}
2607 	} else
2608 		qdf_nbuf_free(nbuf);
2609 
2610 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2611 }
2612 
2613 /**
2614  * dp_tx_inspect_handler() - Tx Inspect Handler
2615  * @tx_desc: software descriptor head pointer
2616  * @status : Tx completion status from HTT descriptor
2617  *
2618  * Handles Tx frames sent back to Host for inspection
2619  * (ProxyARP)
2620  *
2621  * Return: none
2622  */
2623 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2624 {
2625 
2626 	struct dp_soc *soc;
2627 	struct dp_pdev *pdev = tx_desc->pdev;
2628 
2629 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2630 			"%s Tx inspect path",
2631 			__func__);
2632 
2633 	qdf_assert(pdev);
2634 
2635 	soc = pdev->soc;
2636 
2637 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2638 			qdf_nbuf_len(tx_desc->nbuf));
2639 
2640 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2641 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2642 }
2643 
2644 #ifdef FEATURE_PERPKT_INFO
2645 /**
2646  * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency reported to the stack along with the completion
 *                (used when latency capture is enabled)
 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
2656 QDF_STATUS
2657 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2658 				       struct dp_pdev *pdev,
2659 				       struct dp_peer *peer,
2660 				       struct hal_tx_completion_status *ts,
2661 				       qdf_nbuf_t netbuf,
2662 				       uint64_t time_latency)
2663 {
2664 	struct tx_capture_hdr *ppdu_hdr;
2665 	uint16_t peer_id = ts->peer_id;
2666 	uint32_t ppdu_id = ts->ppdu_id;
2667 	uint8_t first_msdu = ts->first_msdu;
2668 	uint8_t last_msdu = ts->last_msdu;
2669 
2670 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2671 			 !pdev->latency_capture_enable))
2672 		return QDF_STATUS_E_NOSUPPORT;
2673 
2674 	if (!peer) {
2675 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2676 				FL("Peer Invalid"));
2677 		return QDF_STATUS_E_INVAL;
2678 	}
2679 
2680 	if (pdev->mcopy_mode) {
2681 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2682 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2683 			return QDF_STATUS_E_INVAL;
2684 		}
2685 
2686 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2687 		pdev->m_copy_id.tx_peer_id = peer_id;
2688 	}
2689 
2690 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2691 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2692 				FL("No headroom"));
2693 		return QDF_STATUS_E_NOMEM;
2694 	}
2695 
2696 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2697 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2698 		     QDF_MAC_ADDR_SIZE);
2699 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2700 		     QDF_MAC_ADDR_SIZE);
2701 	ppdu_hdr->ppdu_id = ppdu_id;
2702 	ppdu_hdr->peer_id = peer_id;
2703 	ppdu_hdr->first_msdu = first_msdu;
2704 	ppdu_hdr->last_msdu = last_msdu;
2705 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2706 		ppdu_hdr->tsf = ts->tsf;
2707 		ppdu_hdr->time_latency = time_latency;
2708 	}
2709 
2710 	return QDF_STATUS_SUCCESS;
2711 }
2712 
2713 
2714 /**
2715  * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send the completion to the stack
 * to free the buffer.
 *
 * Return: none
 */
2725 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2726 					uint16_t peer_id, uint32_t ppdu_id,
2727 					qdf_nbuf_t netbuf)
2728 {
2729 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2730 				netbuf, peer_id,
2731 				WDI_NO_VAL, pdev->pdev_id);
2732 }
2733 #else
2734 static QDF_STATUS
2735 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2736 				       struct dp_pdev *pdev,
2737 				       struct dp_peer *peer,
2738 				       struct hal_tx_completion_status *ts,
2739 				       qdf_nbuf_t netbuf,
2740 				       uint64_t time_latency)
2741 {
2742 	return QDF_STATUS_E_NOSUPPORT;
2743 }
2744 
2745 static void
2746 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2747 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2748 {
2749 }
2750 #endif
2751 
2752 /**
2753  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2754  * @soc: Soc handle
2755  * @desc: software Tx descriptor to be processed
2756  *
2757  * Return: none
2758  */
2759 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2760 				       struct dp_tx_desc_s *desc)
2761 {
2762 	struct dp_vdev *vdev = desc->vdev;
2763 	qdf_nbuf_t nbuf = desc->nbuf;
2764 
2765 	/* nbuf already freed in vdev detach path */
2766 	if (!nbuf)
2767 		return;
2768 
2769 	/* If it is TDLS mgmt, don't unmap or free the frame */
2770 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2771 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2772 
2773 	/* 0 : MSDU buffer, 1 : MLE */
2774 	if (desc->msdu_ext_desc) {
2775 		/* TSO free */
2776 		if (hal_tx_ext_desc_get_tso_enable(
2777 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
2779 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2780 						desc->tso_num_desc);
2781 			qdf_nbuf_free(nbuf);
2782 			return;
2783 		}
2784 	}
2785 
2786 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2787 
2788 	if (qdf_unlikely(!vdev)) {
2789 		qdf_nbuf_free(nbuf);
2790 		return;
2791 	}
2792 
2793 	if (qdf_likely(!vdev->mesh_vdev))
2794 		qdf_nbuf_free(nbuf);
2795 	else {
2796 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2797 			qdf_nbuf_free(nbuf);
2798 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2799 		} else
2800 			vdev->osif_tx_free_ext((nbuf));
2801 	}
2802 }
2803 
2804 #ifdef MESH_MODE_SUPPORT
2805 /**
2806  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2807  *                                         in mesh meta header
2808  * @tx_desc: software descriptor head pointer
2809  * @ts: pointer to tx completion stats
2810  * Return: none
2811  */
2812 static
2813 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2814 		struct hal_tx_completion_status *ts)
2815 {
2816 	struct meta_hdr_s *mhdr;
2817 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2818 
2819 	if (!tx_desc->msdu_ext_desc) {
2820 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2821 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2822 				"netbuf %pK offset %d",
2823 				netbuf, tx_desc->pkt_offset);
2824 			return;
2825 		}
2826 	}
2827 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2828 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2829 			"netbuf %pK offset %lu", netbuf,
2830 			sizeof(struct meta_hdr_s));
2831 		return;
2832 	}
2833 
2834 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2835 	mhdr->rssi = ts->ack_frame_rssi;
2836 	mhdr->channel = tx_desc->pdev->operating_channel;
2837 }
2838 
2839 #else
2840 static
2841 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2842 		struct hal_tx_completion_status *ts)
2843 {
2844 }
2845 
2846 #endif
2847 
2848 /**
 * dp_tx_compute_delay() - Compute per-TID software enqueue, HW transmit
 *				and interframe delays and update delay stats
2851  *
 * @vdev: vdev handle
2853  * @tx_desc: tx descriptor
2854  * @tid: tid value
2855  * @ring_id: TCL or WBM ring number for transmit path
2856  * Return: none
2857  */
2858 static void dp_tx_compute_delay(struct dp_vdev *vdev,
2859 				struct dp_tx_desc_s *tx_desc,
2860 				uint8_t tid, uint8_t ring_id)
2861 {
2862 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
2863 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
2864 
2865 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
2866 		return;
2867 
2868 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
2869 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
2870 	timestamp_hw_enqueue = tx_desc->timestamp;
2871 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
2872 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
2873 					 timestamp_hw_enqueue);
2874 	interframe_delay = (uint32_t)(timestamp_ingress -
2875 				      vdev->prev_tx_enq_tstamp);
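
	/*
	 * Worked example (illustrative values only): ingress at t=100ms,
	 * HW enqueue at t=102ms, completion at t=110ms and the previous
	 * frame's ingress at t=95ms give sw_enqueue_delay = 2ms,
	 * fwhw_transmit_delay = 8ms and interframe_delay = 5ms.
	 */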
2876 
2877 	/*
2878 	 * Delay in software enqueue
2879 	 */
2880 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
2881 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
2882 	/*
2883 	 * Delay between packet enqueued to HW and Tx completion
2884 	 */
2885 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
2886 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
2887 
2888 	/*
2889 	 * Update interframe delay stats calculated at hardstart receive point.
2890 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for the 1st frame.
	 * On the other hand, this helps in avoiding an extra per-packet check
2893 	 * of !vdev->prev_tx_enq_tstamp.
2894 	 */
2895 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
2896 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
2897 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
2898 }
2899 
2900 /**
2901  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2902  *				per wbm ring
2903  *
2904  * @tx_desc: software descriptor head pointer
2905  * @ts: Tx completion status
2906  * @peer: peer handle
2907  * @ring_id: ring number
2908  *
2909  * Return: None
2910  */
2911 static inline void
2912 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
2913 			struct hal_tx_completion_status *ts,
2914 			struct dp_peer *peer, uint8_t ring_id)
2915 {
2916 	struct dp_pdev *pdev = peer->vdev->pdev;
2917 	struct dp_soc *soc = NULL;
2918 	uint8_t mcs, pkt_type;
2919 	uint8_t tid = ts->tid;
2920 	uint32_t length;
2921 	struct cdp_tid_tx_stats *tid_stats;
2922 
2923 	if (!pdev)
2924 		return;
2925 
2926 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
2927 		tid = CDP_MAX_DATA_TIDS - 1;
2928 
2929 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
2930 	soc = pdev->soc;
2931 
2932 	mcs = ts->mcs;
2933 	pkt_type = ts->pkt_type;
2934 
2935 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2936 		dp_err("Release source is not from TQM");
2937 		return;
2938 	}
2939 
2940 	length = qdf_nbuf_len(tx_desc->nbuf);
2941 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2942 
2943 	if (qdf_unlikely(pdev->delay_stats_flag))
2944 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
2945 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2946 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2947 
2948 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2949 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2950 
2951 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2952 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2953 
2954 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2955 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2956 
2957 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2958 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2959 
2960 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2961 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2962 
2963 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2964 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2965 
2966 	/*
2967 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
2968 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
2969 	 * are no completions for failed cases. Hence updating tx_failed from
2970 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
2971 	 * then this has to be removed
2972 	 */
2973 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
2974 				peer->stats.tx.dropped.fw_rem_notx +
2975 				peer->stats.tx.dropped.fw_rem_tx +
2976 				peer->stats.tx.dropped.age_out +
2977 				peer->stats.tx.dropped.fw_reason1 +
2978 				peer->stats.tx.dropped.fw_reason2 +
2979 				peer->stats.tx.dropped.fw_reason3;
2980 
2981 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
2982 		tid_stats->tqm_status_cnt[ts->status]++;
2983 	}
2984 
2985 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2986 		return;
2987 	}
2988 
2989 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2990 
2991 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2992 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2993 
2994 	/*
2995 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2996 	 * Return from here if HTT PPDU events are enabled.
2997 	 */
2998 	if (!(soc->process_tx_status))
2999 		return;
3000 
3001 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3002 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3003 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3004 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3005 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3006 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3007 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3008 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3009 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3010 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3011 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3012 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3013 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3014 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3015 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3016 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3017 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3018 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3019 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3020 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3021 
3022 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3023 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3024 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3025 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3026 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3027 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3028 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3029 
3030 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3031 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3032 			     &peer->stats, ts->peer_id,
3033 			     UPDATE_PEER_STATS, pdev->pdev_id);
3034 #endif
3035 }
3036 
3037 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3038 /**
3039  * dp_tx_flow_pool_lock() - take flow pool lock
3040  * @soc: core txrx main context
3041  * @tx_desc: tx desc
3042  *
3043  * Return: None
3044  */
3045 static inline
3046 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3047 			  struct dp_tx_desc_s *tx_desc)
3048 {
3049 	struct dp_tx_desc_pool_s *pool;
3050 	uint8_t desc_pool_id;
3051 
3052 	desc_pool_id = tx_desc->pool_id;
3053 	pool = &soc->tx_desc[desc_pool_id];
3054 
3055 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3056 }
3057 
3058 /**
3059  * dp_tx_flow_pool_unlock() - release flow pool lock
3060  * @soc: core txrx main context
3061  * @tx_desc: tx desc
3062  *
3063  * Return: None
3064  */
3065 static inline
3066 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3067 			    struct dp_tx_desc_s *tx_desc)
3068 {
3069 	struct dp_tx_desc_pool_s *pool;
3070 	uint8_t desc_pool_id;
3071 
3072 	desc_pool_id = tx_desc->pool_id;
3073 	pool = &soc->tx_desc[desc_pool_id];
3074 
3075 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3076 }
3077 #else
3078 static inline
3079 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3080 {
3081 }
3082 
3083 static inline
3084 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3085 {
3086 }
3087 #endif
3088 
3089 /**
3090  * dp_tx_notify_completion() - Notify tx completion for this desc
3091  * @soc: core txrx main context
3092  * @tx_desc: tx desc
3093  * @netbuf:  buffer
3094  *
3095  * Return: none
3096  */
3097 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3098 					   struct dp_tx_desc_s *tx_desc,
3099 					   qdf_nbuf_t netbuf)
3100 {
3101 	void *osif_dev;
3102 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3103 
3104 	qdf_assert(tx_desc);
3105 
3106 	dp_tx_flow_pool_lock(soc, tx_desc);
3107 
3108 	if (!tx_desc->vdev ||
3109 	    !tx_desc->vdev->osif_vdev) {
3110 		dp_tx_flow_pool_unlock(soc, tx_desc);
3111 		return;
3112 	}
3113 
3114 	osif_dev = tx_desc->vdev->osif_vdev;
3115 	tx_compl_cbk = tx_desc->vdev->tx_comp;
3116 	dp_tx_flow_pool_unlock(soc, tx_desc);
3117 
3118 	if (tx_compl_cbk)
3119 		tx_compl_cbk(netbuf, osif_dev);
3120 }
3121 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: dp peer handle
3124  * @tid: tid value
3125  * @txdesc_ts: timestamp from txdesc
3126  * @ppdu_id: ppdu id
3127  *
3128  * Return: none
3129  */
3130 #ifdef FEATURE_PERPKT_INFO
3131 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3132 					       struct dp_peer *peer,
3133 					       uint8_t tid,
3134 					       uint64_t txdesc_ts,
3135 					       uint32_t ppdu_id)
3136 {
3137 	uint64_t delta_ms;
3138 	struct cdp_tx_sojourn_stats *sojourn_stats;
3139 
3140 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3141 		return;
3142 
3143 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3144 			 tid >= CDP_DATA_TID_MAX))
3145 		return;
3146 
3147 	if (qdf_unlikely(!pdev->sojourn_buf))
3148 		return;
3149 
3150 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3151 		qdf_nbuf_data(pdev->sojourn_buf);
3152 
3153 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
3154 
3155 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3156 				txdesc_ts;
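
	/*
	 * qdf_ewma_tx_lag_add() below folds delta_ms into a per-TID
	 * exponentially weighted moving average, conceptually
	 * avg = (1 - w) * avg + w * delta_ms; the exact weight and
	 * fixed-point precision are defined by the qdf_ewma_tx_lag
	 * implementation, not here.
	 */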
3157 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3158 			    delta_ms);
3159 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3160 	sojourn_stats->num_msdus[tid] = 1;
3161 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3162 		peer->avg_sojourn_msdu[tid].internal;
3163 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3164 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3165 			     WDI_NO_VAL, pdev->pdev_id);
3166 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3167 	sojourn_stats->num_msdus[tid] = 0;
3168 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3169 }
3170 #else
3171 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3172 					       struct dp_peer *peer,
3173 					       uint8_t tid,
3174 					       uint64_t txdesc_ts,
3175 					       uint32_t ppdu_id)
3176 {
3177 }
3178 #endif
3179 
3180 /**
3181  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3182  * @soc: DP Soc handle
3183  * @tx_desc: software Tx descriptor
3184  * @ts : Tx completion status from HAL/HTT descriptor
3185  *
3186  * Return: none
3187  */
3188 static inline void
3189 dp_tx_comp_process_desc(struct dp_soc *soc,
3190 			struct dp_tx_desc_s *desc,
3191 			struct hal_tx_completion_status *ts,
3192 			struct dp_peer *peer)
3193 {
3194 	uint64_t time_latency = 0;
3195 	/*
3196 	 * m_copy/tx_capture modes are not supported for
3197 	 * scatter gather packets
3198 	 */
3199 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3200 		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
3201 				desc->timestamp);
3202 	}
3203 	if (!(desc->msdu_ext_desc)) {
3204 		if (QDF_STATUS_SUCCESS ==
3205 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3206 			return;
3207 		}
3208 
3209 		if (QDF_STATUS_SUCCESS ==
3210 		    dp_get_completion_indication_for_stack(soc,
3211 							   desc->pdev,
3212 							   peer, ts,
3213 							   desc->nbuf,
3214 							   time_latency)) {
3215 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
3216 				       QDF_DMA_TO_DEVICE);
3217 			dp_send_completion_to_stack(soc,
3218 						    desc->pdev,
3219 						    ts->peer_id,
3220 						    ts->ppdu_id,
3221 						    desc->nbuf);
3222 			return;
3223 		}
3224 	}
3225 
3226 	dp_tx_comp_free_buf(soc, desc);
3227 }
3228 
3229 /**
3230  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3231  * @tx_desc: software descriptor head pointer
3232  * @ts: Tx completion status
3233  * @peer: peer handle
3234  * @ring_id: ring number
3235  *
3236  * Return: none
3237  */
3238 static inline
3239 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
3240 				  struct hal_tx_completion_status *ts,
3241 				  struct dp_peer *peer, uint8_t ring_id)
3242 {
3243 	uint32_t length;
3244 	qdf_ether_header_t *eh;
3245 	struct dp_soc *soc = NULL;
3246 	struct dp_vdev *vdev = tx_desc->vdev;
3247 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3248 
3249 	if (!vdev || !nbuf) {
3250 		dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
3251 		goto out;
3252 	}
3253 
3254 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3255 
3256 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3257 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3258 				 QDF_TRACE_DEFAULT_PDEV_ID,
3259 				 qdf_nbuf_data_addr(nbuf),
3260 				 sizeof(qdf_nbuf_data(nbuf)),
3261 				 tx_desc->id,
3262 				 ts->status));
3263 
3264 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3265 				"-------------------- \n"
3266 				"Tx Completion Stats: \n"
3267 				"-------------------- \n"
3268 				"ack_frame_rssi = %d \n"
3269 				"first_msdu = %d \n"
3270 				"last_msdu = %d \n"
3271 				"msdu_part_of_amsdu = %d \n"
3272 				"rate_stats valid = %d \n"
3273 				"bw = %d \n"
3274 				"pkt_type = %d \n"
3275 				"stbc = %d \n"
3276 				"ldpc = %d \n"
3277 				"sgi = %d \n"
3278 				"mcs = %d \n"
3279 				"ofdma = %d \n"
3280 				"tones_in_ru = %d \n"
3281 				"tsf = %d \n"
3282 				"ppdu_id = %d \n"
3283 				"transmit_cnt = %d \n"
3284 				"tid = %d \n"
3285 				"peer_id = %d\n",
3286 				ts->ack_frame_rssi, ts->first_msdu,
3287 				ts->last_msdu, ts->msdu_part_of_amsdu,
3288 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3289 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3290 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3291 				ts->transmit_cnt, ts->tid, ts->peer_id);
3292 
3293 	soc = vdev->pdev->soc;
3294 
3295 	/* Update SoC level stats */
3296 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3297 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3298 
3299 	/* Update per-packet stats for mesh mode */
3300 	if (qdf_unlikely(vdev->mesh_vdev) &&
3301 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3302 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3303 
3304 	length = qdf_nbuf_len(nbuf);
3305 	/* Update peer level stats */
3306 	if (!peer) {
3307 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
3308 				   "peer is null or deletion in progress");
3309 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3310 		goto out;
3311 	}
3312 
3313 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
3314 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3315 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3316 
3317 			if ((peer->vdev->tx_encap_type ==
3318 				htt_cmn_pkt_type_ethernet) &&
3319 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3320 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3321 			}
3322 		}
3323 	} else {
3324 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3325 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3326 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3327 	}
3328 
3329 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
3330 
3331 #ifdef QCA_SUPPORT_RDK_STATS
3332 	if (soc->wlanstats_enabled)
3333 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3334 					    tx_desc->timestamp,
3335 					    ts->ppdu_id);
3336 #endif
3337 
3338 out:
3339 	return;
3340 }
3341 /**
3342  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3343  * @soc: core txrx main context
3344  * @comp_head: software descriptor head pointer
3345  * @ring_id: ring number
3346  *
3347  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3348  * and release the software descriptors after processing is complete
3349  *
3350  * Return: none
3351  */
3352 static void
3353 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3354 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
3355 {
3356 	struct dp_tx_desc_s *desc;
3357 	struct dp_tx_desc_s *next;
3358 	struct hal_tx_completion_status ts = {0};
3359 	struct dp_peer *peer;
3360 	qdf_nbuf_t netbuf;
3361 
3362 	desc = comp_head;
3363 
3364 	while (desc) {
3365 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3366 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3367 		dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);
3368 
3369 		netbuf = desc->nbuf;
3370 		/* check tx complete notification */
3371 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3372 			dp_tx_notify_completion(soc, desc, netbuf);
3373 
3374 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3375 
3376 		if (peer)
3377 			dp_peer_unref_del_find_by_id(peer);
3378 
3379 		next = desc->next;
3380 
3381 		dp_tx_desc_release(desc, desc->pool_id);
3382 		desc = next;
3383 	}
3384 
3385 }
3386 
3387 /**
3388  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3389  * @tx_desc: software descriptor head pointer
3390  * @status : Tx completion status from HTT descriptor
3391  * @ring_id: ring number
3392  *
3393  * This function will process HTT Tx indication messages from Target
3394  *
3395  * Return: none
3396  */
3397 static
3398 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
3399 				  uint8_t ring_id)
3400 {
3401 	uint8_t tx_status;
3402 	struct dp_pdev *pdev;
3403 	struct dp_vdev *vdev;
3404 	struct dp_soc *soc;
3405 	struct hal_tx_completion_status ts = {0};
3406 	uint32_t *htt_desc = (uint32_t *)status;
3407 	struct dp_peer *peer;
3408 	struct cdp_tid_tx_stats *tid_stats = NULL;
3409 	struct htt_soc *htt_handle;
3410 
3411 	qdf_assert(tx_desc->pdev);
3412 
3413 	pdev = tx_desc->pdev;
3414 	vdev = tx_desc->vdev;
3415 	soc = pdev->soc;
3416 
3417 	if (!vdev)
3418 		return;
3419 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3420 	htt_handle = (struct htt_soc *)soc->htt_handle;
3421 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
3422 
3423 	switch (tx_status) {
3424 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3425 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3426 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3427 	{
3428 		uint8_t tid;
3429 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3430 			ts.peer_id =
3431 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3432 						htt_desc[2]);
3433 			ts.tid =
3434 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3435 						htt_desc[2]);
3436 		} else {
3437 			ts.peer_id = HTT_INVALID_PEER;
3438 			ts.tid = HTT_INVALID_TID;
3439 		}
3440 		ts.ppdu_id =
3441 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3442 					htt_desc[1]);
3443 		ts.ack_frame_rssi =
3444 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3445 					htt_desc[1]);
3446 
3447 		ts.first_msdu = 1;
3448 		ts.last_msdu = 1;
3449 		tid = ts.tid;
3450 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3451 			tid = CDP_MAX_DATA_TIDS - 1;
3452 
3453 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3454 
3455 		if (qdf_unlikely(pdev->delay_stats_flag))
3456 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
3457 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
3458 			tid_stats->htt_status_cnt[tx_status]++;
3459 		}
3460 
		peer = dp_peer_find_by_id(soc, ts.peer_id);

		dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		/*
		 * Drop the peer reference only after the descriptor has
		 * been processed, as done in dp_tx_comp_process_desc_list().
		 */
		if (qdf_likely(peer))
			dp_peer_unref_del_find_by_id(peer);
3469 
3470 		break;
3471 	}
3472 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3473 	{
3474 		dp_tx_reinject_handler(tx_desc, status);
3475 		break;
3476 	}
3477 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3478 	{
3479 		dp_tx_inspect_handler(tx_desc, status);
3480 		break;
3481 	}
3482 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3483 	{
3484 		dp_tx_mec_handler(vdev, status);
3485 		break;
3486 	}
3487 	default:
3488 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3489 			  "%s Invalid HTT tx_status %d\n",
3490 			  __func__, tx_status);
3491 		break;
3492 	}
3493 }
3494 
3495 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3496 static inline
3497 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3498 {
3499 	bool limit_hit = false;
3500 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3501 
3502 	limit_hit =
3503 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
3504 
3505 	if (limit_hit)
3506 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3507 
3508 	return limit_hit;
3509 }
3510 
3511 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3512 {
3513 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3514 }
3515 #else
3516 static inline
3517 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3518 {
3519 	return false;
3520 }
3521 
3522 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3523 {
3524 	return false;
3525 }
3526 #endif
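
/*
 * Example (illustrative sketch only, kept under #if 0): how the two helpers
 * above are meant to be combined in a completion reap loop. reap_one_desc()
 * is a hypothetical stand-in for the per-descriptor work performed in
 * dp_tx_comp_handler() below.
 */
#if 0
static void dp_tx_comp_reap_sketch(struct dp_soc *soc, uint32_t quota)
{
	int num_reaped = 0;

	while (num_reaped < quota) {
		if (!reap_one_desc(soc))	/* hypothetical helper */
			break;

		num_reaped++;

		/* Yield once the per-loop packet limit is hit */
		if (dp_tx_comp_loop_pkt_limit_hit(soc, num_reaped))
			break;
	}

	/*
	 * With the end-of-loop data check enabled, the caller would peek
	 * the ring again and re-enter the loop if more completions arrived.
	 */
	if (dp_tx_comp_enable_eol_data_check(soc)) {
		/* re-check the destination ring here */
	}
}
#endif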
3527 
3528 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3529 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
3530 			    uint32_t quota)
3531 {
3532 	void *tx_comp_hal_desc;
3533 	uint8_t buffer_src;
3534 	uint8_t pool_id;
3535 	uint32_t tx_desc_id;
3536 	struct dp_tx_desc_s *tx_desc = NULL;
3537 	struct dp_tx_desc_s *head_desc = NULL;
3538 	struct dp_tx_desc_s *tail_desc = NULL;
3539 	uint32_t num_processed = 0;
3540 	uint32_t count = 0;
3541 	bool force_break = false;
3542 
3543 	DP_HIST_INIT();
3544 
3545 more_data:
3546 	/* Re-initialize local variables to be re-used */
3547 	head_desc = NULL;
3548 	tail_desc = NULL;
3549 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
3550 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
3551 		return 0;
3552 	}
3553 
3554 	/* Find head descriptor from completion ring */
3555 	while (qdf_likely(tx_comp_hal_desc =
3556 			hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) {
3557 
3558 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3559 
3560 		/* If this buffer was not released by TQM or FW, it is not
3561 		 * a Tx completion indication; assert. */
3562 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3563 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3564 			uint8_t wbm_internal_error;
3565 
3566 			dp_err_rl(
3567 				"Tx comp release_src != TQM | FW but from %d",
3568 				buffer_src);
3569 			hal_dump_comp_desc(tx_comp_hal_desc);
3570 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3571 
3572 			/* When WBM sees a NULL buffer_addr_info in any of
3573 			 * the ingress rings, it sends an error indication,
3574 			 * with wbm_internal_error=1, to a specific ring.
3575 			 * The WBM2SW ring used to indicate these errors is
3576 			 * fixed in HW, and that ring is being used as the Tx
3577 			 * completion ring. These errors are not related to
3578 			 * Tx completions and should simply be ignored.
3579 			 */
3580 			wbm_internal_error = hal_get_wbm_internal_error(
3581 							soc->hal_soc,
3582 							tx_comp_hal_desc);
3583 
3584 			if (wbm_internal_error) {
3585 				dp_err_rl("Tx comp wbm_internal_error!!");
3586 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
3587 
3588 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
3589 								buffer_src)
3590 					dp_handle_wbm_internal_error(
3591 						soc,
3592 						tx_comp_hal_desc,
3593 						hal_tx_comp_get_buffer_type(
3594 							tx_comp_hal_desc));
3595 
3596 			} else {
3597 				dp_err_rl("Tx comp wbm_internal_error false");
3598 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
3599 			}
3600 			continue;
3601 		}
3602 
3603 		/* Get descriptor id */
3604 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3605 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3606 			DP_TX_DESC_ID_POOL_OS;
3607 
3608 		/* Find Tx descriptor */
3609 		tx_desc = dp_tx_desc_find(soc, pool_id,
3610 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3611 				DP_TX_DESC_ID_PAGE_OS,
3612 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3613 				DP_TX_DESC_ID_OFFSET_OS);
3614 
3615 		/*
3616 		 * If the descriptor is already freed in vdev_detach,
3617 		 * continue to next descriptor
3618 		 */
3619 		if (!tx_desc->vdev && !tx_desc->flags) {
3620 			QDF_TRACE(QDF_MODULE_ID_DP,
3621 				  QDF_TRACE_LEVEL_INFO,
3622 				  "Descriptor freed in vdev_detach %d",
3623 				  tx_desc_id);
3624 
3625 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3626 			count++;
3627 			continue;
3628 		}
3629 
3630 		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3631 			QDF_TRACE(QDF_MODULE_ID_DP,
3632 				  QDF_TRACE_LEVEL_INFO,
3633 				  "pdev in down state %d",
3634 				  tx_desc_id);
3635 
3636 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3637 			count++;
3638 
3639 			dp_tx_comp_free_buf(soc, tx_desc);
3640 			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3641 			continue;
3642 		}
3643 
3644 		/*
3645 		 * If the release source is FW, process the HTT status
3646 		 */
3647 		if (qdf_unlikely(buffer_src ==
3648 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3649 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3650 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3651 					htt_tx_status);
3652 			dp_tx_process_htt_completion(tx_desc,
3653 					htt_tx_status, ring_id);
3654 		} else {
3655 			/* Pool id mismatch. Error */
3656 			if (tx_desc->pool_id != pool_id) {
3657 				QDF_TRACE(QDF_MODULE_ID_DP,
3658 					QDF_TRACE_LEVEL_FATAL,
3659 					"Tx Comp pool id %d not matched %d",
3660 					pool_id, tx_desc->pool_id);
3661 
3662 				qdf_assert_always(0);
3663 			}
3664 
3665 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3666 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3667 				QDF_TRACE(QDF_MODULE_ID_DP,
3668 					QDF_TRACE_LEVEL_FATAL,
3669 					"Txdesc invalid, flgs = %x,id = %d",
3670 					tx_desc->flags,	tx_desc_id);
3671 				qdf_assert_always(0);
3672 			}
3673 
3674 			/* First ring descriptor of this reap cycle */
3675 			if (!head_desc) {
3676 				head_desc = tx_desc;
3677 				tail_desc = tx_desc;
3678 			}
3679 
3680 			tail_desc->next = tx_desc;
3681 			tx_desc->next = NULL;
3682 			tail_desc = tx_desc;
3683 
3684 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
3685 
3686 			/* Collect hw completion contents */
3687 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3688 					&tx_desc->comp, 1);
3689 
3690 		}
3691 
3692 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3693 
3694 		/*
3695 		 * Stop processing once the processed packet count
3696 		 * exceeds the given quota.
3697 		 */
3698 		if (num_processed >= quota) {
3699 			force_break = true;
3700 			break;
3701 		}
3702 
3703 		count++;
3704 
3705 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
3706 			break;
3707 	}
3708 
3709 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3710 
3711 	/* Process the reaped descriptors */
3712 	if (head_desc)
3713 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
3714 
3715 	if (dp_tx_comp_enable_eol_data_check(soc)) {
3716 		if (!force_break &&
3717 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
3718 						  hal_ring_hdl)) {
3719 			DP_STATS_INC(soc, tx.hp_oos2, 1);
3720 			if (!hif_exec_should_yield(soc->hif_handle,
3721 						   int_ctx->dp_intr_id))
3722 				goto more_data;
3723 		}
3724 	}
3725 	DP_TX_HIST_STATS_PER_PDEV();
3726 
3727 	return num_processed;
3728 }
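
/*
 * Example (illustrative sketch only, kept under #if 0): invoking
 * dp_tx_comp_handler() from an interrupt-service style context with a work
 * budget. The wrapper name and the way the budget is obtained are
 * hypothetical; the dp_tx_comp_handler() signature is the one defined above.
 */
#if 0
static uint32_t dp_tx_comp_service_sketch(struct dp_intr *int_ctx,
					  struct dp_soc *soc,
					  hal_ring_handle_t tx_comp_ring,
					  uint8_t ring_id, uint32_t budget)
{
	uint32_t work_done;

	/* Reap up to 'budget' completions from the given WBM2SW ring */
	work_done = dp_tx_comp_handler(int_ctx, soc, tx_comp_ring,
				       ring_id, budget);

	return work_done;
}
#endif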
3729 
3730 #ifdef FEATURE_WLAN_TDLS
3731 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3732 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3733 {
3734 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3735 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
3736 
3737 	if (!vdev) {
3738 		dp_err("vdev handle for id %d is NULL", vdev_id);
3739 		return NULL;
3740 	}
3741 
3742 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3743 		vdev->is_tdls_frame = true;
3744 
3745 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
3746 }
3747 #endif
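
/*
 * Example (illustrative sketch only, kept under #if 0): sending a TDLS frame
 * through dp_tx_non_std() with OL_TX_SPEC_NO_FREE, which marks the vdev as
 * carrying a TDLS frame before handing the list to dp_tx_send(). The wrapper
 * name is hypothetical.
 */
#if 0
static qdf_nbuf_t dp_tdls_send_sketch(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	/*
	 * A non-NULL return is the unaccepted portion of the list, which
	 * the caller must free or retry.
	 */
	return dp_tx_non_std(soc_hdl, vdev_id, OL_TX_SPEC_NO_FREE, nbuf);
}
#endif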
3748 
3749 /**
3750  * dp_tx_vdev_attach() - attach vdev to dp tx
3751  * @vdev: virtual device instance
3752  *
3753  * Return: QDF_STATUS_SUCCESS: success
3754  *         QDF_STATUS_E_RESOURCES: Error return
3755  */
3756 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3757 {
3758 	int pdev_id;
3759 	/*
3760 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3761 	 */
3762 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3763 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3764 
3765 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3766 			vdev->vdev_id);
3767 
3768 	pdev_id =
3769 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
3770 						       vdev->pdev->pdev_id);
3771 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
3772 
3773 	/*
3774 	 * Set HTT Extension Valid bit to 0 by default
3775 	 */
3776 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3777 
3778 	dp_tx_vdev_update_search_flags(vdev);
3779 
3780 	return QDF_STATUS_SUCCESS;
3781 }
3782 
3783 #ifndef FEATURE_WDS
3784 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3785 {
3786 	return false;
3787 }
3788 #endif
3789 
3790 /**
3791  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3792  * @vdev: virtual device instance
3793  *
3794  * Return: void
3795  *
3796  */
3797 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3798 {
3799 	struct dp_soc *soc = vdev->pdev->soc;
3800 
3801 	/*
3802 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3803 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3804 	 *
3805 	 * Enable AddrY (SA based search) only for non-WDS STA and
3806 	 * ProxySTA VAP (in HKv1) modes.
3807 	 *
3808 	 * In all other VAP modes, only DA based search should be
3809 	 * enabled
3810 	 */
3811 	if (vdev->opmode == wlan_op_mode_sta &&
3812 	    vdev->tdls_link_connected)
3813 		vdev->hal_desc_addr_search_flags =
3814 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3815 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3816 		 !dp_tx_da_search_override(vdev))
3817 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3818 	else
3819 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3820 
3821 	/* Set the search type only when peer map v2 messaging is enabled,
3822 	 * as the search index (AST hash) is available only with v2.
3823 	 */
3825 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3826 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3827 	else
3828 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3829 }
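
/*
 * Example (illustrative sketch only, kept under #if 0): expected outcomes of
 * dp_tx_vdev_update_search_flags() for a few vdev configurations, following
 * the policy documented above. The flag values in the comments are what the
 * function would program; the exercising function itself is hypothetical.
 */
#if 0
static void dp_tx_search_flags_examples(struct dp_vdev *vdev)
{
	/* STA with a TDLS link: ADDRX | ADDRY (DA and SA based search) */
	vdev->opmode = wlan_op_mode_sta;
	vdev->tdls_link_connected = true;
	dp_tx_vdev_update_search_flags(vdev);

	/* Plain STA without DA search override: ADDRY (SA based search) */
	vdev->tdls_link_connected = false;
	dp_tx_vdev_update_search_flags(vdev);

	/* Any other opmode, e.g. AP: ADDRX (DA based search) */
	vdev->opmode = wlan_op_mode_ap;
	dp_tx_vdev_update_search_flags(vdev);
}
#endif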
3830 
3831 static inline bool
3832 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
3833 			  struct dp_vdev *vdev,
3834 			  struct dp_tx_desc_s *tx_desc)
3835 {
3836 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
3837 		return false;
3838 
3839 	/*
3840 	 * If vdev is given, only check whether the desc's vdev
3841 	 * matches. If vdev is NULL, check whether the desc's
3842 	 * pdev matches.
3843 	 */
3844 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
3845 }
3846 
3847 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3848 /**
3849  * dp_tx_desc_flush() - release resources associated
3850  *                      with TX descriptors
3851  *
3852  * @pdev: Handle to DP pdev structure
3853  * @vdev: virtual device instance
3854  * NULL: no specific Vdev is required; check all allocated TX descs
3855  * on this pdev.
3856  * Non-NULL: only check the allocated TX descs associated with this Vdev.
3857  *
3858  * @force_free:
3859  * true: flush the TX descs.
3860  * false: only reset the Vdev in each allocated TX desc
3861  * that is associated with the current Vdev.
3862  *
3863  * This function walks the TX desc pool to flush the outstanding
3864  * TX data or to reset the Vdev to NULL in the associated TX
3865  * descs.
3866  */
3867 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3868 			     struct dp_vdev *vdev,
3869 			     bool force_free)
3870 {
3871 	uint8_t i;
3872 	uint32_t j;
3873 	uint32_t num_desc, page_id, offset;
3874 	uint16_t num_desc_per_page;
3875 	struct dp_soc *soc = pdev->soc;
3876 	struct dp_tx_desc_s *tx_desc = NULL;
3877 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3878 
3879 	if (!vdev && !force_free) {
3880 		dp_err("Reset TX desc vdev, Vdev param is required!");
3881 		return;
3882 	}
3883 
3884 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3885 		tx_desc_pool = &soc->tx_desc[i];
3886 		if (!(tx_desc_pool->pool_size) ||
3887 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3888 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3889 			continue;
3890 
3891 		/*
3892 		 * Add flow pool lock protection in case pool is freed
3893 		 * Take the flow pool lock in case the pool is freed
3894 		 * because all tx_descs get recycled while TX completions
3895 		 * are handled. This is not necessary for a force flush as:
3896 		 * a. a double lock would occur if dp_tx_desc_release is
3897 		 *    also trying to acquire it.
3898 		 * b. dp interrupts have been disabled before the force TX
3899 		 *    desc flush in dp_pdev_deinit().
3900 		if (!force_free)
3901 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
3902 		num_desc = tx_desc_pool->pool_size;
3903 		num_desc_per_page =
3904 			tx_desc_pool->desc_pages.num_element_per_page;
3905 		for (j = 0; j < num_desc; j++) {
3906 			page_id = j / num_desc_per_page;
3907 			offset = j % num_desc_per_page;
3908 
3909 			if (qdf_unlikely(!(tx_desc_pool->
3910 					 desc_pages.cacheable_pages)))
3911 				break;
3912 
3913 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3914 
3915 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3916 				/*
3917 				 * Free TX desc if force free is
3918 				 * required, otherwise only reset vdev
3919 				 * in this TX desc.
3920 				 */
3921 				if (force_free) {
3922 					dp_tx_comp_free_buf(soc, tx_desc);
3923 					dp_tx_desc_release(tx_desc, i);
3924 				} else {
3925 					tx_desc->vdev = NULL;
3926 				}
3927 			}
3928 		}
3929 		if (!force_free)
3930 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
3931 	}
3932 }
3933 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3934 /**
3935  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
3936  *
3937  * @soc: Handle to DP soc structure
3938  * @tx_desc: pointer of one TX desc
3939  * @desc_pool_id: TX Desc pool id
3940  */
3941 static inline void
3942 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
3943 		      uint8_t desc_pool_id)
3944 {
3945 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
3946 
3947 	tx_desc->vdev = NULL;
3948 
3949 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
3950 }
3951 
3952 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3953 			     struct dp_vdev *vdev,
3954 			     bool force_free)
3955 {
3956 	uint8_t i, num_pool;
3957 	uint32_t j;
3958 	uint32_t num_desc, page_id, offset;
3959 	uint16_t num_desc_per_page;
3960 	struct dp_soc *soc = pdev->soc;
3961 	struct dp_tx_desc_s *tx_desc = NULL;
3962 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3963 
3964 	if (!vdev && !force_free) {
3965 		dp_err("Reset TX desc vdev, Vdev param is required!");
3966 		return;
3967 	}
3968 
3969 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3970 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3971 
3972 	for (i = 0; i < num_pool; i++) {
3973 		tx_desc_pool = &soc->tx_desc[i];
3974 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3975 			continue;
3976 
3977 		num_desc_per_page =
3978 			tx_desc_pool->desc_pages.num_element_per_page;
3979 		for (j = 0; j < num_desc; j++) {
3980 			page_id = j / num_desc_per_page;
3981 			offset = j % num_desc_per_page;
3982 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3983 
3984 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3985 				if (force_free) {
3986 					dp_tx_comp_free_buf(soc, tx_desc);
3987 					dp_tx_desc_release(tx_desc, i);
3988 				} else {
3989 					dp_tx_desc_reset_vdev(soc, tx_desc,
3990 							      i);
3991 				}
3992 			}
3993 		}
3994 	}
3995 }
3996 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
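
/*
 * Example (illustrative sketch only, kept under #if 0): the page/offset
 * arithmetic used by both dp_tx_desc_flush() variants when walking a
 * descriptor pool. With num_element_per_page = 64 (for example), descriptor
 * index 130 lives on page 130 / 64 = 2 at offset 130 % 64 = 2. The helper
 * name is hypothetical.
 */
#if 0
static struct dp_tx_desc_s *
dp_tx_desc_at_index_sketch(struct dp_soc *soc, uint8_t pool_id,
			   uint32_t index, uint16_t descs_per_page)
{
	uint32_t page_id = index / descs_per_page;
	uint32_t offset = index % descs_per_page;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}
#endif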
3997 
3998 /**
3999  * dp_tx_vdev_detach() - detach vdev from dp tx
4000  * @vdev: virtual device instance
4001  *
4002  * Return: QDF_STATUS_SUCCESS: success
4003  *         QDF_STATUS_E_RESOURCES: Error return
4004  */
4005 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
4006 {
4007 	struct dp_pdev *pdev = vdev->pdev;
4008 
4009 	/* Reset vdev to NULL in TX descs associated with this Vdev */
4010 	dp_tx_desc_flush(pdev, vdev, false);
4011 	dp_tx_vdev_multipass_deinit(vdev);
4012 
4013 	return QDF_STATUS_SUCCESS;
4014 }
4015 
4016 /**
4017  * dp_tx_pdev_attach() - attach pdev to dp tx
4018  * @pdev: physical device instance
4019  *
4020  * Return: QDF_STATUS_SUCCESS: success
4021  *         QDF_STATUS_E_RESOURCES: Error return
4022  */
4023 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
4024 {
4025 	struct dp_soc *soc = pdev->soc;
4026 
4027 	/* Initialize Flow control counters */
4028 	qdf_atomic_init(&pdev->num_tx_exception);
4029 	qdf_atomic_init(&pdev->num_tx_outstanding);
4030 
4031 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4032 		/* Initialize descriptors in TCL Ring */
4033 		hal_tx_init_data_ring(soc->hal_soc,
4034 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
4035 	}
4036 
4037 	return QDF_STATUS_SUCCESS;
4038 }
4039 
4040 /**
4041  * dp_tx_pdev_detach() - detach pdev from dp tx
4042  * @pdev: physical device instance
4043  *
4044  * Return: QDF_STATUS_SUCCESS: success
4045  *         QDF_STATUS_E_RESOURCES: Error return
4046  */
4047 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
4048 {
4049 	/* flush TX outstanding data per pdev */
4050 	dp_tx_desc_flush(pdev, NULL, true);
4051 	dp_tx_me_exit(pdev);
4052 	return QDF_STATUS_SUCCESS;
4053 }
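
/*
 * Example (illustrative sketch only, kept under #if 0): how the pdev-level
 * TX attach and detach above pair up during pdev bring-up and teardown. The
 * surrounding init/deinit wrapper names are hypothetical.
 */
#if 0
static QDF_STATUS dp_pdev_tx_init_sketch(struct dp_pdev *pdev)
{
	/* Init flow-control counters and, for per-pdev TCL rings, the ring */
	return dp_tx_pdev_attach(pdev);
}

static void dp_pdev_tx_deinit_sketch(struct dp_pdev *pdev)
{
	/* Force-flush outstanding TX descs and tear down ME state */
	dp_tx_pdev_detach(pdev);
}
#endif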
4054 
4055 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4056 /* Pools will be allocated dynamically */
4057 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4058 					int num_desc)
4059 {
4060 	uint8_t i;
4061 
4062 	for (i = 0; i < num_pool; i++) {
4063 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
4064 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
4065 	}
4066 
4067 	return 0;
4068 }
4069 
4070 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4071 {
4072 	uint8_t i;
4073 
4074 	for (i = 0; i < num_pool; i++)
4075 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
4076 }
4077 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4078 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4079 					int num_desc)
4080 {
4081 	uint8_t i;
4082 
4083 	/* Allocate software Tx descriptor pools */
4084 	for (i = 0; i < num_pool; i++) {
4085 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
4086 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4087 					"%s Tx Desc Pool alloc %d failed %pK",
4088 					__func__, i, soc);
4089 			return ENOMEM;
4090 		}
4091 	}
4092 	return 0;
4093 }
4094 
4095 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4096 {
4097 	uint8_t i;
4098 
4099 	for (i = 0; i < num_pool; i++) {
4100 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
4101 		if (dp_tx_desc_pool_free(soc, i)) {
4102 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4103 				"%s Tx Desc Pool Free failed", __func__);
4104 		}
4105 	}
4106 }
4107 
4108 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4109 
4110 #ifndef QCA_MEM_ATTACH_ON_WIFI3
4111 /**
4112  * dp_tso_attach_wifi3() - TSO attach handler
4113  * @txrx_soc: Opaque Dp handle
4114  *
4115  * Reserve TSO descriptor buffers
4116  *
4117  * Return: QDF_STATUS_E_FAILURE on failure or
4118  * QDF_STATUS_SUCCESS on success
4119  */
4120 static
4121 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
4122 {
4123 	return dp_tso_soc_attach(txrx_soc);
4124 }
4125 
4126 /**
4127  * dp_tso_detach_wifi3() - TSO Detach handler
4128  * @txrx_soc: Opaque Dp handle
4129  *
4130  * Deallocate TSO descriptor buffers
4131  *
4132  * Return: QDF_STATUS_E_FAILURE on failure or
4133  * QDF_STATUS_SUCCESS on success
4134  */
4135 static
4136 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
4137 {
4138 	return dp_tso_soc_detach(txrx_soc);
4139 }
4140 #else
4141 static
4142 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
4143 {
4144 	return QDF_STATUS_SUCCESS;
4145 }
4146 
4147 static
4148 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
4149 {
4150 	return QDF_STATUS_SUCCESS;
4151 }
4152 #endif
4153 
4154 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
4155 {
4156 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4157 	uint8_t i;
4158 	uint8_t num_pool;
4159 	uint32_t num_desc;
4160 
4161 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4162 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4163 
4164 	for (i = 0; i < num_pool; i++)
4165 		dp_tx_tso_desc_pool_free(soc, i);
4166 
4167 	dp_info("%s TSO Desc Pool %d Free descs = %d",
4168 		__func__, num_pool, num_desc);
4169 
4170 	for (i = 0; i < num_pool; i++)
4171 		dp_tx_tso_num_seg_pool_free(soc, i);
4172 
4173 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
4174 		__func__, num_pool, num_desc);
4175 
4176 	return QDF_STATUS_SUCCESS;
4177 }
4178 
4179 /**
4180  * dp_tso_soc_attach() - TSO attach handler
4181  * @txrx_soc: Opaque Dp handle
4182  *
4183  * Reserve TSO descriptor buffers
4184  *
4185  * Return: QDF_STATUS_E_FAILURE on failure or
4186  * QDF_STATUS_SUCCESS on success
4187  */
4188 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
4189 {
4190 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4191 	uint8_t i;
4192 	uint8_t num_pool;
4193 	uint32_t num_desc;
4194 
4195 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4196 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4197 
4198 	for (i = 0; i < num_pool; i++) {
4199 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
4200 			dp_err("TSO Desc Pool alloc %d failed %pK",
4201 			       i, soc);
4202 
4203 			return QDF_STATUS_E_FAILURE;
4204 		}
4205 	}
4206 
4207 	dp_info("%s TSO Desc Alloc %d, descs = %d",
4208 		__func__, num_pool, num_desc);
4209 
4210 	for (i = 0; i < num_pool; i++) {
4211 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
4212 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
4213 			       i, soc);
4214 
4215 			return QDF_STATUS_E_FAILURE;
4216 		}
4217 	}
4218 	return QDF_STATUS_SUCCESS;
4219 }
4220 
4221 /**
4222  * dp_tx_soc_detach() - detach soc from dp tx
4223  * @soc: core txrx main context
4224  *
4225  * This function will detach dp tx from the main device context.
4226  * It will free dp tx resources and de-initialize them.
4227  *
4228  * Return: QDF_STATUS_SUCCESS: success
4229  *         QDF_STATUS_E_RESOURCES: Error return
4230  */
4231 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
4232 {
4233 	uint8_t num_pool;
4234 	uint16_t num_desc;
4235 	uint16_t num_ext_desc;
4236 	uint8_t i;
4237 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4238 
4239 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4240 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4241 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4242 
4243 	dp_tx_flow_control_deinit(soc);
4244 	dp_tx_delete_static_pools(soc, num_pool);
4245 
4246 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4247 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
4248 			__func__, num_pool, num_desc);
4249 
4250 	for (i = 0; i < num_pool; i++) {
4251 		if (dp_tx_ext_desc_pool_free(soc, i)) {
4252 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4253 					"%s Tx Ext Desc Pool Free failed",
4254 					__func__);
4255 			return QDF_STATUS_E_RESOURCES;
4256 		}
4257 	}
4258 
4259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4260 			"%s MSDU Ext Desc Pool %d Free descs = %d",
4261 			__func__, num_pool, num_ext_desc);
4262 
4263 	status = dp_tso_detach_wifi3(soc);
4264 	if (status != QDF_STATUS_SUCCESS)
4265 		return status;
4266 
4267 	return QDF_STATUS_SUCCESS;
4268 }
4269 
4270 /**
4271  * dp_tx_soc_attach() - attach soc to dp tx
4272  * @soc: core txrx main context
4273  *
4274  * This function will attach dp tx to the main device context.
4275  * It will allocate dp tx resources and initialize them.
4276  *
4277  * Return: QDF_STATUS_SUCCESS: success
4278  *         QDF_STATUS_E_RESOURCES: Error return
4279  */
4280 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
4281 {
4282 	uint8_t i;
4283 	uint8_t num_pool;
4284 	uint32_t num_desc;
4285 	uint32_t num_ext_desc;
4286 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4287 
4288 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4289 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4290 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4291 
4292 	if (num_pool > MAX_TXDESC_POOLS)
4293 		goto fail;
4294 
4295 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4296 		goto fail;
4297 
4298 	dp_tx_flow_control_init(soc);
4299 
4300 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4301 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
4302 			__func__, num_pool, num_desc);
4303 
4304 	/* Allocate extension tx descriptor pools */
4305 	for (i = 0; i < num_pool; i++) {
4306 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
4307 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4308 				"MSDU Ext Desc Pool alloc %d failed %pK",
4309 				i, soc);
4310 
4311 			goto fail;
4312 		}
4313 	}
4314 
4315 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4316 			"%s MSDU Ext Desc Alloc %d, descs = %d",
4317 			__func__, num_pool, num_ext_desc);
4318 
4319 	status = dp_tso_attach_wifi3((void *)soc);
4320 	if (status != QDF_STATUS_SUCCESS)
4321 		goto fail;
4322 
4323 
4324 	/* Initialize descriptors in TCL Rings */
4325 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4326 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4327 			hal_tx_init_data_ring(soc->hal_soc,
4328 					soc->tcl_data_ring[i].hal_srng);
4329 		}
4330 	}
4331 
4332 	/*
4333 	 * TODO: Add a runtime config option to enable this.
4334 	 *
4335 	 * Due to multiple issues on NPR EMU, enable it selectively
4336 	 * only for NPR EMU; this should be removed once NPR platforms
4337 	 * are stable.
4338 	 */
4340 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4341 
4342 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4343 			"%s HAL Tx init Success", __func__);
4344 
4345 	return QDF_STATUS_SUCCESS;
4346 
4347 fail:
4348 	/* Detach will take care of freeing only allocated resources */
4349 	dp_tx_soc_detach(soc);
4350 	return QDF_STATUS_E_RESOURCES;
4351 }
4352
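/*
 * Example (illustrative sketch only, kept under #if 0): the TX datapath
 * bring-up order implied by the attach routines in this file, assuming a
 * single pdev and vdev. Error unwinding is omitted and the wrapper name is
 * hypothetical.
 */
#if 0
static QDF_STATUS dp_tx_bringup_sketch(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_vdev *vdev)
{
	/* 1. soc level: descriptor pools, flow control, TSO pools */
	if (dp_tx_soc_attach(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	/* 2. pdev level: counters and (optionally) per-pdev TCL ring init */
	if (dp_tx_pdev_attach(pdev) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	/* 3. vdev level: HTT TCL metadata and address-search flags */
	return dp_tx_vdev_attach(vdev);
}
#endif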