xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 #include "enet.h"
34 #include "dp_internal.h"
35 
36 #define DP_TX_QUEUE_MASK 0x3
37 
38 /* TODO Add support in TSO */
39 #define DP_DESC_NUM_FRAG(x) 0
40 
41 /* disable TQM_BYPASS */
42 #define TQM_BYPASS_WAR 0
43 
44 /* invalid peer id for reinject */
45 #define DP_INVALID_PEER 0XFFFE
46 
47 /* mapping between hal encrypt type and cdp_sec_type */
48 #define MAX_CDP_SEC_TYPE 12
49 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
50 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
51 					HAL_TX_ENCRYPT_TYPE_WEP_128,
52 					HAL_TX_ENCRYPT_TYPE_WEP_104,
53 					HAL_TX_ENCRYPT_TYPE_WEP_40,
54 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
55 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
57 					HAL_TX_ENCRYPT_TYPE_WAPI,
58 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
60 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
61 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
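/*
 * Note: sec_type_map is indexed by the cdp_sec_type configured on the vdev
 * (or supplied via exception-path metadata) and yields the HAL encrypt type
 * programmed into the TCL descriptor, as done in dp_tx_hw_enqueue():
 *
 *     hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *                                  sec_type_map[sec_type]);
 */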
62 
63 #ifdef WLAN_TX_PKT_CAPTURE_ENH
64 #include "dp_tx_capture.h"
65 #endif
66 
67 /**
68  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
69  * @vdev: DP Virtual device handle
70  * @nbuf: Buffer pointer
71  * @queue: queue ids container for nbuf
72  *
73  * A Tx queue is described by two IDs: the software descriptor pool id and
74  * the DMA ring id. Depending on the Tx feature set and hardware
75  * configuration, the id combination can differ.
76  * For example -
77  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per CPU id.
78  * Without XPS (lock based resource protection), descriptor pool ids differ
79  * for each vdev, while the DMA ring id is the same as the single pdev id.
80  *
81  * Return: None
82  */
83 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
84 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
85 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
86 {
87 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
88 	queue->desc_pool_id = queue_offset;
89 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
90 
91 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
92 			"%s, pool_id:%d ring_id: %d",
93 			__func__, queue->desc_pool_id, queue->ring_id);
94 
95 	return;
96 }
97 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
98 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
99 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
100 {
101 	/* get flow id */
102 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
103 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
104 
105 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
106 			"%s, pool_id:%d ring_id: %d",
107 			__func__, queue->desc_pool_id, queue->ring_id);
108 
109 	return;
110 }
111 #endif
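/*
 * Illustrative usage sketch (not driver code), assuming the caller owns a
 * dp_vdev and an nbuf:
 *
 *     struct dp_tx_queue tx_q = {0};
 *
 *     dp_tx_get_queue(vdev, nbuf, &tx_q);
 *     // tx_q.desc_pool_id selects the SW Tx descriptor pool and
 *     // tx_q.ring_id selects the TCL data ring used at enqueue time.
 */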
112 
113 #if defined(FEATURE_TSO)
114 /**
115  * dp_tx_tso_unmap_segment() - Unmap TSO segment
116  *
117  * @soc - core txrx main context
118  * @seg_desc - tso segment descriptor
119  * @num_seg_desc - tso number segment descriptor
120  */
121 static void dp_tx_tso_unmap_segment(
122 		struct dp_soc *soc,
123 		struct qdf_tso_seg_elem_t *seg_desc,
124 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
125 {
126 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
127 	if (qdf_unlikely(!seg_desc)) {
128 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
129 			 __func__, __LINE__);
130 		qdf_assert(0);
131 	} else if (qdf_unlikely(!num_seg_desc)) {
132 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
133 			 __func__, __LINE__);
134 		qdf_assert(0);
135 	} else {
136 		bool is_last_seg;
137 		/* no tso segment left to do dma unmap */
138 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
139 			return;
140 
141 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
142 					true : false;
143 		qdf_nbuf_unmap_tso_segment(soc->osdev,
144 					   seg_desc, is_last_seg);
145 		num_seg_desc->num_seg.tso_cmn_num_seg--;
146 	}
147 }
148 
149 /**
150  * dp_tx_tso_desc_release() - Release the tso segment and tso num segment
151  *                            descriptors back to the freelist
152  *
153  * @soc: soc device handle
154  * @tx_desc: Tx software descriptor
155  */
156 static void dp_tx_tso_desc_release(struct dp_soc *soc,
157 				   struct dp_tx_desc_s *tx_desc)
158 {
159 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
160 	if (qdf_unlikely(!tx_desc->tso_desc)) {
161 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
162 			  "%s %d TSO desc is NULL!",
163 			  __func__, __LINE__);
164 		qdf_assert(0);
165 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
166 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
167 			  "%s %d TSO num desc is NULL!",
168 			  __func__, __LINE__);
169 		qdf_assert(0);
170 	} else {
171 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
172 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
173 
174 		/* Add the tso num segment into the free list */
175 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
176 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
177 					    tx_desc->tso_num_desc);
178 			tx_desc->tso_num_desc = NULL;
179 		}
180 
181 		/* Add the tso segment into the free list*/
182 		dp_tx_tso_desc_free(soc,
183 				    tx_desc->pool_id, tx_desc->tso_desc);
184 		tx_desc->tso_desc = NULL;
185 	}
186 }
187 #else
188 static void dp_tx_tso_unmap_segment(
189 		struct dp_soc *soc,
190 		struct qdf_tso_seg_elem_t *seg_desc,
191 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
192 
193 {
194 }
195 
196 static void dp_tx_tso_desc_release(struct dp_soc *soc,
197 				   struct dp_tx_desc_s *tx_desc)
198 {
199 }
200 #endif
201 /**
202  * dp_tx_desc_release() - Release Tx Descriptor
203  * @tx_desc: Tx Descriptor
204  * @desc_pool_id: Descriptor Pool ID
205  *
206  * Deallocate all resources attached to Tx descriptor and free the Tx
207  * descriptor.
208  *
209  * Return: None
210  */
211 static void
212 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
213 {
214 	struct dp_pdev *pdev = tx_desc->pdev;
215 	struct dp_soc *soc;
216 	uint8_t comp_status = 0;
217 
218 	qdf_assert(pdev);
219 
220 	soc = pdev->soc;
221 
222 	if (tx_desc->frm_type == dp_tx_frm_tso)
223 		dp_tx_tso_desc_release(soc, tx_desc);
224 
225 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
226 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
227 
228 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
229 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
230 
231 	qdf_atomic_dec(&pdev->num_tx_outstanding);
232 
233 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
234 		qdf_atomic_dec(&pdev->num_tx_exception);
235 
236 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
237 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
238 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
239 							     soc->hal_soc);
240 	else
241 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
242 
243 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
244 		"Tx Completion Release desc %d status %d outstanding %d",
245 		tx_desc->id, comp_status,
246 		qdf_atomic_read(&pdev->num_tx_outstanding));
247 
248 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
249 	return;
250 }
251 
252 /**
253  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
254  * @vdev: DP vdev Handle
255  * @nbuf: skb
256  *
257  * Prepares and fills HTT metadata in the frame pre-header for special frames
258  * that should be transmitted using varying transmit parameters.
259  * There are 2 VDEV modes that currently need this special metadata -
260  *  1) Mesh Mode
261  *  2) DSRC Mode
262  *
263  * Return: HTT metadata size
264  *
265  */
266 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
267 		uint32_t *meta_data)
268 {
269 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
270 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
271 
272 	uint8_t htt_desc_size;
273 
274 	/* Size rounded up to a multiple of 8 bytes */
275 	uint8_t htt_desc_size_aligned;
276 
277 	uint8_t *hdr = NULL;
278 
279 	/*
280 	 * Metadata - HTT MSDU Extension header
281 	 */
282 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
283 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
284 
285 	if (vdev->mesh_vdev) {
286 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
287 					htt_desc_size_aligned)) {
288 			DP_STATS_INC(vdev,
289 				     tx_i.dropped.headroom_insufficient, 1);
290 			return 0;
291 		}
292 		/* Fill and add HTT metaheader */
293 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
294 		if (!hdr) {
295 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
296 					"Error in filling HTT metadata");
297 
298 			return 0;
299 		}
300 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
301 
302 	} else if (vdev->opmode == wlan_op_mode_ocb) {
303 		/* Todo - Add support for DSRC */
304 	}
305 
306 	return htt_desc_size_aligned;
307 }
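/*
 * Worked example for the alignment above: if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were 36 bytes (illustrative value;
 * the real size comes from the HTT definition), then
 * htt_desc_size_aligned = (36 + 7) & ~0x7 = 40, and 40 bytes of headroom are
 * consumed by qdf_nbuf_push_head() before the metadata is copied in.
 */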
308 
309 /**
310  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
311  * @tso_seg: TSO segment to process
312  * @ext_desc: Pointer to MSDU extension descriptor
313  *
314  * Return: void
315  */
316 #if defined(FEATURE_TSO)
317 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
318 		void *ext_desc)
319 {
320 	uint8_t num_frag;
321 	uint32_t tso_flags;
322 
323 	/*
324 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
325 	 * tcp_flag_mask
326 	 *
327 	 * Checksum enable flags are set in TCL descriptor and not in Extension
328 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
329 	 */
330 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
331 
332 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
333 
334 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
335 		tso_seg->tso_flags.ip_len);
336 
337 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
338 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
339 
340 
341 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
342 		uint32_t lo = 0;
343 		uint32_t hi = 0;
344 
345 		qdf_dmaaddr_to_32s(
346 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
347 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
348 			tso_seg->tso_frags[num_frag].length);
349 	}
350 
351 	return;
352 }
353 #else
354 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
355 		void *ext_desc)
356 {
357 	return;
358 }
359 #endif
360 
361 #if defined(FEATURE_TSO)
362 /**
363  * dp_tx_free_tso_seg_list() - Loop through the tso segments
364  *                             allocated and free them
365  *
366  * @soc: soc handle
367  * @free_seg: list of tso segments
368  * @msdu_info: msdu descriptor
369  *
370  * Return - void
371  */
372 static void dp_tx_free_tso_seg_list(
373 		struct dp_soc *soc,
374 		struct qdf_tso_seg_elem_t *free_seg,
375 		struct dp_tx_msdu_info_s *msdu_info)
376 {
377 	struct qdf_tso_seg_elem_t *next_seg;
378 
379 	while (free_seg) {
380 		next_seg = free_seg->next;
381 		dp_tx_tso_desc_free(soc,
382 				    msdu_info->tx_queue.desc_pool_id,
383 				    free_seg);
384 		free_seg = next_seg;
385 	}
386 }
387 
388 /**
389  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
390  *                                 allocated and free them
391  *
392  * @soc:  soc handle
393  * @free_num_seg: list of tso number segments
394  * @msdu_info: msdu descriptor
395  * Return: void
396  */
397 static void dp_tx_free_tso_num_seg_list(
398 		struct dp_soc *soc,
399 		struct qdf_tso_num_seg_elem_t *free_num_seg,
400 		struct dp_tx_msdu_info_s *msdu_info)
401 {
402 	struct qdf_tso_num_seg_elem_t *next_num_seg;
403 
404 	while (free_num_seg) {
405 		next_num_seg = free_num_seg->next;
406 		dp_tso_num_seg_free(soc,
407 				    msdu_info->tx_queue.desc_pool_id,
408 				    free_num_seg);
409 		free_num_seg = next_num_seg;
410 	}
411 }
412 
413 /**
414  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
415  *                              do dma unmap for each segment
416  *
417  * @soc: soc handle
418  * @free_seg: list of tso segments
419  * @num_seg_desc: tso number segment descriptor
420  *
421  * Return: void
422  */
423 static void dp_tx_unmap_tso_seg_list(
424 		struct dp_soc *soc,
425 		struct qdf_tso_seg_elem_t *free_seg,
426 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
427 {
428 	struct qdf_tso_seg_elem_t *next_seg;
429 
430 	if (qdf_unlikely(!num_seg_desc)) {
431 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
432 		return;
433 	}
434 
435 	while (free_seg) {
436 		next_seg = free_seg->next;
437 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
438 		free_seg = next_seg;
439 	}
440 }
441 
442 /**
443  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
444  *				     free the tso segments descriptor and
445  *				     tso num segments descriptor
446  *
447  * @soc:  soc handle
448  * @msdu_info: msdu descriptor
449  * @tso_seg_unmap: flag to show if dma unmap is necessary
450  *
451  * Return: void
452  */
453 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
454 					  struct dp_tx_msdu_info_s *msdu_info,
455 					  bool tso_seg_unmap)
456 {
457 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
458 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
459 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
460 					tso_info->tso_num_seg_list;
461 
462 	/* do dma unmap for each segment */
463 	if (tso_seg_unmap)
464 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
465 
466 	/* free all tso num segment descriptors (there is typically only one) */
467 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
468 
469 	/* free all tso segment descriptor */
470 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
471 }
472 
473 /**
474  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
475  * @vdev: virtual device handle
476  * @msdu: network buffer
477  * @msdu_info: meta data associated with the msdu
478  *
479  * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
480  */
481 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
482 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
483 {
484 	struct qdf_tso_seg_elem_t *tso_seg;
485 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
486 	struct dp_soc *soc = vdev->pdev->soc;
487 	struct qdf_tso_info_t *tso_info;
488 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
489 
490 	tso_info = &msdu_info->u.tso_info;
491 	tso_info->curr_seg = NULL;
492 	tso_info->tso_seg_list = NULL;
493 	tso_info->num_segs = num_seg;
494 	msdu_info->frm_type = dp_tx_frm_tso;
495 	tso_info->tso_num_seg_list = NULL;
496 
497 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
498 
499 	while (num_seg) {
500 		tso_seg = dp_tx_tso_desc_alloc(
501 				soc, msdu_info->tx_queue.desc_pool_id);
502 		if (tso_seg) {
503 			tso_seg->next = tso_info->tso_seg_list;
504 			tso_info->tso_seg_list = tso_seg;
505 			num_seg--;
506 		} else {
507 			DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
508 				 __func__);
509 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
510 
511 			return QDF_STATUS_E_NOMEM;
512 		}
513 	}
514 
515 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
516 
517 	tso_num_seg = dp_tso_num_seg_alloc(soc,
518 			msdu_info->tx_queue.desc_pool_id);
519 
520 	if (tso_num_seg) {
521 		tso_num_seg->next = tso_info->tso_num_seg_list;
522 		tso_info->tso_num_seg_list = tso_num_seg;
523 	} else {
524 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
525 			 __func__);
526 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
527 
528 		return QDF_STATUS_E_NOMEM;
529 	}
530 
531 	msdu_info->num_seg =
532 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
533 
534 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
535 			msdu_info->num_seg);
536 
537 	if (!(msdu_info->num_seg)) {
538 		/*
539 		 * Free allocated TSO seg desc and number seg desc,
540 		 * do unmap for segments if dma map has done.
541 		 */
542 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
543 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
544 
545 		return QDF_STATUS_E_INVAL;
546 	}
547 
548 	tso_info->curr_seg = tso_info->tso_seg_list;
549 
550 	return QDF_STATUS_SUCCESS;
551 }
552 #else
553 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
554 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
555 {
556 	return QDF_STATUS_E_NOMEM;
557 }
558 #endif
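/*
 * TSO preparation flow implemented by dp_tx_prepare_tso() above (summary):
 *
 *     num_seg = qdf_nbuf_get_tso_num_seg(msdu);
 *     for each segment:  dp_tx_tso_desc_alloc()  -> linked into tso_seg_list
 *     once per msdu:     dp_tso_num_seg_alloc()  -> tso_num_seg_list
 *     qdf_nbuf_get_tso_info() then DMA maps the segments and fills the list;
 *     any failure unwinds through dp_tx_free_remaining_tso_desc().
 */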
559 
560 /**
561  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
562  * @vdev: DP Vdev handle
563  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
564  * @desc_pool_id: Descriptor Pool ID
565  *
566  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
567  */
568 static
569 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
570 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
571 {
572 	uint8_t i;
573 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
574 	struct dp_tx_seg_info_s *seg_info;
575 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
576 	struct dp_soc *soc = vdev->pdev->soc;
577 
578 	/* Allocate an extension descriptor */
579 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
580 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
581 
582 	if (!msdu_ext_desc) {
583 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
584 		return NULL;
585 	}
586 
587 	if (msdu_info->exception_fw &&
588 			qdf_unlikely(vdev->mesh_vdev)) {
589 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
590 				&msdu_info->meta_data[0],
591 				sizeof(struct htt_tx_msdu_desc_ext2_t));
592 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
593 	}
594 
595 	switch (msdu_info->frm_type) {
596 	case dp_tx_frm_sg:
597 	case dp_tx_frm_me:
598 	case dp_tx_frm_raw:
599 		seg_info = msdu_info->u.sg_info.curr_seg;
600 		/* Update the buffer pointers in MSDU Extension Descriptor */
601 		for (i = 0; i < seg_info->frag_cnt; i++) {
602 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
603 				seg_info->frags[i].paddr_lo,
604 				seg_info->frags[i].paddr_hi,
605 				seg_info->frags[i].len);
606 		}
607 
608 		break;
609 
610 	case dp_tx_frm_tso:
611 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
612 				&cached_ext_desc[0]);
613 		break;
614 
615 
616 	default:
617 		break;
618 	}
619 
620 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
621 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
622 
623 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
624 			msdu_ext_desc->vaddr);
625 
626 	return msdu_ext_desc;
627 }
628 
629 /**
630  * dp_tx_trace_pkt() - Trace TX packet at DP layer
631  *
632  * @skb: skb to be traced
633  * @msdu_id: msdu_id of the packet
634  * @vdev_id: vdev_id of the packet
635  *
636  * Return: None
637  */
638 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
639 			    uint8_t vdev_id)
640 {
641 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
642 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
643 	DPTRACE(qdf_dp_trace_ptr(skb,
644 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
645 				 QDF_TRACE_DEFAULT_PDEV_ID,
646 				 qdf_nbuf_data_addr(skb),
647 				 sizeof(qdf_nbuf_data(skb)),
648 				 msdu_id, vdev_id));
649 
650 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
651 
652 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
653 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
654 				      msdu_id, QDF_TX));
655 }
656 
657 #ifdef QCA_512M_CONFIG
658 /**
659  * dp_tx_pdev_pflow_control() - Check if allocated tx descriptors reached max
660  * tx descriptor configured value
661  * @vdev: DP vdev handle
662  *
663  * Return: true if allocated tx descriptors reached max configured value, else
664  * false.
665  */
666 static inline bool
667 dp_tx_pdev_pflow_control(struct dp_vdev *vdev)
668 {
669 	struct dp_pdev *pdev = vdev->pdev;
670 
671 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
672 			pdev->num_tx_allowed) {
673 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
674 			  "%s: queued packets are more than max tx, drop the frame",
675 			  __func__);
676 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
677 		return true;
678 	}
679 
680 	return false;
681 }
682 #else
683 static inline bool
684 dp_tx_pdev_pflow_control(struct dp_vdev *vdev)
685 {
686 	return false;
687 }
688 #endif
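/*
 * Illustration of the check above on 512M profiles: if num_tx_allowed were,
 * say, 4096 (the actual limit comes from the pdev configuration and is only
 * assumed here), then once num_tx_outstanding reaches it, further descriptor
 * allocations are refused and counted under tx_i.dropped.desc_na.
 */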
689 
690 /**
691  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
692  * @vdev: DP vdev handle
693  * @nbuf: skb
694  * @desc_pool_id: Descriptor pool ID
695  * @msdu_info: MSDU information (Tx queue, TID and metadata for the fw)
696  * @tx_exc_metadata: Handle that holds exception path metadata
697  * Allocate and prepare Tx descriptor with msdu information.
698  *
699  * Return: Pointer to Tx Descriptor on success,
700  *         NULL on failure
701  */
702 static
703 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
704 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
705 		struct dp_tx_msdu_info_s *msdu_info,
706 		struct cdp_tx_exception_metadata *tx_exc_metadata)
707 {
708 	uint8_t align_pad;
709 	uint8_t is_exception = 0;
710 	uint8_t htt_hdr_size;
711 	qdf_ether_header_t *eh;
712 	struct dp_tx_desc_s *tx_desc;
713 	struct dp_pdev *pdev = vdev->pdev;
714 	struct dp_soc *soc = pdev->soc;
715 
716 	if (dp_tx_pdev_pflow_control(vdev))
717 		return NULL;
718 
719 	/* Allocate software Tx descriptor */
720 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
721 	if (qdf_unlikely(!tx_desc)) {
722 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
723 		return NULL;
724 	}
725 
726 	/* Flow control/Congestion Control counters */
727 	qdf_atomic_inc(&pdev->num_tx_outstanding);
728 
729 	/* Initialize the SW tx descriptor */
730 	tx_desc->nbuf = nbuf;
731 	tx_desc->frm_type = dp_tx_frm_std;
732 	tx_desc->tx_encap_type = (tx_exc_metadata ?
733 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
734 	tx_desc->vdev = vdev;
735 	tx_desc->pdev = pdev;
736 	tx_desc->msdu_ext_desc = NULL;
737 	tx_desc->pkt_offset = 0;
738 
739 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
740 
741 	/*
742 	 * For special modes (vdev_type == ocb or mesh), data frames should be
743 	 * transmitted using varying transmit parameters (tx spec) which include
744 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
745 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
746 	 * These frames are sent as exception packets to firmware.
747 	 *
748 	 * HW requirement is that metadata should always point to an
749 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
750 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
751 	 *  to get 8-byte aligned start address along with align_pad added
752 	 *
753 	 *  |-----------------------------|
754 	 *  |                             |
755 	 *  |-----------------------------| <-----Buffer Pointer Address given
756 	 *  |                             |  ^    in HW descriptor (aligned)
757 	 *  |       HTT Metadata          |  |
758 	 *  |                             |  |
759 	 *  |                             |  | Packet Offset given in descriptor
760 	 *  |                             |  |
761 	 *  |-----------------------------|  |
762 	 *  |       Alignment Pad         |  v
763 	 *  |-----------------------------| <----- Actual buffer start address
764 	 *  |        SKB Data             |           (Unaligned)
765 	 *  |                             |
766 	 *  |                             |
767 	 *  |                             |
768 	 *  |                             |
769 	 *  |                             |
770 	 *  |-----------------------------|
771 	 */
772 	if (qdf_unlikely((msdu_info->exception_fw)) ||
773 				(vdev->opmode == wlan_op_mode_ocb)) {
774 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
775 
776 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
777 			DP_STATS_INC(vdev,
778 				     tx_i.dropped.headroom_insufficient, 1);
779 			goto failure;
780 		}
781 
782 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
783 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
784 					"qdf_nbuf_push_head failed");
785 			goto failure;
786 		}
787 
788 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
789 				msdu_info->meta_data);
790 		if (htt_hdr_size == 0)
791 			goto failure;
792 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
793 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
794 		is_exception = 1;
795 	}
796 
797 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
798 				qdf_nbuf_map(soc->osdev, nbuf,
799 					QDF_DMA_TO_DEVICE))) {
800 		/* Handle failure */
801 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
802 				"qdf_nbuf_map failed");
803 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
804 		goto failure;
805 	}
806 
807 	if (qdf_unlikely(vdev->nawds_enabled)) {
808 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
809 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
810 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
811 			is_exception = 1;
812 		}
813 	}
814 
815 #if !TQM_BYPASS_WAR
816 	if (is_exception || tx_exc_metadata)
817 #endif
818 	{
819 		/* Temporary WAR due to TQM VP issues */
820 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
821 		qdf_atomic_inc(&pdev->num_tx_exception);
822 	}
823 
824 	return tx_desc;
825 
826 failure:
827 	dp_tx_desc_release(tx_desc, desc_pool_id);
828 	return NULL;
829 }
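/*
 * Cases above that mark the descriptor DP_TX_DESC_FLAG_TO_FW (exception
 * path): mesh/OCB frames carrying HTT metadata, multicast frames on
 * NAWDS-enabled vdevs, frames with caller-supplied exception metadata, and
 * every frame when the TQM_BYPASS_WAR workaround is compiled in.
 */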
830 
831 /**
832  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
833  * @vdev: DP vdev handle
834  * @nbuf: skb
835  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
836  * @desc_pool_id : Descriptor Pool ID
837  *
838  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
839  * information. For frames with fragments, allocate and prepare
840  * an MSDU extension descriptor
841  *
842  * Return: Pointer to Tx Descriptor on success,
843  *         NULL on failure
844  */
845 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
846 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
847 		uint8_t desc_pool_id)
848 {
849 	struct dp_tx_desc_s *tx_desc;
850 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
851 	struct dp_pdev *pdev = vdev->pdev;
852 	struct dp_soc *soc = pdev->soc;
853 
854 	if (dp_tx_pdev_pflow_control(vdev))
855 		return NULL;
856 
857 	/* Allocate software Tx descriptor */
858 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
859 	if (!tx_desc) {
860 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
861 		return NULL;
862 	}
863 
864 	/* Flow control/Congestion Control counters */
865 	qdf_atomic_inc(&pdev->num_tx_outstanding);
866 
867 	/* Initialize the SW tx descriptor */
868 	tx_desc->nbuf = nbuf;
869 	tx_desc->frm_type = msdu_info->frm_type;
870 	tx_desc->tx_encap_type = vdev->tx_encap_type;
871 	tx_desc->vdev = vdev;
872 	tx_desc->pdev = pdev;
873 	tx_desc->pkt_offset = 0;
874 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
875 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
876 
877 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
878 
879 	/* Handle scattered frames - TSO/SG/ME */
880 	/* Allocate and prepare an extension descriptor for scattered frames */
881 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
882 	if (!msdu_ext_desc) {
883 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
884 				"%s Tx Extension Descriptor Alloc Fail",
885 				__func__);
886 		goto failure;
887 	}
888 
889 #if TQM_BYPASS_WAR
890 	/* Temporary WAR due to TQM VP issues */
891 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
892 	qdf_atomic_inc(&pdev->num_tx_exception);
893 #endif
894 	if (qdf_unlikely(msdu_info->exception_fw))
895 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
896 
897 	tx_desc->msdu_ext_desc = msdu_ext_desc;
898 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
899 
900 	return tx_desc;
901 failure:
902 	dp_tx_desc_release(tx_desc, desc_pool_id);
903 	return NULL;
904 }
905 
906 /**
907  * dp_tx_prepare_raw() - Prepare RAW packet TX
908  * @vdev: DP vdev handle
909  * @nbuf: buffer pointer
910  * @seg_info: Pointer to Segment info Descriptor to be prepared
911  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
912  *     descriptor
913  *
914  * Return: nbuf on success, NULL on DMA mapping failure
915  */
916 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
917 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
918 {
919 	qdf_nbuf_t curr_nbuf = NULL;
920 	uint16_t total_len = 0;
921 	qdf_dma_addr_t paddr;
922 	int32_t i;
923 	int32_t mapped_buf_num = 0;
924 
925 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
926 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
927 
928 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
929 
930 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
931 	if (vdev->raw_mode_war &&
932 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
933 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
934 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
935 
936 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
937 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
938 
939 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
940 					QDF_DMA_TO_DEVICE)) {
941 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
942 				"%s dma map error ", __func__);
943 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
944 			mapped_buf_num = i;
945 			goto error;
946 		}
947 
948 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
949 		seg_info->frags[i].paddr_lo = paddr;
950 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
951 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
952 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
953 		total_len += qdf_nbuf_len(curr_nbuf);
954 	}
955 
956 	seg_info->frag_cnt = i;
957 	seg_info->total_len = total_len;
958 	seg_info->next = NULL;
959 
960 	sg_info->curr_seg = seg_info;
961 
962 	msdu_info->frm_type = dp_tx_frm_raw;
963 	msdu_info->num_seg = 1;
964 
965 	return nbuf;
966 
967 error:
968 	i = 0;
969 	while (nbuf) {
970 		curr_nbuf = nbuf;
971 		if (i < mapped_buf_num) {
972 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
973 			i++;
974 		}
975 		nbuf = qdf_nbuf_next(nbuf);
976 		qdf_nbuf_free(curr_nbuf);
977 	}
978 	return NULL;
979 
980 }
981 
982 /**
983  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
984  * @soc: DP Soc Handle
985  * @vdev: DP vdev handle
986  * @tx_desc: Tx Descriptor Handle
987  * @tid: TID from HLOS for overriding default DSCP-TID mapping
988  * @fw_metadata: Metadata to send to Target Firmware along with frame
989  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
990  * @tx_exc_metadata: Handle that holds exception path meta data
991  *
992  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
993  *  from software Tx descriptor
994  *
995  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
996  */
997 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
998 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
999 				   uint16_t fw_metadata, uint8_t ring_id,
1000 				   struct cdp_tx_exception_metadata
1001 					*tx_exc_metadata)
1002 {
1003 	uint8_t type;
1004 	uint16_t length;
1005 	void *hal_tx_desc, *hal_tx_desc_cached;
1006 	qdf_dma_addr_t dma_addr;
1007 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
1008 
1009 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
1010 			tx_exc_metadata->sec_type : vdev->sec_type);
1011 
1012 	/* Return Buffer Manager ID */
1013 	uint8_t bm_id = ring_id;
1014 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
1015 
1016 	hal_tx_desc_cached = (void *) cached_desc;
1017 	qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
1018 
1019 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1020 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
1021 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1022 		dma_addr = tx_desc->msdu_ext_desc->paddr;
1023 	} else {
1024 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
1025 		type = HAL_TX_BUF_TYPE_BUFFER;
1026 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1027 	}
1028 
1029 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1030 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
1031 					dma_addr, bm_id, tx_desc->id,
1032 					type, soc->hal_soc);
1033 
1034 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
1035 		return QDF_STATUS_E_RESOURCES;
1036 
1037 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
1038 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1039 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1040 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1041 				vdev->pdev->lmac_id);
1042 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1043 				    vdev->search_type);
1044 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1045 				     vdev->bss_ast_hash);
1046 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1047 					  vdev->dscp_tid_map_id);
1048 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1049 			sec_type_map[sec_type]);
1050 
1051 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1052 			 length, type, (uint64_t)dma_addr,
1053 			 tx_desc->pkt_offset, tx_desc->id);
1054 
1055 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1056 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1057 
1058 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1059 			vdev->hal_desc_addr_search_flags);
1060 
1061 	/* verify checksum offload configuration */
1062 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1063 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1064 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1065 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1066 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1067 	}
1068 
1069 	if (tid != HTT_TX_EXT_TID_INVALID)
1070 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1071 
1072 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1073 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
1074 
1075 
1076 	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
1077 	/* Sync cached descriptor with HW */
1078 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1079 
1080 	if (!hal_tx_desc) {
1081 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1082 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1083 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1084 		return QDF_STATUS_E_RESOURCES;
1085 	}
1086 
1087 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1088 
1089 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1090 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1091 
1092 	return QDF_STATUS_SUCCESS;
1093 }
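/*
 * Programming order used by dp_tx_hw_enqueue() above: the descriptor is
 * built in a cached buffer (buffer address/length/offset, encap and encrypt
 * type, search/lmac/DSCP-TID table fields, optional checksum, TID and mesh
 * bits) and only then committed to the TCL ring via
 * hal_srng_src_get_next() + hal_tx_desc_sync(). Callers are expected to have
 * started SRNG access beforehand (see dp_tx_send_msdu_single/_multiple()).
 */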
1094 
1095 
1096 /**
1097  * dp_cce_classify() - Classify the frame based on CCE rules
1098  * @vdev: DP vdev handle
1099  * @nbuf: skb
1100  *
1101  * Classify frames based on CCE rules
1102  * Return: true if the frame is classified by the CCE rules,
1103  *         false otherwise
1104  */
1105 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1106 {
1107 	qdf_ether_header_t *eh = NULL;
1108 	uint16_t   ether_type;
1109 	qdf_llc_t *llcHdr;
1110 	qdf_nbuf_t nbuf_clone = NULL;
1111 	qdf_dot3_qosframe_t *qos_wh = NULL;
1112 
1113 	/* for mesh packets don't do any classification */
1114 	if (qdf_unlikely(vdev->mesh_vdev))
1115 		return false;
1116 
1117 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1118 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1119 		ether_type = eh->ether_type;
1120 		llcHdr = (qdf_llc_t *)(nbuf->data +
1121 					sizeof(qdf_ether_header_t));
1122 	} else {
1123 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1124 		/* For encrypted packets don't do any classification */
1125 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1126 			return false;
1127 
1128 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1129 			if (qdf_unlikely(
1130 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1131 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1132 
1133 				ether_type = *(uint16_t *)(nbuf->data
1134 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1135 						+ sizeof(qdf_llc_t)
1136 						- sizeof(ether_type));
1137 				llcHdr = (qdf_llc_t *)(nbuf->data +
1138 						QDF_IEEE80211_4ADDR_HDR_LEN);
1139 			} else {
1140 				ether_type = *(uint16_t *)(nbuf->data
1141 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1142 						+ sizeof(qdf_llc_t)
1143 						- sizeof(ether_type));
1144 				llcHdr = (qdf_llc_t *)(nbuf->data +
1145 					QDF_IEEE80211_3ADDR_HDR_LEN);
1146 			}
1147 
1148 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1149 				&& (ether_type ==
1150 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1151 
1152 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1153 				return true;
1154 			}
1155 		}
1156 
1157 		return false;
1158 	}
1159 
1160 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1161 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1162 				sizeof(*llcHdr));
1163 		nbuf_clone = qdf_nbuf_clone(nbuf);
1164 		if (qdf_unlikely(nbuf_clone)) {
1165 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1166 
1167 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1168 				qdf_nbuf_pull_head(nbuf_clone,
1169 						sizeof(qdf_net_vlanhdr_t));
1170 			}
1171 		}
1172 	} else {
1173 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1174 			nbuf_clone = qdf_nbuf_clone(nbuf);
1175 			if (qdf_unlikely(nbuf_clone)) {
1176 				qdf_nbuf_pull_head(nbuf_clone,
1177 					sizeof(qdf_net_vlanhdr_t));
1178 			}
1179 		}
1180 	}
1181 
1182 	if (qdf_unlikely(nbuf_clone))
1183 		nbuf = nbuf_clone;
1184 
1185 
1186 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1187 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1188 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1189 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1190 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1191 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1192 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1193 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1194 		if (qdf_unlikely(nbuf_clone))
1195 			qdf_nbuf_free(nbuf_clone);
1196 		return true;
1197 	}
1198 
1199 	if (qdf_unlikely(nbuf_clone))
1200 		qdf_nbuf_free(nbuf_clone);
1201 
1202 	return false;
1203 }
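/*
 * Note: when the hardware CCE block is disabled, the classification above is
 * what routes EAPOL, ARP, WAPI, TDLS and DHCP (v4/v6) frames through the FW
 * exception path on the VO TID (see dp_tx_send_msdu_single/_multiple()).
 */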
1204 
1205 /**
1206  * dp_tx_get_tid() - Obtain TID to be used for this frame
1207  * @vdev: DP vdev handle
1208  * @nbuf: skb
1209  *
1210  * Extract the DSCP or PCP information from frame and map into TID value.
1211  *
1212  * Return: void
1213  */
1214 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1215 			  struct dp_tx_msdu_info_s *msdu_info)
1216 {
1217 	uint8_t tos = 0, dscp_tid_override = 0;
1218 	uint8_t *hdr_ptr, *L3datap;
1219 	uint8_t is_mcast = 0;
1220 	qdf_ether_header_t *eh = NULL;
1221 	qdf_ethervlan_header_t *evh = NULL;
1222 	uint16_t   ether_type;
1223 	qdf_llc_t *llcHdr;
1224 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1225 
1226 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1227 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1228 		eh = (qdf_ether_header_t *)nbuf->data;
1229 		hdr_ptr = eh->ether_dhost;
1230 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1231 	} else {
1232 		qdf_dot3_qosframe_t *qos_wh =
1233 			(qdf_dot3_qosframe_t *) nbuf->data;
1234 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1235 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1236 		return;
1237 	}
1238 
1239 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1240 	ether_type = eh->ether_type;
1241 
1242 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1243 	/*
1244 	 * Check if packet is dot3 or eth2 type.
1245 	 */
1246 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1247 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1248 				sizeof(*llcHdr));
1249 
1250 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1251 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1252 				sizeof(*llcHdr);
1253 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1254 					+ sizeof(*llcHdr) +
1255 					sizeof(qdf_net_vlanhdr_t));
1256 		} else {
1257 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1258 				sizeof(*llcHdr);
1259 		}
1260 	} else {
1261 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1262 			evh = (qdf_ethervlan_header_t *) eh;
1263 			ether_type = evh->ether_type;
1264 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1265 		}
1266 	}
1267 
1268 	/*
1269 	 * Find priority from IP TOS DSCP field
1270 	 */
1271 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1272 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1273 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1274 			/* Only for unicast frames */
1275 			if (!is_mcast) {
1276 				/* send it on VO queue */
1277 				msdu_info->tid = DP_VO_TID;
1278 			}
1279 		} else {
1280 			/*
1281 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1282 			 * from TOS byte.
1283 			 */
1284 			tos = ip->ip_tos;
1285 			dscp_tid_override = 1;
1286 
1287 		}
1288 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1289 		/* TODO
1290 		 * use flowlabel
1291 		 * igmp/mld cases to be handled in phase 2
1292 		 */
1293 		unsigned long ver_pri_flowlabel;
1294 		unsigned long pri;
1295 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1296 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1297 			DP_IPV6_PRIORITY_SHIFT;
1298 		tos = pri;
1299 		dscp_tid_override = 1;
1300 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1301 		msdu_info->tid = DP_VO_TID;
1302 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1303 		/* Only for unicast frames */
1304 		if (!is_mcast) {
1305 			/* send ucast arp on VO queue */
1306 			msdu_info->tid = DP_VO_TID;
1307 		}
1308 	}
1309 
1310 	/*
1311 	 * Assign all MCAST packets to BE
1312 	 */
1313 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1314 		if (is_mcast) {
1315 			tos = 0;
1316 			dscp_tid_override = 1;
1317 		}
1318 	}
1319 
1320 	if (dscp_tid_override == 1) {
1321 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1322 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1323 	}
1324 
1325 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1326 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1327 
1328 	return;
1329 }
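/*
 * Worked example for the DSCP path above, assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f (values come from the DP headers): an IPv4 TOS of
 * 0xb8 (DSCP 46, EF) gives tos = (0xb8 >> 2) & 0x3f = 46, and the TID used is
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */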
1330 
1331 /**
1332  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1333  * @vdev: DP vdev handle
1334  * @nbuf: skb
1335  *
1336  * Software based TID classification is required when more than 2 DSCP-TID
1337  * mapping tables are needed.
1338  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1339  *
1340  * Return: void
1341  */
1342 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1343 			       struct dp_tx_msdu_info_s *msdu_info)
1344 {
1345 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1346 
1347 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1348 
1349 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1350 		return;
1351 
1352 	/* for mesh packets don't do any classification */
1353 	if (qdf_unlikely(vdev->mesh_vdev))
1354 		return;
1355 
1356 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1357 }
1358 
1359 #ifdef FEATURE_WLAN_TDLS
1360 /**
1361  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1362  * @tx_desc: TX descriptor
1363  *
1364  * Return: None
1365  */
1366 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1367 {
1368 	if (tx_desc->vdev) {
1369 		if (tx_desc->vdev->is_tdls_frame) {
1370 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1371 			tx_desc->vdev->is_tdls_frame = false;
1372 		}
1373 	}
1374 }
1375 
1376 /**
1377  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1378  * @tx_desc: TX descriptor
1379  * @vdev: datapath vdev handle
1380  *
1381  * Return: None
1382  */
1383 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1384 					 struct dp_vdev *vdev)
1385 {
1386 	struct hal_tx_completion_status ts = {0};
1387 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1388 
1389 	if (qdf_unlikely(!vdev)) {
1390 		dp_err("vdev is null!");
1391 		return;
1392 	}
1393 
1394 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1395 	if (vdev->tx_non_std_data_callback.func) {
1396 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1397 		vdev->tx_non_std_data_callback.func(
1398 				vdev->tx_non_std_data_callback.ctxt,
1399 				nbuf, ts.status);
1400 		return;
1401 	}
1402 }
1403 #else
1404 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1405 {
1406 }
1407 
1408 static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1409 						struct dp_vdev *vdev)
1410 {
1411 }
1412 #endif
1413 
1414 /**
1415  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1416  * @vdev: DP vdev handle
1417  * @nbuf: skb
1418  * @msdu_info: MSDU information carrying the Tx queue, the TID from HLOS
1419  *             (to override the default DSCP-TID mapping) and the metadata
1420  *             for the fw
1421  * @peer_id: peer_id of the peer in case of NAWDS frames
1422  * @tx_exc_metadata: Handle that holds exception path metadata
1423  *
1424  * Return: NULL on success,
1425  *         nbuf when it fails to send
1426  */
1427 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1428 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1429 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1430 {
1431 	struct dp_pdev *pdev = vdev->pdev;
1432 	struct dp_soc *soc = pdev->soc;
1433 	struct dp_tx_desc_s *tx_desc;
1434 	QDF_STATUS status;
1435 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1436 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1437 	uint16_t htt_tcl_metadata = 0;
1438 	uint8_t tid = msdu_info->tid;
1439 	struct cdp_tid_tx_stats *tid_stats = NULL;
1440 
1441 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1442 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1443 			msdu_info, tx_exc_metadata);
1444 	if (!tx_desc) {
1445 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1446 			  vdev, tx_q->desc_pool_id);
1447 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1448 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
1449 		tid_stats->swdrop_cnt[TX_DESC_ERR]++;
1450 		return nbuf;
1451 	}
1452 
1453 	if (qdf_unlikely(soc->cce_disable)) {
1454 		if (dp_cce_classify(vdev, nbuf) == true) {
1455 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1456 			tid = DP_VO_TID;
1457 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1458 		}
1459 	}
1460 
1461 	dp_tx_update_tdls_flags(tx_desc);
1462 
1463 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1464 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1465 				"%s %d : HAL RING Access Failed -- %pK",
1466 				__func__, __LINE__, hal_srng);
1467 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1468 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
1469 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1470 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1471 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1472 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1473 		goto fail_return;
1474 	}
1475 
1476 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1477 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1478 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1479 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1480 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1481 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1482 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1483 				peer_id);
1484 	} else
1485 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1486 
1487 
1488 	if (msdu_info->exception_fw) {
1489 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1490 	}
1491 
1492 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1493 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1494 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1495 
1496 	if (status != QDF_STATUS_SUCCESS) {
1497 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1498 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1499 			  __func__, tx_desc, tx_q->ring_id);
1500 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1501 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
1502 		tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1503 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1504 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1505 		goto fail_return;
1506 	}
1507 
1508 	nbuf = NULL;
1509 
1510 fail_return:
1511 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1512 		hal_srng_access_end(soc->hal_soc, hal_srng);
1513 		hif_pm_runtime_put(soc->hif_handle);
1514 	} else {
1515 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1516 	}
1517 
1518 	return nbuf;
1519 }
1520 
1521 /**
1522  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1523  * @vdev: DP vdev handle
1524  * @nbuf: skb
1525  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1526  *
1527  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1528  *
1529  * Return: NULL on success,
1530  *         nbuf when it fails to send
1531  */
1532 #if QDF_LOCK_STATS
1533 static noinline
1534 #else
1535 static
1536 #endif
1537 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1538 				    struct dp_tx_msdu_info_s *msdu_info)
1539 {
1540 	uint8_t i;
1541 	struct dp_pdev *pdev = vdev->pdev;
1542 	struct dp_soc *soc = pdev->soc;
1543 	struct dp_tx_desc_s *tx_desc;
1544 	bool is_cce_classified = false;
1545 	QDF_STATUS status;
1546 	uint16_t htt_tcl_metadata = 0;
1547 
1548 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1549 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1550 	struct cdp_tid_tx_stats *tid_stats = NULL;
1551 
1552 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1553 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1554 				"%s %d : HAL RING Access Failed -- %pK",
1555 				__func__, __LINE__, hal_srng);
1556 		dp_tx_get_tid(vdev, nbuf, msdu_info);
1557 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
1558 		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
1559 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1560 		return nbuf;
1561 	}
1562 
1563 	if (qdf_unlikely(soc->cce_disable)) {
1564 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1565 		if (is_cce_classified) {
1566 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1567 			msdu_info->tid = DP_VO_TID;
1568 		}
1569 	}
1570 
1571 	if (msdu_info->frm_type == dp_tx_frm_me)
1572 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1573 
1574 	i = 0;
1575 	/* Print statement to track i and num_seg */
1576 	/*
1577 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1578 	 * descriptors using information in msdu_info
1579 	 */
1580 	while (i < msdu_info->num_seg) {
1581 		/*
1582 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1583 		 * descriptor
1584 		 */
1585 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1586 				tx_q->desc_pool_id);
1587 
1588 		if (!tx_desc) {
1589 			if (msdu_info->frm_type == dp_tx_frm_me) {
1590 				dp_tx_me_free_buf(pdev,
1591 					(void *)(msdu_info->u.sg_info
1592 						.curr_seg->frags[0].vaddr));
1593 			}
1594 			goto done;
1595 		}
1596 
1597 		if (msdu_info->frm_type == dp_tx_frm_me) {
1598 			tx_desc->me_buffer =
1599 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1600 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1601 		}
1602 
1603 		if (is_cce_classified)
1604 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1605 
1606 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1607 		if (msdu_info->exception_fw) {
1608 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1609 		}
1610 
1611 		/*
1612 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1613 		 */
1614 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1615 			htt_tcl_metadata, tx_q->ring_id, NULL);
1616 
1617 		if (status != QDF_STATUS_SUCCESS) {
1618 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1619 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1620 					__func__, tx_desc, tx_q->ring_id);
1621 
1622 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1623 			tid_stats = &pdev->stats.tid_stats.
1624 				tid_tx_stats[msdu_info->tid];
1625 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1626 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1627 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1628 
1629 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1630 			goto done;
1631 		}
1632 
1633 		/*
1634 		 * TODO
1635 		 * if tso_info structure can be modified to have curr_seg
1636 		 * as first element, following 2 blocks of code (for TSO and SG)
1637 		 * can be combined into 1
1638 		 */
1639 
1640 		/*
1641 		 * For frames with multiple segments (TSO, ME), jump to next
1642 		 * segment.
1643 		 */
1644 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1645 			if (msdu_info->u.tso_info.curr_seg->next) {
1646 				msdu_info->u.tso_info.curr_seg =
1647 					msdu_info->u.tso_info.curr_seg->next;
1648 
1649 				/*
1650 				 * If this is a jumbo nbuf, then increment the number of
1651 				 * nbuf users for each additional segment of the msdu.
1652 				 * This will ensure that the skb is freed only after
1653 				 * receiving tx completion for all segments of an nbuf
1654 				 */
1655 				qdf_nbuf_inc_users(nbuf);
1656 
1657 				/* Check with MCL if this is needed */
1658 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1659 			}
1660 		}
1661 
1662 		/*
1663 		 * For Multicast-Unicast converted packets,
1664 		 * each converted frame (for a client) is represented as
1665 		 * 1 segment
1666 		 */
1667 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1668 				(msdu_info->frm_type == dp_tx_frm_me)) {
1669 			if (msdu_info->u.sg_info.curr_seg->next) {
1670 				msdu_info->u.sg_info.curr_seg =
1671 					msdu_info->u.sg_info.curr_seg->next;
1672 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1673 			}
1674 		}
1675 		i++;
1676 	}
1677 
1678 	nbuf = NULL;
1679 
1680 done:
1681 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1682 		hal_srng_access_end(soc->hal_soc, hal_srng);
1683 		hif_pm_runtime_put(soc->hif_handle);
1684 	} else {
1685 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1686 	}
1687 
1688 	return nbuf;
1689 }
1690 
1691 /**
1692  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1693  *                     for SG frames
1694  * @vdev: DP vdev handle
1695  * @nbuf: skb
1696  * @seg_info: Pointer to Segment info Descriptor to be prepared
1697  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1698  *
1699  * Return: nbuf on success,
1700  *         NULL on failure
1701  */
1702 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1703 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1704 {
1705 	uint32_t cur_frag, nr_frags;
1706 	qdf_dma_addr_t paddr;
1707 	struct dp_tx_sg_info_s *sg_info;
1708 
1709 	sg_info = &msdu_info->u.sg_info;
1710 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1711 
1712 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1713 				QDF_DMA_TO_DEVICE)) {
1714 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1715 				"dma map error");
1716 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1717 
1718 		qdf_nbuf_free(nbuf);
1719 		return NULL;
1720 	}
1721 
1722 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1723 	seg_info->frags[0].paddr_lo = paddr;
1724 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1725 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1726 	seg_info->frags[0].vaddr = (void *) nbuf;
1727 
1728 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1729 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1730 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1731 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1732 					"frag dma map error");
1733 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1734 			qdf_nbuf_free(nbuf);
1735 			return NULL;
1736 		}
1737 
1738 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1739 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1740 		seg_info->frags[cur_frag + 1].paddr_hi =
1741 			((uint64_t) paddr) >> 32;
1742 		seg_info->frags[cur_frag + 1].len =
1743 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1744 	}
1745 
1746 	seg_info->frag_cnt = (cur_frag + 1);
1747 	seg_info->total_len = qdf_nbuf_len(nbuf);
1748 	seg_info->next = NULL;
1749 
1750 	sg_info->curr_seg = seg_info;
1751 
1752 	msdu_info->frm_type = dp_tx_frm_sg;
1753 	msdu_info->num_seg = 1;
1754 
1755 	return nbuf;
1756 }
1757 
1758 #ifdef MESH_MODE_SUPPORT
1759 
1760 /**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *				     and prepare msdu_info for mesh frames.
1763  * @vdev: DP vdev handle
1764  * @nbuf: skb
1765  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1766  *
1767  * Return: NULL on failure,
1768  *         nbuf when extracted successfully
1769  */
1770 static
1771 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1772 				struct dp_tx_msdu_info_s *msdu_info)
1773 {
1774 	struct meta_hdr_s *mhdr;
1775 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1776 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1777 
1778 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1779 
1780 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1781 		msdu_info->exception_fw = 0;
1782 		goto remove_meta_hdr;
1783 	}
1784 
1785 	msdu_info->exception_fw = 1;
1786 
1787 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1788 
1789 	meta_data->host_tx_desc_pool = 1;
1790 	meta_data->update_peer_cache = 1;
1791 	meta_data->learning_frame = 1;
1792 
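	/*
	 * When auto-rate is disabled, carry the fixed rate parameters
	 * (power, MCS, NSS, preamble, retries, BW) from the mesh meta
	 * header into the HTT extension descriptor.
	 */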
1793 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1794 		meta_data->power = mhdr->power;
1795 
1796 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1797 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1798 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1799 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1800 
1801 		meta_data->dyn_bw = 1;
1802 
1803 		meta_data->valid_pwr = 1;
1804 		meta_data->valid_mcs_mask = 1;
1805 		meta_data->valid_nss_mask = 1;
1806 		meta_data->valid_preamble_type  = 1;
1807 		meta_data->valid_retries = 1;
1808 		meta_data->valid_bw_info = 1;
1809 	}
1810 
1811 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1812 		meta_data->encrypt_type = 0;
1813 		meta_data->valid_encrypt_type = 1;
1814 		meta_data->learning_frame = 0;
1815 	}
1816 
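	/* Only the two least significant bits of the key index are
	 * carried in key_flags
	 */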
1817 	meta_data->valid_key_flags = 1;
1818 	meta_data->key_flags = (mhdr->keyix & 0x3);
1819 
1820 remove_meta_hdr:
1821 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1822 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1823 				"qdf_nbuf_pull_head failed");
1824 		qdf_nbuf_free(nbuf);
1825 		return NULL;
1826 	}
1827 
1828 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1829 
1830 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"%s, Meta hdr %0x %0x %0x %0x %0x %0x"
1832 			" tid %d to_fw %d",
1833 			__func__, msdu_info->meta_data[0],
1834 			msdu_info->meta_data[1],
1835 			msdu_info->meta_data[2],
1836 			msdu_info->meta_data[3],
1837 			msdu_info->meta_data[4],
1838 			msdu_info->meta_data[5],
1839 			msdu_info->tid, msdu_info->exception_fw);
1840 
1841 	return nbuf;
1842 }
1843 #else
1844 static
1845 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1846 				struct dp_tx_msdu_info_s *msdu_info)
1847 {
1848 	return nbuf;
1849 }
1850 
1851 #endif
1852 
1853 /**
1854  * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, false otherwise
1858  *
1859  */
1860 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1861 {
1862 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1863 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1864 	    tx_exc->sec_type > cdp_num_sec_types) {
1865 		return false;
1866 	}
1867 
1868 	return true;
1869 }
1870 
1871 /**
1872  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1873  * @vap_dev: DP vdev handle
1874  * @nbuf: skb
1875  * @tx_exc_metadata: Handle that holds exception path meta data
1876  *
1877  * Entry point for Core Tx layer (DP_TX) invoked from
1878  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1879  *
1880  * Return: NULL on success,
1881  *         nbuf when it fails to send
1882  */
1883 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1884 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1885 {
1886 	qdf_ether_header_t *eh = NULL;
1887 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1888 	struct dp_tx_msdu_info_s msdu_info;
1889 
1890 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
1891 
1892 	msdu_info.tid = tx_exc_metadata->tid;
1893 
1894 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1895 	dp_verbose_debug("skb %pM", nbuf->data);
1896 
1897 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1898 
1899 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1900 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1901 			"Invalid parameters in exception path");
1902 		goto fail;
1903 	}
1904 
1905 	/* Basic sanity checks for unsupported packets */
1906 
1907 	/* MESH mode */
1908 	if (qdf_unlikely(vdev->mesh_vdev)) {
1909 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1910 			"Mesh mode is not supported in exception path");
1911 		goto fail;
1912 	}
1913 
1914 	/* TSO or SG */
1915 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1916 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1917 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1918 			  "TSO and SG are not supported in exception path");
1919 
1920 		goto fail;
1921 	}
1922 
1923 	/* RAW */
1924 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1926 			  "Raw frame is not supported in exception path");
1927 		goto fail;
1928 	}
1929 
1930 
1931 	/* Mcast enhancement*/
1932 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1933 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
1934 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
1935 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					  "mcast_enhancement_en is set; ignoring it and sending the mcast packet to the FW");
1937 		}
1938 	}
1939 
1940 	/*
1941 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1943 	 * dedicated for data and 1 for command.
1944 	 * "queue_id" maps to one hardware ring.
1945 	 *  With each ring, we also associate a unique Tx descriptor pool
1946 	 *  to minimize lock contention for these resources.
1947 	 */
1948 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1949 
1950 	/*  Single linear frame */
1951 	/*
1952 	 * If nbuf is a simple linear frame, use send_single function to
1953 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1954 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1955 	 */
1956 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1957 			tx_exc_metadata->peer_id, tx_exc_metadata);
1958 
1959 	return nbuf;
1960 
1961 fail:
1962 	dp_verbose_debug("pkt send failed");
1963 	return nbuf;
1964 }
1965 
1966 /**
1967  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1968  * @vap_dev: DP vdev handle
1969  * @nbuf: skb
1970  *
1971  * Entry point for Core Tx layer (DP_TX) invoked from
1972  * hard_start_xmit in OSIF/HDD
1973  *
1974  * Return: NULL on success,
1975  *         nbuf when it fails to send
1976  */
1977 #ifdef MESH_MODE_SUPPORT
1978 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1979 {
1980 	struct meta_hdr_s *mhdr;
1981 	qdf_nbuf_t nbuf_mesh = NULL;
1982 	qdf_nbuf_t nbuf_clone = NULL;
1983 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1984 	uint8_t no_enc_frame = 0;
1985 
1986 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1987 	if (!nbuf_mesh) {
1988 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1989 				"qdf_nbuf_unshare failed");
1990 		return nbuf;
1991 	}
1992 	nbuf = nbuf_mesh;
1993 
1994 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1995 
1996 	if ((vdev->sec_type != cdp_sec_type_none) &&
1997 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1998 		no_enc_frame = 1;
1999 
2000 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2001 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2002 
2003 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2004 		       !no_enc_frame) {
2005 		nbuf_clone = qdf_nbuf_clone(nbuf);
2006 		if (!nbuf_clone) {
2007 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2008 				"qdf_nbuf_clone failed");
2009 			return nbuf;
2010 		}
2011 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2012 	}
2013 
2014 	if (nbuf_clone) {
2015 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
2016 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2017 		} else {
2018 			qdf_nbuf_free(nbuf_clone);
2019 		}
2020 	}
2021 
2022 	if (no_enc_frame)
2023 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2024 	else
2025 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2026 
2027 	nbuf = dp_tx_send(vap_dev, nbuf);
2028 	if ((!nbuf) && no_enc_frame) {
2029 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2030 	}
2031 
2032 	return nbuf;
2033 }
2034 
2035 #else
2036 
2037 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
2038 {
2039 	return dp_tx_send(vap_dev, nbuf);
2040 }
2041 
2042 #endif
2043 
2044 /**
2045  * dp_tx_send() - Transmit a frame on a given VAP
2046  * @vap_dev: DP vdev handle
2047  * @nbuf: skb
2048  *
2049  * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
2051  * cases
2052  *
2053  * Return: NULL on success,
2054  *         nbuf when it fails to send
2055  */
2056 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
2057 {
2058 	qdf_ether_header_t *eh = NULL;
2059 	struct dp_tx_msdu_info_s msdu_info;
2060 	struct dp_tx_seg_info_s seg_info;
2061 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2062 	uint16_t peer_id = HTT_INVALID_PEER;
2063 	qdf_nbuf_t nbuf_mesh = NULL;
2064 
2065 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2066 	qdf_mem_zero(&seg_info, sizeof(seg_info));
2067 
2068 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2069 
2070 	dp_verbose_debug("skb %pM", nbuf->data);
2071 
2072 	/*
2073 	 * Set Default Host TID value to invalid TID
2074 	 * (TID override disabled)
2075 	 */
2076 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2077 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2078 
2079 	if (qdf_unlikely(vdev->mesh_vdev)) {
2080 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2081 								&msdu_info);
2082 		if (!nbuf_mesh) {
2083 			dp_verbose_debug("Extracting mesh metadata failed");
2084 			return nbuf;
2085 		}
2086 		nbuf = nbuf_mesh;
2087 	}
2088 
2089 	/*
2090 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2092 	 * dedicated for data and 1 for command.
2093 	 * "queue_id" maps to one hardware ring.
2094 	 *  With each ring, we also associate a unique Tx descriptor pool
2095 	 *  to minimize lock contention for these resources.
2096 	 */
2097 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2098 
2099 	/*
2100 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2101 	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - One DSCP-TID override table
2103 	 *
2104 	 * If we need a different DSCP-TID mapping for this vap,
2105 	 * call tid_classify to extract DSCP/ToS from frame and
2106 	 * map to a TID and store in msdu_info. This is later used
2107 	 * to fill in TCL Input descriptor (per-packet TID override).
2108 	 */
2109 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2110 
2111 	/*
2112 	 * Classify the frame and call corresponding
2113 	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
2115 	 * into MSDU_INFO structure which is later used to fill
2116 	 * SW and HW descriptors.
2117 	 */
2118 	if (qdf_nbuf_is_tso(nbuf)) {
2119 		dp_verbose_debug("TSO frame %pK", vdev);
2120 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2121 				qdf_nbuf_len(nbuf));
2122 
2123 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2124 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2125 					 qdf_nbuf_len(nbuf));
2126 			return nbuf;
2127 		}
2128 
2129 		goto send_multiple;
2130 	}
2131 
2132 	/* SG */
2133 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2134 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2135 
2136 		if (!nbuf)
2137 			return NULL;
2138 
2139 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2140 
2141 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2142 				qdf_nbuf_len(nbuf));
2143 
2144 		goto send_multiple;
2145 	}
2146 
2147 #ifdef ATH_SUPPORT_IQUE
2148 	/* Mcast to Ucast Conversion*/
2149 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2150 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2151 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2152 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2153 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2154 
2155 			DP_STATS_INC_PKT(vdev,
2156 					tx_i.mcast_en.mcast_pkt, 1,
2157 					qdf_nbuf_len(nbuf));
2158 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2159 					QDF_STATUS_SUCCESS) {
2160 				return NULL;
2161 			}
2162 		}
2163 	}
2164 #endif
2165 
2166 	/* RAW */
2167 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2168 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2169 		if (!nbuf)
2170 			return NULL;
2171 
2172 		dp_verbose_debug("Raw frame %pK", vdev);
2173 
2174 		goto send_multiple;
2175 
2176 	}
2177 
2178 	/*  Single linear frame */
2179 	/*
2180 	 * If nbuf is a simple linear frame, use send_single function to
2181 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2182 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2183 	 */
2184 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2185 
2186 	return nbuf;
2187 
2188 send_multiple:
2189 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2190 
2191 	return nbuf;
2192 }
2193 
2194 /**
2195  * dp_tx_reinject_handler() - Tx Reinject Handler
2196  * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
2198  *
2199  * This function reinjects frames back to Target.
2200  * Todo - Host queue needs to be added
2201  *
2202  * Return: none
2203  */
2204 static
2205 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2206 {
2207 	struct dp_vdev *vdev;
2208 	struct dp_peer *peer = NULL;
2209 	uint32_t peer_id = HTT_INVALID_PEER;
2210 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2211 	qdf_nbuf_t nbuf_copy = NULL;
2212 	struct dp_tx_msdu_info_s msdu_info;
2213 	struct dp_peer *sa_peer = NULL;
2214 	struct dp_ast_entry *ast_entry = NULL;
2215 	struct dp_soc *soc = NULL;
2216 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2217 #ifdef WDS_VENDOR_EXTENSION
2218 	int is_mcast = 0, is_ucast = 0;
2219 	int num_peers_3addr = 0;
2220 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2221 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2222 #endif
2223 
2224 	vdev = tx_desc->vdev;
2225 	soc = vdev->pdev->soc;
2226 
2227 	qdf_assert(vdev);
2228 
2229 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2230 
2231 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2232 
2233 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2234 			"%s Tx reinject path", __func__);
2235 
2236 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2237 			qdf_nbuf_len(tx_desc->nbuf));
2238 
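	/* Look up the peer that originally sent this frame (by source
	 * MAC address) through the AST table, under ast_lock.
	 */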
2239 	qdf_spin_lock_bh(&(soc->ast_lock));
2240 
2241 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2242 				(soc,
2243 				 (uint8_t *)(eh->ether_shost),
2244 				 vdev->pdev->pdev_id);
2245 
2246 	if (ast_entry)
2247 		sa_peer = ast_entry->peer;
2248 
2249 	qdf_spin_unlock_bh(&(soc->ast_lock));
2250 
2251 #ifdef WDS_VENDOR_EXTENSION
2252 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2253 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2254 	} else {
2255 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2256 	}
2257 	is_ucast = !is_mcast;
2258 
2259 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2260 		if (peer->bss_peer)
2261 			continue;
2262 
		/* Detect WDS peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using the 3-addr format. All WDS enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
2268 		 */
2269 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2270 			num_peers_3addr = 1;
2271 			break;
2272 		}
2273 	}
2274 #endif
2275 
2276 	if (qdf_unlikely(vdev->mesh_vdev)) {
2277 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2278 	} else {
2279 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2280 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2281 #ifdef WDS_VENDOR_EXTENSION
2282 			/*
2283 			 * . if 3-addr STA, then send on BSS Peer
2284 			 * . if Peer WDS enabled and accept 4-addr mcast,
2285 			 * send mcast on that peer only
2286 			 * . if Peer WDS enabled and accept 4-addr ucast,
2287 			 * send ucast on that peer only
2288 			 */
2289 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2290 			 (peer->wds_enabled &&
2291 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2292 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2293 #else
2294 			((peer->bss_peer &&
2295 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2296 				 peer->nawds_enabled)) {
2297 #endif
2298 				peer_id = DP_INVALID_PEER;
2299 
2300 				if (peer->nawds_enabled) {
2301 					peer_id = peer->peer_ids[0];
2302 					if (sa_peer == peer) {
2303 						QDF_TRACE(
2304 							QDF_MODULE_ID_DP,
2305 							QDF_TRACE_LEVEL_DEBUG,
2306 							" %s: multicast packet",
2307 							__func__);
2308 						DP_STATS_INC(peer,
2309 							tx.nawds_mcast_drop, 1);
2310 						continue;
2311 					}
2312 				}
2313 
2314 				nbuf_copy = qdf_nbuf_copy(nbuf);
2315 
2316 				if (!nbuf_copy) {
2317 					QDF_TRACE(QDF_MODULE_ID_DP,
2318 						QDF_TRACE_LEVEL_DEBUG,
2319 						FL("nbuf copy failed"));
2320 					break;
2321 				}
2322 
2323 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2324 						nbuf_copy,
2325 						&msdu_info,
2326 						peer_id,
2327 						NULL);
2328 
2329 				if (nbuf_copy) {
2330 					QDF_TRACE(QDF_MODULE_ID_DP,
2331 						QDF_TRACE_LEVEL_DEBUG,
2332 						FL("pkt send failed"));
2333 					qdf_nbuf_free(nbuf_copy);
2334 				} else {
2335 					if (peer_id != DP_INVALID_PEER)
2336 						DP_STATS_INC_PKT(peer,
2337 							tx.nawds_mcast,
2338 							1, qdf_nbuf_len(nbuf));
2339 				}
2340 			}
2341 		}
2342 	}
2343 
2344 	if (vdev->nawds_enabled) {
2345 		peer_id = DP_INVALID_PEER;
2346 
2347 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2348 					1, qdf_nbuf_len(nbuf));
2349 
2350 		nbuf = dp_tx_send_msdu_single(vdev,
2351 				nbuf,
2352 				&msdu_info,
2353 				peer_id, NULL);
2354 
2355 		if (nbuf) {
2356 			QDF_TRACE(QDF_MODULE_ID_DP,
2357 				QDF_TRACE_LEVEL_DEBUG,
2358 				FL("pkt send failed"));
2359 			qdf_nbuf_free(nbuf);
2360 		}
2361 	} else
2362 		qdf_nbuf_free(nbuf);
2363 
2364 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2365 }
2366 
2367 /**
2368  * dp_tx_inspect_handler() - Tx Inspect Handler
2369  * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
2371  *
2372  * Handles Tx frames sent back to Host for inspection
2373  * (ProxyARP)
2374  *
2375  * Return: none
2376  */
2377 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2378 {
2379 
2380 	struct dp_soc *soc;
2381 	struct dp_pdev *pdev = tx_desc->pdev;
2382 
2383 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2384 			"%s Tx inspect path",
2385 			__func__);
2386 
2387 	qdf_assert(pdev);
2388 
2389 	soc = pdev->soc;
2390 
2391 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2392 			qdf_nbuf_len(tx_desc->nbuf));
2393 
2394 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2395 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2396 }
2397 
2398 #ifdef FEATURE_PERPKT_INFO
2399 /**
2400  * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency of the frame, used when latency capture is enabled
 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing or not.
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is to be handed to the stack,
 *         error status otherwise
 */
2410 QDF_STATUS
2411 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2412 				       struct dp_pdev *pdev,
2413 				       struct dp_peer *peer,
2414 				       struct hal_tx_completion_status *ts,
2415 				       qdf_nbuf_t netbuf,
2416 				       uint64_t time_latency)
2417 {
2418 	struct tx_capture_hdr *ppdu_hdr;
2419 	uint16_t peer_id = ts->peer_id;
2420 	uint32_t ppdu_id = ts->ppdu_id;
2421 	uint8_t first_msdu = ts->first_msdu;
2422 	uint8_t last_msdu = ts->last_msdu;
2423 
2424 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2425 			 !pdev->latency_capture_enable))
2426 		return QDF_STATUS_E_NOSUPPORT;
2427 
2428 	if (!peer) {
2429 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2430 				FL("Peer Invalid"));
2431 		return QDF_STATUS_E_INVAL;
2432 	}
2433 
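	/*
	 * In m_copy mode, indicate only one MSDU per PPDU per peer;
	 * repeats with the same ppdu_id/peer_id are skipped.
	 */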
2434 	if (pdev->mcopy_mode) {
2435 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2436 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2437 			return QDF_STATUS_E_INVAL;
2438 		}
2439 
2440 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2441 		pdev->m_copy_id.tx_peer_id = peer_id;
2442 	}
2443 
2444 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2445 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2446 				FL("No headroom"));
2447 		return QDF_STATUS_E_NOMEM;
2448 	}
2449 
2450 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2451 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2452 		     QDF_MAC_ADDR_SIZE);
2453 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2454 		     QDF_MAC_ADDR_SIZE);
2455 	ppdu_hdr->ppdu_id = ppdu_id;
2456 	ppdu_hdr->peer_id = peer_id;
2457 	ppdu_hdr->first_msdu = first_msdu;
2458 	ppdu_hdr->last_msdu = last_msdu;
2459 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2460 		ppdu_hdr->tsf = ts->tsf;
2461 		ppdu_hdr->time_latency = time_latency;
2462 	}
2463 
2464 	return QDF_STATUS_SUCCESS;
2465 }
2466 
2467 
2468 /**
2469  * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send the completion to the stack
 * so that the buffer can be freed there.
 *
 * Return: none
 */
2479 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2480 					uint16_t peer_id, uint32_t ppdu_id,
2481 					qdf_nbuf_t netbuf)
2482 {
2483 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2484 				netbuf, peer_id,
2485 				WDI_NO_VAL, pdev->pdev_id);
2486 }
2487 #else
2488 static QDF_STATUS
2489 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2490 				       struct dp_pdev *pdev,
2491 				       struct dp_peer *peer,
2492 				       struct hal_tx_completion_status *ts,
2493 				       qdf_nbuf_t netbuf,
2494 				       uint64_t time_latency)
2495 {
2496 	return QDF_STATUS_E_NOSUPPORT;
2497 }
2498 
2499 static void
2500 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2501 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2502 {
2503 }
2504 #endif
2505 
2506 /**
2507  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2508  * @soc: Soc handle
2509  * @desc: software Tx descriptor to be processed
2510  *
2511  * Return: none
2512  */
2513 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2514 				       struct dp_tx_desc_s *desc)
2515 {
2516 	struct dp_vdev *vdev = desc->vdev;
2517 	qdf_nbuf_t nbuf = desc->nbuf;
2518 
2519 	/* nbuf already freed in vdev detach path */
2520 	if (!nbuf)
2521 		return;
2522 
2523 	/* If it is TDLS mgmt, don't unmap or free the frame */
2524 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2525 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2526 
2527 	/* 0 : MSDU buffer, 1 : MLE */
2528 	if (desc->msdu_ext_desc) {
2529 		/* TSO free */
2530 		if (hal_tx_ext_desc_get_tso_enable(
2531 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
2533 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2534 						desc->tso_num_desc);
2535 			qdf_nbuf_free(nbuf);
2536 			return;
2537 		}
2538 	}
2539 
2540 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2541 
2542 	if (qdf_unlikely(!vdev)) {
2543 		qdf_nbuf_free(nbuf);
2544 		return;
2545 	}
2546 
2547 	if (qdf_likely(!vdev->mesh_vdev))
2548 		qdf_nbuf_free(nbuf);
2549 	else {
2550 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2551 			qdf_nbuf_free(nbuf);
2552 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2553 		} else
2554 			vdev->osif_tx_free_ext((nbuf));
2555 	}
2556 }
2557 
2558 /**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to DP vdev handle
 * @status: Tx completion status from HTT descriptor
2562  *
2563  * Handles MEC notify event sent from fw to Host
2564  *
2565  * Return: none
2566  */
2567 #ifdef FEATURE_WDS
2568 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2569 {
2570 
2571 	struct dp_soc *soc;
2572 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2573 	struct dp_peer *peer;
2574 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;
2575 
2576 	if (!vdev->mec_enabled)
2577 		return;
2578 
2579 	/* MEC required only in STA mode */
2580 	if (vdev->opmode != wlan_op_mode_sta)
2581 		return;
2582 
2583 	soc = vdev->pdev->soc;
2584 	peer = vdev->vap_bss_peer;
2585 
2586 	if (!peer) {
2587 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2588 				FL("peer is NULL"));
2589 		return;
2590 	}
2591 
2592 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2593 			"%s Tx MEC Handler",
2594 			__func__);
2595 
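	/* The MAC address bytes in the MEC notify status words are in
	 * reverse order; copy them out reversed to reconstruct the address.
	 */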
2596 	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
2597 		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
2598 					status[(QDF_MAC_ADDR_SIZE - 2) + i];
2599 
2600 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
2601 		dp_peer_add_ast(soc,
2602 				peer,
2603 				mac_addr,
2604 				CDP_TXRX_AST_TYPE_MEC,
2605 				flags);
2606 }
2607 #endif
2608 
2609 #ifdef MESH_MODE_SUPPORT
2610 /**
2611  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2612  *                                         in mesh meta header
2613  * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion status
 *
 * Return: none
2616  */
2617 static
2618 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2619 		struct hal_tx_completion_status *ts)
2620 {
2621 	struct meta_hdr_s *mhdr;
2622 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2623 
2624 	if (!tx_desc->msdu_ext_desc) {
2625 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2626 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2627 				"netbuf %pK offset %d",
2628 				netbuf, tx_desc->pkt_offset);
2629 			return;
2630 		}
2631 	}
2632 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2634 			"netbuf %pK offset %lu", netbuf,
2635 			sizeof(struct meta_hdr_s));
2636 		return;
2637 	}
2638 
2639 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2640 	mhdr->rssi = ts->ack_frame_rssi;
2641 	mhdr->channel = tx_desc->pdev->operating_channel;
2642 }
2643 
2644 #else
2645 static
2646 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2647 		struct hal_tx_completion_status *ts)
2648 {
2649 }
2650 
2651 #endif
2652 
2653 /**
 * dp_tx_compute_delay() - Compute SW enqueue, HW transmit and interframe
 *			   delays and update the per-TID delay stats
 *
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 *
 * Return: none
2661  */
2662 static void dp_tx_compute_delay(struct dp_vdev *vdev,
2663 				struct dp_tx_desc_s *tx_desc, uint8_t tid)
2664 {
2665 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
2666 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
2667 
2668 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
2669 		return;
2670 
2671 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
2672 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
2673 	timestamp_hw_enqueue = tx_desc->timestamp;
2674 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
2675 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
2676 					 timestamp_hw_enqueue);
2677 	interframe_delay = (uint32_t)(timestamp_ingress -
2678 				      vdev->prev_tx_enq_tstamp);
2679 
2680 	/*
2681 	 * Delay in software enqueue
2682 	 */
2683 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
2684 			      CDP_DELAY_STATS_SW_ENQ);
2685 	/*
2686 	 * Delay between packet enqueued to HW and Tx completion
2687 	 */
2688 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
2689 			      CDP_DELAY_STATS_FW_HW_TRANSMIT);
2690 
2691 	/*
2692 	 * Update interframe delay stats calculated at hardstart receive point.
2693 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for the 1st frame.
	 * On the other hand, this helps avoid an extra per-packet check
2696 	 * of !vdev->prev_tx_enq_tstamp.
2697 	 */
2698 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
2699 			      CDP_DELAY_STATS_TX_INTERFRAME);
2700 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
2701 }
2702 
2703 /**
2704  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2705  * @tx_desc: software descriptor head pointer
2706  * @ts: Tx completion status
2707  * @peer: peer handle
2708  *
2709  * Return: None
2710  */
2711 static inline void
2712 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
2713 			struct hal_tx_completion_status *ts,
2714 			struct dp_peer *peer)
2715 {
2716 	struct dp_pdev *pdev = peer->vdev->pdev;
2717 	struct dp_soc *soc = NULL;
2718 	uint8_t mcs, pkt_type;
2719 	uint8_t tid = ts->tid;
2720 	uint32_t length;
2721 	struct cdp_tid_tx_stats *tid_stats;
2722 
2723 	if (!pdev)
2724 		return;
2725 
2726 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
2727 		tid = CDP_MAX_DATA_TIDS - 1;
2728 
2729 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tid];
2730 	soc = pdev->soc;
2731 
2732 	mcs = ts->mcs;
2733 	pkt_type = ts->pkt_type;
2734 
2735 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2736 		dp_err("Release source is not from TQM");
2737 		return;
2738 	}
2739 
2740 	length = qdf_nbuf_len(tx_desc->nbuf);
2741 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2742 
2743 	if (qdf_unlikely(pdev->delay_stats_flag))
2744 		dp_tx_compute_delay(peer->vdev, tx_desc, tid);
2745 	tid_stats->complete_cnt++;
2746 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2747 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2748 
2749 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2750 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2751 
2752 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2753 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2754 
2755 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2756 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2757 
2758 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2759 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2760 
2761 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2762 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2763 
2764 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2765 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2766 
2767 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2768 		tid_stats->comp_fail_cnt++;
2769 		return;
2770 	}
2771 
2772 	tid_stats->success_cnt++;
2773 
2774 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2775 
2776 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2777 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2778 
2779 	/*
2780 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2781 	 * Return from here if HTT PPDU events are enabled.
2782 	 */
2783 	if (!(soc->process_tx_status))
2784 		return;
2785 
2786 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2787 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2788 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2789 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2790 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2791 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2792 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2793 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2794 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2795 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2796 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2797 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2798 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2799 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2800 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2801 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2802 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2803 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2804 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2805 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2806 
2807 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2808 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2809 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2810 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2811 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2812 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2813 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2814 
2815 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
2816 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2817 			     &peer->stats, ts->peer_id,
2818 			     UPDATE_PEER_STATS, pdev->pdev_id);
2819 #endif
2820 }
2821 
2822 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2823 /**
2824  * dp_tx_flow_pool_lock() - take flow pool lock
2825  * @soc: core txrx main context
2826  * @tx_desc: tx desc
2827  *
2828  * Return: None
2829  */
2830 static inline
2831 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2832 			  struct dp_tx_desc_s *tx_desc)
2833 {
2834 	struct dp_tx_desc_pool_s *pool;
2835 	uint8_t desc_pool_id;
2836 
2837 	desc_pool_id = tx_desc->pool_id;
2838 	pool = &soc->tx_desc[desc_pool_id];
2839 
2840 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2841 }
2842 
2843 /**
2844  * dp_tx_flow_pool_unlock() - release flow pool lock
2845  * @soc: core txrx main context
2846  * @tx_desc: tx desc
2847  *
2848  * Return: None
2849  */
2850 static inline
2851 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2852 			    struct dp_tx_desc_s *tx_desc)
2853 {
2854 	struct dp_tx_desc_pool_s *pool;
2855 	uint8_t desc_pool_id;
2856 
2857 	desc_pool_id = tx_desc->pool_id;
2858 	pool = &soc->tx_desc[desc_pool_id];
2859 
2860 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2861 }
2862 #else
2863 static inline
2864 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2865 {
2866 }
2867 
2868 static inline
2869 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2870 {
2871 }
2872 #endif
2873 
2874 /**
2875  * dp_tx_notify_completion() - Notify tx completion for this desc
2876  * @soc: core txrx main context
2877  * @tx_desc: tx desc
2878  * @netbuf:  buffer
2879  *
2880  * Return: none
2881  */
2882 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2883 					   struct dp_tx_desc_s *tx_desc,
2884 					   qdf_nbuf_t netbuf)
2885 {
2886 	void *osif_dev;
2887 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2888 
2889 	qdf_assert(tx_desc);
2890 
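	/* Read the vdev fields under the flow pool lock; with flow
	 * control enabled the pool's descriptors can have their vdev
	 * reset concurrently (see dp_tx_desc_reset_vdev).
	 */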
2891 	dp_tx_flow_pool_lock(soc, tx_desc);
2892 
2893 	if (!tx_desc->vdev ||
2894 	    !tx_desc->vdev->osif_vdev) {
2895 		dp_tx_flow_pool_unlock(soc, tx_desc);
2896 		return;
2897 	}
2898 
2899 	osif_dev = tx_desc->vdev->osif_vdev;
2900 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2901 	dp_tx_flow_pool_unlock(soc, tx_desc);
2902 
2903 	if (tx_compl_cbk)
2904 		tx_compl_cbk(netbuf, osif_dev);
2905 }
2906 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
2912  *
2913  * Return: none
2914  */
2915 #ifdef FEATURE_PERPKT_INFO
2916 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2917 					       struct dp_peer *peer,
2918 					       uint8_t tid,
2919 					       uint64_t txdesc_ts,
2920 					       uint32_t ppdu_id)
2921 {
2922 	uint64_t delta_ms;
2923 	struct cdp_tx_sojourn_stats *sojourn_stats;
2924 
2925 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
2926 		return;
2927 
2928 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
2929 			 tid >= CDP_DATA_TID_MAX))
2930 		return;
2931 
2932 	if (qdf_unlikely(!pdev->sojourn_buf))
2933 		return;
2934 
2935 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
2936 		qdf_nbuf_data(pdev->sojourn_buf);
2937 
2938 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
2939 
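	/* delta_ms: elapsed time in ms from the Tx descriptor timestamp
	 * to now, folded into the per-TID EWMA for this peer.
	 */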
2940 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2941 				txdesc_ts;
2942 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
2943 			    delta_ms);
2944 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
2945 	sojourn_stats->num_msdus[tid] = 1;
2946 	sojourn_stats->avg_sojourn_msdu[tid].internal =
2947 		peer->avg_sojourn_msdu[tid].internal;
2948 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2949 			     pdev->sojourn_buf, HTT_INVALID_PEER,
2950 			     WDI_NO_VAL, pdev->pdev_id);
2951 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
2952 	sojourn_stats->num_msdus[tid] = 0;
2953 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
2954 }
2955 #else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
2960 {
2961 }
2962 #endif
2963 
2964 /**
2965  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
2966  * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
2969  *
2970  * Return: none
2971  */
2972 static inline void
2973 dp_tx_comp_process_desc(struct dp_soc *soc,
2974 			struct dp_tx_desc_s *desc,
2975 			struct hal_tx_completion_status *ts,
2976 			struct dp_peer *peer)
2977 {
2978 	uint64_t time_latency = 0;
2979 	/*
2980 	 * m_copy/tx_capture modes are not supported for
2981 	 * scatter gather packets
2982 	 */
2983 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
2984 		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
2985 				desc->timestamp);
2986 	}
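	/*
	 * For frames without an MSDU extension descriptor, first try the
	 * tx-capture queue and the per-packet stack completion path;
	 * otherwise fall through to the normal buffer free below.
	 */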
2987 	if (!(desc->msdu_ext_desc)) {
2988 		if (QDF_STATUS_SUCCESS ==
2989 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
2990 			return;
2991 		}
2992 
2993 		if (QDF_STATUS_SUCCESS ==
2994 		    dp_get_completion_indication_for_stack(soc,
2995 							   desc->pdev,
2996 							   peer, ts,
2997 							   desc->nbuf,
2998 							   time_latency)) {
2999 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
3000 				       QDF_DMA_TO_DEVICE);
3001 			dp_send_completion_to_stack(soc,
3002 						    desc->pdev,
3003 						    ts->peer_id,
3004 						    ts->ppdu_id,
3005 						    desc->nbuf);
3006 			return;
3007 		}
3008 	}
3009 
3010 	dp_tx_comp_free_buf(soc, desc);
3011 }
3012 
3013 /**
3014  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3015  * @tx_desc: software descriptor head pointer
3016  * @ts: Tx completion status
3017  * @peer: peer handle
3018  *
3019  * Return: none
3020  */
3021 static inline
3022 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
3023 				  struct hal_tx_completion_status *ts,
3024 				  struct dp_peer *peer)
3025 {
3026 	uint32_t length;
3027 	qdf_ether_header_t *eh;
3028 	struct dp_soc *soc = NULL;
3029 	struct dp_vdev *vdev = tx_desc->vdev;
3030 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3031 
3032 	if (!vdev || !nbuf) {
3033 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3034 				"invalid tx descriptor. vdev or nbuf NULL");
3035 		goto out;
3036 	}
3037 
3038 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3039 
3040 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3041 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3042 				 QDF_TRACE_DEFAULT_PDEV_ID,
3043 				 qdf_nbuf_data_addr(nbuf),
3044 				 sizeof(qdf_nbuf_data(nbuf)),
3045 				 tx_desc->id,
3046 				 ts->status));
3047 
3048 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3049 				"-------------------- \n"
3050 				"Tx Completion Stats: \n"
3051 				"-------------------- \n"
3052 				"ack_frame_rssi = %d \n"
3053 				"first_msdu = %d \n"
3054 				"last_msdu = %d \n"
3055 				"msdu_part_of_amsdu = %d \n"
3056 				"rate_stats valid = %d \n"
3057 				"bw = %d \n"
3058 				"pkt_type = %d \n"
3059 				"stbc = %d \n"
3060 				"ldpc = %d \n"
3061 				"sgi = %d \n"
3062 				"mcs = %d \n"
3063 				"ofdma = %d \n"
3064 				"tones_in_ru = %d \n"
3065 				"tsf = %d \n"
3066 				"ppdu_id = %d \n"
3067 				"transmit_cnt = %d \n"
3068 				"tid = %d \n"
3069 				"peer_id = %d\n",
3070 				ts->ack_frame_rssi, ts->first_msdu,
3071 				ts->last_msdu, ts->msdu_part_of_amsdu,
3072 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3073 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3074 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3075 				ts->transmit_cnt, ts->tid, ts->peer_id);
3076 
3077 	soc = vdev->pdev->soc;
3078 
3079 	/* Update SoC level stats */
3080 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3081 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3082 
3083 	/* Update per-packet stats for mesh mode */
3084 	if (qdf_unlikely(vdev->mesh_vdev) &&
3085 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3086 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3087 
3088 	length = qdf_nbuf_len(nbuf);
3089 	/* Update peer level stats */
3090 	if (!peer) {
3091 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
3092 				   "peer is null or deletion in progress");
3093 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3094 		goto out;
3095 	}
3096 
3097 	if (qdf_likely(!peer->bss_peer)) {
3098 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3099 
3100 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3101 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3102 	} else {
3103 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3104 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3105 
3106 			if ((peer->vdev->tx_encap_type ==
3107 				htt_cmn_pkt_type_ethernet) &&
3108 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3109 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3110 			}
3111 		}
3112 	}
3113 
3114 	dp_tx_update_peer_stats(tx_desc, ts, peer);
3115 
3116 #ifdef QCA_SUPPORT_RDK_STATS
3117 	if (soc->wlanstats_enabled)
3118 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3119 					    tx_desc->timestamp,
3120 					    ts->ppdu_id);
3121 #endif
3122 
3123 out:
3124 	return;
3125 }

/**
3127  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3128  * @soc: core txrx main context
3129  * @comp_head: software descriptor head pointer
3130  *
3131  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3132  * and release the software descriptors after processing is complete
3133  *
3134  * Return: none
3135  */
3136 static void
3137 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3138 			     struct dp_tx_desc_s *comp_head)
3139 {
3140 	struct dp_tx_desc_s *desc;
3141 	struct dp_tx_desc_s *next;
3142 	struct hal_tx_completion_status ts = {0};
3143 	struct dp_peer *peer;
3144 	qdf_nbuf_t netbuf;
3145 
3146 	desc = comp_head;
3147 
3148 	while (desc) {
3149 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3150 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3151 		dp_tx_comp_process_tx_status(desc, &ts, peer);
3152 
3153 		netbuf = desc->nbuf;
3154 		/* check tx complete notification */
3155 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3156 			dp_tx_notify_completion(soc, desc, netbuf);
3157 
3158 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3159 
3160 		if (peer)
3161 			dp_peer_unref_del_find_by_id(peer);
3162 
3163 		next = desc->next;
3164 
3165 		dp_tx_desc_release(desc, desc->pool_id);
3166 		desc = next;
3167 	}
3168 
3169 }
3170 
3171 /**
3172  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3173  * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
3175  *
3176  * This function will process HTT Tx indication messages from Target
3177  *
3178  * Return: none
3179  */
3180 static
3181 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
3182 {
3183 	uint8_t tx_status;
3184 	struct dp_pdev *pdev;
3185 	struct dp_vdev *vdev;
3186 	struct dp_soc *soc;
3187 	struct hal_tx_completion_status ts = {0};
3188 	uint32_t *htt_desc = (uint32_t *)status;
3189 	struct dp_peer *peer;
3190 	struct cdp_tid_tx_stats *tid_stats = NULL;
3191 
3192 	qdf_assert(tx_desc->pdev);
3193 
3194 	pdev = tx_desc->pdev;
3195 	vdev = tx_desc->vdev;
3196 	soc = pdev->soc;
3197 
3198 	if (!vdev)
3199 		return;
3200 
3201 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3202 
3203 	switch (tx_status) {
3204 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3205 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3206 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3207 	{
3208 		uint8_t tid;
3209 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3210 			ts.peer_id =
3211 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3212 						htt_desc[2]);
3213 			ts.tid =
3214 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3215 						htt_desc[2]);
3216 		} else {
3217 			ts.peer_id = HTT_INVALID_PEER;
3218 			ts.tid = HTT_INVALID_TID;
3219 		}
3220 		ts.ppdu_id =
3221 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3222 					htt_desc[1]);
3223 		ts.ack_frame_rssi =
3224 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3225 					htt_desc[1]);
3226 
3227 		ts.first_msdu = 1;
3228 		ts.last_msdu = 1;
3229 		tid = ts.tid;
3230 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3231 			tid = CDP_MAX_DATA_TIDS - 1;
3232 
3233 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tid];
3234 
3235 		if (qdf_unlikely(pdev->delay_stats_flag))
3236 			dp_tx_compute_delay(vdev, tx_desc, tid);
3237 		tid_stats->complete_cnt++;
3238 		if (qdf_unlikely(tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)) {
3239 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
3240 			tid_stats->comp_fail_cnt++;
3241 		} else {
3242 			tid_stats->success_cnt++;
3243 		}
3244 
		peer = dp_peer_find_by_id(soc, ts.peer_id);

		dp_tx_comp_process_tx_status(tx_desc, &ts, peer);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);

		/* Drop the peer reference only after the peer has been
		 * used for status and descriptor processing
		 */
		if (qdf_likely(peer))
			dp_peer_unref_del_find_by_id(peer);

		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3253 
3254 		break;
3255 	}
3256 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3257 	{
3258 		dp_tx_reinject_handler(tx_desc, status);
3259 		break;
3260 	}
3261 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3262 	{
3263 		dp_tx_inspect_handler(tx_desc, status);
3264 		break;
3265 	}
3266 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3267 	{
3268 		dp_tx_mec_handler(vdev, status);
3269 		break;
3270 	}
3271 	default:
3272 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3273 			  "%s Invalid HTT tx_status %d\n",
3274 			  __func__, tx_status);
3275 		break;
3276 	}
3277 }
3278 
3279 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3280 static inline
3281 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3282 {
3283 	bool limit_hit = false;
3284 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3285 
3286 	limit_hit =
3287 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
3288 
3289 	if (limit_hit)
3290 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3291 
3292 	return limit_hit;
3293 }
3294 
3295 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3296 {
3297 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3298 }
3299 #else
3300 static inline
3301 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3302 {
3303 	return false;
3304 }
3305 
3306 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3307 {
3308 	return false;
3309 }
3310 #endif
3311 
3312 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3313 			    void *hal_srng, uint32_t quota)
3314 {
3315 	void *tx_comp_hal_desc;
3316 	uint8_t buffer_src;
3317 	uint8_t pool_id;
3318 	uint32_t tx_desc_id;
3319 	struct dp_tx_desc_s *tx_desc = NULL;
3320 	struct dp_tx_desc_s *head_desc = NULL;
3321 	struct dp_tx_desc_s *tail_desc = NULL;
3322 	uint32_t num_processed = 0;
3323 	uint32_t count = 0;
3324 	bool force_break = false;
3325 
3326 	DP_HIST_INIT();
3327 
3328 more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
3332 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3333 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3334 				"%s %d : HAL RING Access Failed -- %pK",
3335 				__func__, __LINE__, hal_srng);
3336 		return 0;
3337 	}
3338 
3339 	/* Find head descriptor from completion ring */
3340 	while (qdf_likely(tx_comp_hal_desc =
3341 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3342 
3343 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3344 
		/*
		 * If this buffer was not released by TQM or FW,
		 * then it is not a Tx completion indication; assert.
		 */
3347 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3348 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3349 
3350 			QDF_TRACE(QDF_MODULE_ID_DP,
3351 				  QDF_TRACE_LEVEL_FATAL,
3352 				  "Tx comp release_src != TQM | FW but from %d",
3353 				  buffer_src);
3354 			hal_dump_comp_desc(tx_comp_hal_desc);
3355 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3356 			qdf_assert_always(0);
3357 		}
3358 
3359 		/* Get descriptor id */
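		/* The cookie encodes the pool id, page id and page offset
		 * of the SW Tx descriptor.
		 */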
3360 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3361 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3362 			DP_TX_DESC_ID_POOL_OS;
3363 
3364 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3365 			continue;
3366 
3367 		/* Find Tx descriptor */
3368 		tx_desc = dp_tx_desc_find(soc, pool_id,
3369 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3370 				DP_TX_DESC_ID_PAGE_OS,
3371 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3372 				DP_TX_DESC_ID_OFFSET_OS);
3373 
3374 		/*
3375 		 * If the descriptor is already freed in vdev_detach,
3376 		 * continue to next descriptor
3377 		 */
3378 		if (!tx_desc->vdev && !tx_desc->flags) {
3379 			QDF_TRACE(QDF_MODULE_ID_DP,
3380 				  QDF_TRACE_LEVEL_INFO,
3381 				  "Descriptor freed in vdev_detach %d",
3382 				  tx_desc_id);
3383 
3384 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3385 			count++;
3386 			continue;
3387 		}
3388 
3389 		/*
3390 		 * If the release source is FW, process the HTT status
3391 		 */
3392 		if (qdf_unlikely(buffer_src ==
3393 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3394 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3395 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3396 					htt_tx_status);
3397 			dp_tx_process_htt_completion(tx_desc,
3398 					htt_tx_status);
3399 		} else {
3400 			/* Pool id is not matching. Error */
3401 			if (tx_desc->pool_id != pool_id) {
3402 				QDF_TRACE(QDF_MODULE_ID_DP,
3403 					QDF_TRACE_LEVEL_FATAL,
3404 					"Tx Comp pool id %d not matched %d",
3405 					pool_id, tx_desc->pool_id);
3406 
3407 				qdf_assert_always(0);
3408 			}
3409 
3410 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3411 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3412 				QDF_TRACE(QDF_MODULE_ID_DP,
3413 					QDF_TRACE_LEVEL_FATAL,
3414 					"Txdesc invalid, flgs = %x,id = %d",
3415 					tx_desc->flags,	tx_desc_id);
3416 				qdf_assert_always(0);
3417 			}
3418 
3419 			/* First ring descriptor on the cycle */
3420 			if (!head_desc) {
3421 				head_desc = tx_desc;
3422 				tail_desc = tx_desc;
3423 			}
3424 
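			/* Chain this descriptor onto the reaped list */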
3425 			tail_desc->next = tx_desc;
3426 			tx_desc->next = NULL;
3427 			tail_desc = tx_desc;
3428 
3429 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
3430 
3431 			/* Collect hw completion contents */
3432 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3433 					&tx_desc->comp, 1);
3434 
3435 		}
3436 
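		/* num_processed is incremented only when
		 * (count & DP_TX_NAPI_BUDGET_DIV_MASK) is zero, stretching
		 * the quota across several reaped descriptors.
		 */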
3437 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3438 
3439 		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing
3442 		 */
3443 		if (num_processed >= quota) {
3444 			force_break = true;
3445 			break;
3446 		}
3447 
3448 		count++;
3449 
3450 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
3451 			break;
3452 	}
3453 
3454 	hal_srng_access_end(soc->hal_soc, hal_srng);
3455 
3456 	/* Process the reaped descriptors */
3457 	if (head_desc)
3458 		dp_tx_comp_process_desc_list(soc, head_desc);
3459 
3460 	if (dp_tx_comp_enable_eol_data_check(soc)) {
3461 		if (!force_break &&
3462 		    hal_srng_dst_peek_sync_locked(soc, hal_srng)) {
3463 			DP_STATS_INC(soc, tx.hp_oos2, 1);
3464 			if (!hif_exec_should_yield(soc->hif_handle,
3465 						   int_ctx->dp_intr_id))
3466 				goto more_data;
3467 		}
3468 	}
3469 	DP_TX_HIST_STATS_PER_PDEV();
3470 
3471 	return num_processed;
3472 }
3473 
3474 #ifdef FEATURE_WLAN_TDLS
3475 /**
3476  * dp_tx_non_std() - Allow the control-path SW to send data frames
3477  *
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
3481  *
3482  * Return: NULL on success,
3483  *         nbuf when it fails to send
3484  */
3485 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3486 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3487 {
3488 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3489 
3490 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3491 		vdev->is_tdls_frame = true;
3492 	return dp_tx_send(vdev_handle, msdu_list);
3493 }
3494 #endif
3495 
3496 /**
3497  * dp_tx_vdev_attach() - attach vdev to dp tx
3498  * @vdev: virtual device instance
3499  *
3500  * Return: QDF_STATUS_SUCCESS: success
3501  *         QDF_STATUS_E_RESOURCES: Error return
3502  */
3503 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3504 {
3505 	/*
3506 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3507 	 */
3508 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3509 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3510 
3511 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3512 			vdev->vdev_id);
3513 
3514 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3515 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3516 
3517 	/*
3518 	 * Set HTT Extension Valid bit to 0 by default
3519 	 */
3520 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3521 
3522 	dp_tx_vdev_update_search_flags(vdev);
3523 
3524 	return QDF_STATUS_SUCCESS;
3525 }
3526 
3527 #ifdef FEATURE_WDS
3528 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3529 {
3530 	struct dp_soc *soc = vdev->pdev->soc;
3531 
3532 	/*
3533 	 * If AST index override support is available (HKv2 etc),
	 * the DA search flag should always be enabled
3535 	 *
3536 	 * If AST index override support is not available (HKv1),
3537 	 * DA search flag should be used for all modes except QWRAP
3538 	 */
3539 	if (soc->ast_override_support || !vdev->proxysta_vdev)
3540 		return true;
3541 
3542 	return false;
3543 }
3544 #else
3545 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3546 {
3547 	return false;
3548 }
3549 #endif
3550 
3551 /**
3552  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3553  * @vdev: virtual device instance
3554  *
3555  * Return: void
3556  *
3557  */
3558 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3559 {
3560 	struct dp_soc *soc = vdev->pdev->soc;
3561 
3562 	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
3565 	 *
3566 	 * Enable AddrY (SA based search) only for non-WDS STA and
3567 	 * ProxySTA VAP (in HKv1) modes.
3568 	 *
3569 	 * In all other VAP modes, only DA based search should be
3570 	 * enabled
3571 	 */
3572 	if (vdev->opmode == wlan_op_mode_sta &&
3573 	    vdev->tdls_link_connected)
3574 		vdev->hal_desc_addr_search_flags =
3575 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3576 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3577 		 !dp_tx_da_search_override(vdev))
3578 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3579 	else
3580 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3581 
3582 	/* Set search type only when peer map v2 messaging is enabled
3583 	 * as we will have the search index (AST hash) only when v2 is
3584 	 * enabled
3585 	 */
3586 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3587 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3588 	else
3589 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3590 }
3591 
3592 static inline bool
3593 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
3594 			  struct dp_vdev *vdev,
3595 			  struct dp_tx_desc_s *tx_desc)
3596 {
3597 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
3598 		return false;
3599 
	/*
	 * If a vdev is given, only check whether the descriptor's
	 * vdev matches. If vdev is NULL, check whether the
	 * descriptor's pdev matches.
	 */
3605 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
3606 }
3607 
3608 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3609 /**
3610  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
3611  *
3612  * @soc: Handle to DP SoC structure
3613  * @tx_desc: pointer of one TX desc
3614  * @desc_pool_id: TX Desc pool id
3615  */
3616 static inline void
3617 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
3618 		      uint8_t desc_pool_id)
3619 {
3620 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
3621 
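	/*
	 * Clear the vdev reference under the flow pool lock so the update is
	 * serialized against concurrent descriptor alloc/free on the same
	 * flow pool.
	 */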
3622 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3623 
3624 	tx_desc->vdev = NULL;
3625 
3626 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3627 }
3628 
3629 /**
 * dp_tx_desc_flush() - release resources associated
 *                      with TX descriptors
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific vdev is required; check all allocated TX descriptors
 * on this pdev.
 * Non-NULL: only check the allocated TX descriptors associated with this
 * vdev.
 *
 * @force_free:
 * true: flush the TX descriptors.
 * false: only reset the vdev in each allocated TX descriptor
 * associated with the current vdev.
 *
 * This function walks the TX descriptor pools and either flushes the
 * outstanding TX data or resets the vdev to NULL in the associated TX
 * descriptors.
3647  */
3648 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3649 			     struct dp_vdev *vdev,
3650 			     bool force_free)
3651 {
3652 	uint8_t i;
3653 	uint32_t j;
3654 	uint32_t num_desc, page_id, offset;
3655 	uint16_t num_desc_per_page;
3656 	struct dp_soc *soc = pdev->soc;
3657 	struct dp_tx_desc_s *tx_desc = NULL;
3658 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3659 
3660 	if (!vdev && !force_free) {
3661 		dp_err("Reset TX desc vdev, Vdev param is required!");
3662 		return;
3663 	}
3664 
3665 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3666 		tx_desc_pool = &soc->tx_desc[i];
3667 		if (!(tx_desc_pool->pool_size) ||
3668 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3669 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3670 			continue;
3671 
3672 		num_desc = tx_desc_pool->pool_size;
3673 		num_desc_per_page =
3674 			tx_desc_pool->desc_pages.num_element_per_page;
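		/*
		 * Descriptors are laid out across multiple cacheable pages;
		 * translate the flat index j into a (page_id, offset) pair so
		 * dp_tx_desc_find() can locate the descriptor.
		 */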
3675 		for (j = 0; j < num_desc; j++) {
3676 			page_id = j / num_desc_per_page;
3677 			offset = j % num_desc_per_page;
3678 
3679 			if (qdf_unlikely(!(tx_desc_pool->
3680 					 desc_pages.cacheable_pages)))
3681 				break;
3682 
3683 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3684 
3685 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3686 				/*
3687 				 * Free TX desc if force free is
3688 				 * required, otherwise only reset vdev
3689 				 * in this TX desc.
3690 				 */
3691 				if (force_free) {
3692 					dp_tx_comp_free_buf(soc, tx_desc);
3693 					dp_tx_desc_release(tx_desc, i);
3694 				} else {
3695 					dp_tx_desc_reset_vdev(soc, tx_desc,
3696 							      i);
3697 				}
3698 			}
3699 		}
3700 	}
3701 }
3702 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3703 
3704 static inline void
3705 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
3706 		      uint8_t desc_pool_id)
3707 {
3708 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
3709 
3710 	tx_desc->vdev = NULL;
3711 
3712 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
3713 }
3714 
3715 static void dp_tx_desc_flush(struct dp_pdev *pdev,
3716 			     struct dp_vdev *vdev,
3717 			     bool force_free)
3718 {
3719 	uint8_t i, num_pool;
3720 	uint32_t j;
3721 	uint32_t num_desc, page_id, offset;
3722 	uint16_t num_desc_per_page;
3723 	struct dp_soc *soc = pdev->soc;
3724 	struct dp_tx_desc_s *tx_desc = NULL;
3725 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3726 
3727 	if (!vdev && !force_free) {
3728 		dp_err("Reset TX desc vdev, Vdev param is required!");
3729 		return;
3730 	}
3731 
3732 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3733 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3734 
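	/*
	 * Without flow control V2 the descriptor pools are statically sized
	 * from the SoC config, so iterate over the configured number of pools
	 * and descriptors per pool.
	 */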
3735 	for (i = 0; i < num_pool; i++) {
3736 		tx_desc_pool = &soc->tx_desc[i];
3737 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3738 			continue;
3739 
3740 		num_desc_per_page =
3741 			tx_desc_pool->desc_pages.num_element_per_page;
3742 		for (j = 0; j < num_desc; j++) {
3743 			page_id = j / num_desc_per_page;
3744 			offset = j % num_desc_per_page;
3745 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3746 
3747 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
3748 				if (force_free) {
3749 					dp_tx_comp_free_buf(soc, tx_desc);
3750 					dp_tx_desc_release(tx_desc, i);
3751 				} else {
3752 					dp_tx_desc_reset_vdev(soc, tx_desc,
3753 							      i);
3754 				}
3755 			}
3756 		}
3757 	}
3758 }
3759 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3760 
3761 /**
3762  * dp_tx_vdev_detach() - detach vdev from dp tx
3763  * @vdev: virtual device instance
3764  *
3765  * Return: QDF_STATUS_SUCCESS: success
3766  *         QDF_STATUS_E_RESOURCES: Error return
3767  */
3768 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3769 {
3770 	struct dp_pdev *pdev = vdev->pdev;
3771 
3772 	/* Reset TX desc associated to this Vdev as NULL */
3773 	dp_tx_desc_flush(pdev, vdev, false);
3774 
3775 	return QDF_STATUS_SUCCESS;
3776 }
3777 
3778 /**
3779  * dp_tx_pdev_attach() - attach pdev to dp tx
3780  * @pdev: physical device instance
3781  *
3782  * Return: QDF_STATUS_SUCCESS: success
3783  *         QDF_STATUS_E_RESOURCES: Error return
3784  */
3785 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3786 {
3787 	struct dp_soc *soc = pdev->soc;
3788 
3789 	/* Initialize Flow control counters */
3790 	qdf_atomic_init(&pdev->num_tx_exception);
3791 	qdf_atomic_init(&pdev->num_tx_outstanding);
3792 
3793 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3794 		/* Initialize descriptors in TCL Ring */
3795 		hal_tx_init_data_ring(soc->hal_soc,
3796 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3797 	}
3798 
3799 	return QDF_STATUS_SUCCESS;
3800 }
3801 
3802 /**
3803  * dp_tx_pdev_detach() - detach pdev from dp tx
3804  * @pdev: physical device instance
3805  *
3806  * Return: QDF_STATUS_SUCCESS: success
3807  *         QDF_STATUS_E_RESOURCES: Error return
3808  */
3809 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3810 {
3811 	/* flush TX outstanding data per pdev */
3812 	dp_tx_desc_flush(pdev, NULL, true);
3813 	dp_tx_me_exit(pdev);
3814 	return QDF_STATUS_SUCCESS;
3815 }
3816 
3817 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3818 /* Pools will be allocated dynamically */
3819 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3820 					int num_desc)
3821 {
3822 	uint8_t i;
3823 
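	/*
	 * With flow control V2 the actual flow pools are created on demand;
	 * at this point only initialize each pool's lock and mark the pool
	 * inactive.
	 */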
3824 	for (i = 0; i < num_pool; i++) {
3825 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3826 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3827 	}
3828 
3829 	return 0;
3830 }
3831 
3832 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3833 {
3834 	uint8_t i;
3835 
3836 	for (i = 0; i < num_pool; i++)
3837 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3838 }
3839 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3840 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3841 					int num_desc)
3842 {
3843 	uint8_t i;
3844 
3845 	/* Allocate software Tx descriptor pools */
3846 	for (i = 0; i < num_pool; i++) {
3847 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3848 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3849 					"%s Tx Desc Pool alloc %d failed %pK",
3850 					__func__, i, soc);
3851 			return ENOMEM;
3852 		}
3853 	}
3854 	return 0;
3855 }
3856 
3857 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3858 {
3859 	uint8_t i;
3860 
3861 	for (i = 0; i < num_pool; i++) {
3862 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3863 		if (dp_tx_desc_pool_free(soc, i)) {
3864 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3865 				"%s Tx Desc Pool Free failed", __func__);
3866 		}
3867 	}
3868 }
3869 
3870 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3871 
3872 #ifndef QCA_MEM_ATTACH_ON_WIFI3
3873 /**
3874  * dp_tso_attach_wifi3() - TSO attach handler
3875  * @txrx_soc: Opaque Dp handle
3876  *
3877  * Reserve TSO descriptor buffers
3878  *
3879  * Return: QDF_STATUS_E_FAILURE on failure or
3880  * QDF_STATUS_SUCCESS on success
3881  */
3882 static
3883 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3884 {
3885 	return dp_tso_soc_attach(txrx_soc);
3886 }
3887 
3888 /**
3889  * dp_tso_detach_wifi3() - TSO Detach handler
3890  * @txrx_soc: Opaque Dp handle
3891  *
3892  * Deallocate TSO descriptor buffers
3893  *
3894  * Return: QDF_STATUS_E_FAILURE on failure or
3895  * QDF_STATUS_SUCCESS on success
3896  */
3897 static
3898 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3899 {
3900 	return dp_tso_soc_detach(txrx_soc);
3901 }
3902 #else
3903 static
3904 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3905 {
3906 	return QDF_STATUS_SUCCESS;
3907 }
3908 
3909 static
3910 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3911 {
3912 	return QDF_STATUS_SUCCESS;
3913 }
3914 #endif
3915 
3916 QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
3917 {
3918 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3919 	uint8_t i;
3920 	uint8_t num_pool;
3921 	uint32_t num_desc;
3922 
3923 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3924 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3925 
3926 	for (i = 0; i < num_pool; i++)
3927 		dp_tx_tso_desc_pool_free(soc, i);
3928 
3929 	dp_info("%s TSO Desc Pool %d Free descs = %d",
3930 		__func__, num_pool, num_desc);
3931 
3932 	for (i = 0; i < num_pool; i++)
3933 		dp_tx_tso_num_seg_pool_free(soc, i);
3934 
3935 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
3936 		__func__, num_pool, num_desc);
3937 
3938 	return QDF_STATUS_SUCCESS;
3939 }
3940 
3941 /**
3942  * dp_tso_attach() - TSO attach handler
3943  * @txrx_soc: Opaque Dp handle
3944  *
3945  * Reserve TSO descriptor buffers
3946  *
3947  * Return: QDF_STATUS_E_FAILURE on failure or
3948  * QDF_STATUS_SUCCESS on success
3949  */
3950 QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
3951 {
3952 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3953 	uint8_t i;
3954 	uint8_t num_pool;
3955 	uint32_t num_desc;
3956 
3957 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3958 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3959 
3960 	for (i = 0; i < num_pool; i++) {
3961 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3962 			dp_err("TSO Desc Pool alloc %d failed %pK",
3963 			       i, soc);
3964 
3965 			return QDF_STATUS_E_FAILURE;
3966 		}
3967 	}
3968 
3969 	dp_info("%s TSO Desc Alloc %d, descs = %d",
3970 		__func__, num_pool, num_desc);
3971 
3972 	for (i = 0; i < num_pool; i++) {
3973 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3974 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
3975 			       i, soc);
3976 
3977 			return QDF_STATUS_E_FAILURE;
3978 		}
3979 	}
3980 	return QDF_STATUS_SUCCESS;
3981 }
3982 
3983 /**
3984  * dp_tx_soc_detach() - detach soc from dp tx
3985  * @soc: core txrx main context
3986  *
 * This function detaches dp tx from the main device context.
 * It frees dp tx resources and deinitializes them.
3989  *
3990  * Return: QDF_STATUS_SUCCESS: success
3991  *         QDF_STATUS_E_RESOURCES: Error return
3992  */
3993 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3994 {
3995 	uint8_t num_pool;
3996 	uint16_t num_desc;
3997 	uint16_t num_ext_desc;
3998 	uint8_t i;
3999 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4000 
4001 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4002 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4003 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4004 
4005 	dp_tx_flow_control_deinit(soc);
4006 	dp_tx_delete_static_pools(soc, num_pool);
4007 
4008 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4009 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
4010 			__func__, num_pool, num_desc);
4011 
4012 	for (i = 0; i < num_pool; i++) {
4013 		if (dp_tx_ext_desc_pool_free(soc, i)) {
4014 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4015 					"%s Tx Ext Desc Pool Free failed",
4016 					__func__);
4017 			return QDF_STATUS_E_RESOURCES;
4018 		}
4019 	}
4020 
4021 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4022 			"%s MSDU Ext Desc Pool %d Free descs = %d",
4023 			__func__, num_pool, num_ext_desc);
4024 
4025 	status = dp_tso_detach_wifi3(soc);
4026 	if (status != QDF_STATUS_SUCCESS)
4027 		return status;
4028 
4029 	return QDF_STATUS_SUCCESS;
4030 }
4031 
4032 /**
4033  * dp_tx_soc_attach() - attach soc to dp tx
4034  * @soc: core txrx main context
4035  *
 * This function attaches dp tx to the main device context.
 * It allocates dp tx resources and initializes them.
4038  *
4039  * Return: QDF_STATUS_SUCCESS: success
4040  *         QDF_STATUS_E_RESOURCES: Error return
4041  */
4042 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
4043 {
4044 	uint8_t i;
4045 	uint8_t num_pool;
4046 	uint32_t num_desc;
4047 	uint32_t num_ext_desc;
4048 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4049 
4050 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4051 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4052 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4053 
4054 	if (num_pool > MAX_TXDESC_POOLS)
4055 		goto fail;
4056 
4057 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4058 		goto fail;
4059 
4060 	dp_tx_flow_control_init(soc);
4061 
4062 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4063 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
4064 			__func__, num_pool, num_desc);
4065 
4066 	/* Allocate extension tx descriptor pools */
4067 	for (i = 0; i < num_pool; i++) {
4068 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
4069 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4070 				"MSDU Ext Desc Pool alloc %d failed %pK",
4071 				i, soc);
4072 
4073 			goto fail;
4074 		}
4075 	}
4076 
4077 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4078 			"%s MSDU Ext Desc Alloc %d, descs = %d",
4079 			__func__, num_pool, num_ext_desc);
4080 
4081 	status = dp_tso_attach_wifi3((void *)soc);
4082 	if (status != QDF_STATUS_SUCCESS)
4083 		goto fail;
4084 
4085 
4086 	/* Initialize descriptors in TCL Rings */
4087 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4088 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4089 			hal_tx_init_data_ring(soc->hal_soc,
4090 					soc->tcl_data_ring[i].hal_srng);
4091 		}
4092 	}
4093 
	/*
	 * TODO: Add a runtime config option to enable this.
	 */
	/*
	 * Due to multiple issues on NPR EMU, enable it selectively
	 * only for NPR EMU; this should be removed once NPR platforms
	 * are stable.
	 */
4102 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4103 
4104 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4105 			"%s HAL Tx init Success", __func__);
4106 
4107 	return QDF_STATUS_SUCCESS;
4108 
4109 fail:
4110 	/* Detach will take care of freeing only allocated resources */
4111 	dp_tx_soc_detach(soc);
4112 	return QDF_STATUS_E_RESOURCES;
4113 }
4114 
/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: pointer to the head of the segment info list
 *
 * Return: void
 */
4122 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
4123 		struct dp_tx_seg_info_s *seg_info_head)
4124 {
4125 	struct dp_tx_me_buf_t *mc_uc_buf;
4126 	struct dp_tx_seg_info_s *seg_info_new = NULL;
4127 	qdf_nbuf_t nbuf = NULL;
4128 	uint64_t phy_addr;
4129 
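	/*
	 * Walk the segment list: unmap and return each per-client DA buffer
	 * to the ME buffer pool, free the associated nbuf, and release the
	 * seg_info node itself.
	 */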
4130 	while (seg_info_head) {
4131 		nbuf = seg_info_head->nbuf;
4132 		mc_uc_buf = (struct dp_tx_me_buf_t *)
4133 			seg_info_head->frags[0].vaddr;
4134 		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, QDF_MAC_ADDR_SIZE);
4139 		dp_tx_me_free_buf(pdev, mc_uc_buf);
4140 		qdf_nbuf_free(nbuf);
4141 		seg_info_new = seg_info_head;
4142 		seg_info_head = seg_info_head->next;
4143 		qdf_mem_free(seg_info_new);
4144 	}
4145 }
4146 
4147 /**
 * dp_tx_me_send_convert_ucast() - convert a multicast frame to unicast frames
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: Number of clients
 *
 * Return: number of converted packets
4155  */
4156 uint16_t
4157 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
4158 		uint8_t newmac[][QDF_MAC_ADDR_SIZE], uint8_t new_mac_cnt)
4159 {
4160 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
4161 	struct dp_pdev *pdev = vdev->pdev;
4162 	qdf_ether_header_t *eh;
4163 	uint8_t *data;
4164 	uint16_t len;
4165 
4166 	/* reference to frame dst addr */
4167 	uint8_t *dstmac;
4168 	/* copy of original frame src addr */
4169 	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
4170 
4171 	/* local index into newmac */
4172 	uint8_t new_mac_idx = 0;
4173 	struct dp_tx_me_buf_t *mc_uc_buf;
4174 	qdf_nbuf_t  nbuf_clone;
4175 	struct dp_tx_msdu_info_s msdu_info;
4176 	struct dp_tx_seg_info_s *seg_info_head = NULL;
4177 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
4178 	struct dp_tx_seg_info_s *seg_info_new;
4179 	qdf_dma_addr_t paddr_data;
4180 	qdf_dma_addr_t paddr_mcbuf = 0;
4181 	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
4182 	QDF_STATUS status;
4183 
4184 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
4185 
4186 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
4187 
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4189 	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);
4190 
4191 	len = qdf_nbuf_len(nbuf);
4192 
4193 	data = qdf_nbuf_data(nbuf);
4194 
4195 	status = qdf_nbuf_map(vdev->osdev, nbuf,
4196 			QDF_DMA_TO_DEVICE);
4197 
4198 	if (status) {
4199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4200 				"Mapping failure Error:%d", status);
4201 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
4202 		qdf_nbuf_free(nbuf);
4203 		return 1;
4204 	}
4205 
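	/*
	 * Each converted frame is built from two fragments: frag[0] is a
	 * small per-client buffer carrying the new destination MAC, and
	 * frag[1] is the original frame starting just past its destination
	 * address; hence paddr_data points QDF_MAC_ADDR_SIZE bytes into the
	 * mapped nbuf.
	 */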
4206 	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
4207 
4208 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
4209 		dstmac = newmac[new_mac_idx];
4210 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4211 				"added mac addr (%pM)", dstmac);
4212 
4213 		/* Check for NULL Mac Address */
4214 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
4215 			continue;
4216 
4217 		/* frame to self mac. skip */
4218 		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
4219 			continue;
4220 
4221 		/*
4222 		 * TODO: optimize to avoid malloc in per-packet path
4223 		 * For eg. seg_pool can be made part of vdev structure
4224 		 */
4225 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
4226 
4227 		if (!seg_info_new) {
4228 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4229 					"alloc failed");
4230 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
4231 			goto fail_seg_alloc;
4232 		}
4233 
4234 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
4235 		if (!mc_uc_buf)
4236 			goto fail_buf_alloc;
4237 
4238 		/*
4239 		 * TODO: Check if we need to clone the nbuf
4240 		 * Or can we just use the reference for all cases
4241 		 */
4242 		if (new_mac_idx < (new_mac_cnt - 1)) {
4243 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
4244 			if (!nbuf_clone) {
4245 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
4246 				goto fail_clone;
4247 			}
4248 		} else {
4249 			/*
4250 			 * Update the ref
4251 			 * to account for frame sent without cloning
4252 			 */
4253 			qdf_nbuf_ref(nbuf);
4254 			nbuf_clone = nbuf;
4255 		}
4256 
4257 		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);
4258 
4259 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
4260 				QDF_DMA_TO_DEVICE, QDF_MAC_ADDR_SIZE,
4261 				&paddr_mcbuf);
4262 
4263 		if (status) {
4264 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4265 					"Mapping failure Error:%d", status);
4266 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
4267 			goto fail_map;
4268 		}
4269 
		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
4271 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
4272 		seg_info_new->frags[0].paddr_hi =
4273 			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
4274 		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;
4275 
4276 		/*preparing data fragment*/
4277 		seg_info_new->frags[1].vaddr =
4278 			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
4279 		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
4280 		seg_info_new->frags[1].paddr_hi =
4281 			(uint16_t)(((uint64_t)paddr_data) >> 32);
4282 		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;
4283 
4284 		seg_info_new->nbuf = nbuf_clone;
4285 		seg_info_new->frag_cnt = 2;
4286 		seg_info_new->total_len = len;
4287 
4288 		seg_info_new->next = NULL;
4289 
4290 		if (!seg_info_head)
4291 			seg_info_head = seg_info_new;
4292 		else
4293 			seg_info_tail->next = seg_info_new;
4294 
4295 		seg_info_tail = seg_info_new;
4296 	}
4297 
	if (!seg_info_head)
		goto free_return;
4301 
4302 	msdu_info.u.sg_info.curr_seg = seg_info_head;
4303 	msdu_info.num_seg = new_mac_cnt;
4304 	msdu_info.frm_type = dp_tx_frm_me;
4305 
4306 	msdu_info.tid = HTT_INVALID_TID;
4307 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
4308 	    qdf_unlikely(pdev->hmmc_tid_override_en))
4309 		msdu_info.tid = pdev->hmmc_tid;
4310 
4311 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
4312 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4313 
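	/*
	 * After dp_tx_send_msdu_multiple() has walked the segment list, the
	 * local seg_info nodes are no longer needed; free them, then unmap
	 * and release the original nbuf, since every transmitted segment
	 * holds its own clone or reference.
	 */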
4314 	while (seg_info_head->next) {
4315 		seg_info_new = seg_info_head;
4316 		seg_info_head = seg_info_head->next;
4317 		qdf_mem_free(seg_info_new);
4318 	}
4319 	qdf_mem_free(seg_info_head);
4320 
4321 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4322 	qdf_nbuf_free(nbuf);
4323 	return new_mac_cnt;
4324 
4325 fail_map:
4326 	qdf_nbuf_free(nbuf_clone);
4327 
4328 fail_clone:
4329 	dp_tx_me_free_buf(pdev, mc_uc_buf);
4330 
4331 fail_buf_alloc:
4332 	qdf_mem_free(seg_info_new);
4333 
4334 fail_seg_alloc:
4335 	dp_tx_me_mem_free(pdev, seg_info_head);
4336 
4337 free_return:
4338 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4339 	qdf_nbuf_free(nbuf);
4340 	return 1;
4341 }
4342 
4343