xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 #include "enet.h"
34 
35 #define DP_TX_QUEUE_MASK 0x3
36 
37 /* TODO Add support in TSO */
38 #define DP_DESC_NUM_FRAG(x) 0
39 
40 /* disable TQM_BYPASS */
41 #define TQM_BYPASS_WAR 0
42 
43 /* invalid peer id for reinject*/
44 #define DP_INVALID_PEER 0XFFFE
45 
46 /* mapping between hal encrypt type and cdp_sec_type */
47 #define MAX_CDP_SEC_TYPE 12
48 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
49 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
50 					HAL_TX_ENCRYPT_TYPE_WEP_128,
51 					HAL_TX_ENCRYPT_TYPE_WEP_104,
52 					HAL_TX_ENCRYPT_TYPE_WEP_40,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
54 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
55 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
56 					HAL_TX_ENCRYPT_TYPE_WAPI,
57 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
59 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
60 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
61 
62 /**
63  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
64  * @vdev: DP Virtual device handle
65  * @nbuf: Buffer pointer
66  * @queue: queue ids container for nbuf
67  *
68  * The Tx queue for a frame consists of two IDs: the software descriptor
69  * pool ID and the DMA ring ID. Based on the Tx feature set and hardware
70  * configuration, the ID combination can differ.
71  * For example -
72  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per CPU ID.
73  * With no XPS (lock-based resource protection), descriptor pool IDs differ
74  * per vdev, while the DMA ring ID is the same as the single pdev ID.
75  *
76  * Return: None
77  */
78 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
79 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
80 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
81 {
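	/*
	 * With multiqueue Tx, the skb queue_mapping (masked with
	 * DP_TX_QUEUE_MASK) selects both the SW descriptor pool and, via
	 * soc->tx_ring_map[], the TCL data ring. For example, a
	 * queue_mapping of 5 gives offset 5 & 0x3 = 1, i.e. descriptor
	 * pool 1 and the ring held in tx_ring_map[1].
	 */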
82 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
83 	queue->desc_pool_id = queue_offset;
84 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
85 
86 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
87 			"%s, pool_id:%d ring_id: %d",
88 			__func__, queue->desc_pool_id, queue->ring_id);
89 
90 	return;
91 }
92 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
93 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
94 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
95 {
96 	/* get flow id */
97 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
98 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
99 
100 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
101 			"%s, pool_id:%d ring_id: %d",
102 			__func__, queue->desc_pool_id, queue->ring_id);
103 
104 	return;
105 }
106 #endif
107 
108 #if defined(FEATURE_TSO)
109 /**
110  * dp_tx_tso_unmap_segment() - Unmap TSO segment
111  *
112  * @soc - core txrx main context
113  * @seg_desc - tso segment descriptor
114  * @num_seg_desc - tso number segment descriptor
115  */
116 static void dp_tx_tso_unmap_segment(
117 		struct dp_soc *soc,
118 		struct qdf_tso_seg_elem_t *seg_desc,
119 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
120 {
121 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
122 	if (qdf_unlikely(!seg_desc)) {
123 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
124 			 __func__, __LINE__);
125 		qdf_assert(0);
126 	} else if (qdf_unlikely(!num_seg_desc)) {
127 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
128 			 __func__, __LINE__);
129 		qdf_assert(0);
130 	} else {
131 		bool is_last_seg;
132 		/* no tso segment left to do dma unmap */
133 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
134 			return;
135 
136 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
137 					true : false;
138 		qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					   seg_desc, is_last_seg);
140 		num_seg_desc->num_seg.tso_cmn_num_seg--;
141 	}
142 }
143 
144 /**
145  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
146  *                            back to the freelist
147  *
148  * @soc - soc device handle
149  * @tx_desc - Tx software descriptor
150  */
151 static void dp_tx_tso_desc_release(struct dp_soc *soc,
152 				   struct dp_tx_desc_s *tx_desc)
153 {
154 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
155 	if (qdf_unlikely(!tx_desc->tso_desc)) {
156 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
157 			  "%s %d TSO desc is NULL!",
158 			  __func__, __LINE__);
159 		qdf_assert(0);
160 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
161 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
162 			  "%s %d TSO num desc is NULL!",
163 			  __func__, __LINE__);
164 		qdf_assert(0);
165 	} else {
166 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
167 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
168 
169 		/* Add the tso num segment into the free list */
170 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
171 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
172 					    tx_desc->tso_num_desc);
173 			tx_desc->tso_num_desc = NULL;
174 		}
175 
176 		/* Add the tso segment into the free list*/
177 		dp_tx_tso_desc_free(soc,
178 				    tx_desc->pool_id, tx_desc->tso_desc);
179 		tx_desc->tso_desc = NULL;
180 	}
181 }
182 #else
183 static void dp_tx_tso_unmap_segment(
184 		struct dp_soc *soc,
185 		struct qdf_tso_seg_elem_t *seg_desc,
186 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
187 
188 {
189 }
190 
191 static void dp_tx_tso_desc_release(struct dp_soc *soc,
192 				   struct dp_tx_desc_s *tx_desc)
193 {
194 }
195 #endif
196 /**
197  * dp_tx_desc_release() - Release Tx Descriptor
198  * @tx_desc : Tx Descriptor
199  * @desc_pool_id: Descriptor Pool ID
200  *
201  * Deallocate all resources attached to Tx descriptor and free the Tx
202  * descriptor.
203  *
204  * Return: None
205  */
206 static void
207 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
208 {
209 	struct dp_pdev *pdev = tx_desc->pdev;
210 	struct dp_soc *soc;
211 	uint8_t comp_status = 0;
212 
213 	qdf_assert(pdev);
214 
215 	soc = pdev->soc;
216 
217 	if (tx_desc->frm_type == dp_tx_frm_tso)
218 		dp_tx_tso_desc_release(soc, tx_desc);
219 
220 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
221 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
222 
223 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
224 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
225 
226 	qdf_atomic_dec(&pdev->num_tx_outstanding);
227 
228 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
229 		qdf_atomic_dec(&pdev->num_tx_exception);
230 
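	/*
	 * Only completions that came from TQM carry a valid release reason
	 * in the completion descriptor; everything else is accounted as a
	 * FW-initiated release.
	 */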
231 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
232 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
233 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
234 							     soc->hal_soc);
235 	else
236 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
237 
238 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
239 		"Tx Completion Release desc %d status %d outstanding %d",
240 		tx_desc->id, comp_status,
241 		qdf_atomic_read(&pdev->num_tx_outstanding));
242 
243 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
244 	return;
245 }
246 
247 /**
248  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
249  * @vdev: DP vdev Handle
250  * @nbuf: skb
251  *
252  * Prepares and fills HTT metadata in the frame pre-header for special frames
253  * that should be transmitted using varying transmit parameters.
254  * There are 2 VDEV modes that currently need this special metadata -
255  *  1) Mesh Mode
256  *  2) DSRC Mode
257  *
258  * Return: HTT metadata size
259  *
260  */
261 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
262 		uint32_t *meta_data)
263 {
264 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
265 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
266 
267 	uint8_t htt_desc_size;
268 
269 	/* Size rounded up to a multiple of 8 bytes */
270 	uint8_t htt_desc_size_aligned;
271 
272 	uint8_t *hdr = NULL;
273 
274 	/*
275 	 * Metadata - HTT MSDU Extension header
276 	 */
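	/*
	 * (size + 7) & ~0x7 rounds the descriptor size up to the next
	 * multiple of 8, e.g. a 42-byte descriptor is padded to 48, so the
	 * metadata pushed onto the frame keeps the 8-byte alignment the HW
	 * expects.
	 */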
277 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
278 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
279 
280 	if (vdev->mesh_vdev) {
281 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
282 					htt_desc_size_aligned)) {
283 			DP_STATS_INC(vdev,
284 				     tx_i.dropped.headroom_insufficient, 1);
285 			return 0;
286 		}
287 		/* Fill and add HTT metaheader */
288 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
289 		if (hdr == NULL) {
290 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
291 					"Error in filling HTT metadata");
292 
293 			return 0;
294 		}
295 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
296 
297 	} else if (vdev->opmode == wlan_op_mode_ocb) {
298 		/* Todo - Add support for DSRC */
299 	}
300 
301 	return htt_desc_size_aligned;
302 }
303 
304 /**
305  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
306  * @tso_seg: TSO segment to process
307  * @ext_desc: Pointer to MSDU extension descriptor
308  *
309  * Return: void
310  */
311 #if defined(FEATURE_TSO)
312 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
313 		void *ext_desc)
314 {
315 	uint8_t num_frag;
316 	uint32_t tso_flags;
317 
318 	/*
319 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
320 	 * tcp_flag_mask
321 	 *
322 	 * Checksum enable flags are set in TCL descriptor and not in Extension
323 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
324 	 */
325 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
326 
327 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
328 
329 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
330 		tso_seg->tso_flags.ip_len);
331 
332 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
333 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
334 
335 
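	/*
	 * Program one buffer pointer per TSO fragment; each 64-bit DMA
	 * address is split into low/high 32-bit words before being written
	 * into the extension descriptor.
	 */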
336 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
337 		uint32_t lo = 0;
338 		uint32_t hi = 0;
339 
340 		qdf_dmaaddr_to_32s(
341 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
342 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
343 			tso_seg->tso_frags[num_frag].length);
344 	}
345 
346 	return;
347 }
348 #else
349 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
350 		void *ext_desc)
351 {
352 	return;
353 }
354 #endif
355 
356 #if defined(FEATURE_TSO)
357 /**
358  * dp_tx_free_tso_seg_list() - Loop through the tso segments
359  *                             allocated and free them
360  *
361  * @soc: soc handle
362  * @free_seg: list of tso segments
363  * @msdu_info: msdu descriptor
364  *
365  * Return: void
366  */
367 static void dp_tx_free_tso_seg_list(
368 		struct dp_soc *soc,
369 		struct qdf_tso_seg_elem_t *free_seg,
370 		struct dp_tx_msdu_info_s *msdu_info)
371 {
372 	struct qdf_tso_seg_elem_t *next_seg;
373 
374 	while (free_seg) {
375 		next_seg = free_seg->next;
376 		dp_tx_tso_desc_free(soc,
377 				    msdu_info->tx_queue.desc_pool_id,
378 				    free_seg);
379 		free_seg = next_seg;
380 	}
381 }
382 
383 /**
384  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
385  *                                 allocated and free them
386  *
387  * @soc:  soc handle
388  * @free_num_seg: list of tso number segments
389  * @msdu_info: msdu descriptor
390  * Return: void
391  */
392 static void dp_tx_free_tso_num_seg_list(
393 		struct dp_soc *soc,
394 		struct qdf_tso_num_seg_elem_t *free_num_seg,
395 		struct dp_tx_msdu_info_s *msdu_info)
396 {
397 	struct qdf_tso_num_seg_elem_t *next_num_seg;
398 
399 	while (free_num_seg) {
400 		next_num_seg = free_num_seg->next;
401 		dp_tso_num_seg_free(soc,
402 				    msdu_info->tx_queue.desc_pool_id,
403 				    free_num_seg);
404 		free_num_seg = next_num_seg;
405 	}
406 }
407 
408 /**
409  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
410  *                              do dma unmap for each segment
411  *
412  * @soc: soc handle
413  * @free_seg: list of tso segments
414  * @num_seg_desc: tso number segment descriptor
415  *
416  * Return: void
417  */
418 static void dp_tx_unmap_tso_seg_list(
419 		struct dp_soc *soc,
420 		struct qdf_tso_seg_elem_t *free_seg,
421 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
422 {
423 	struct qdf_tso_seg_elem_t *next_seg;
424 
425 	if (qdf_unlikely(!num_seg_desc)) {
426 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
427 		return;
428 	}
429 
430 	while (free_seg) {
431 		next_seg = free_seg->next;
432 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
433 		free_seg = next_seg;
434 	}
435 }
436 
437 /**
438  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
439  *				     free the tso segments descriptor and
440  *				     tso num segments descriptor
441  *
442  * @soc:  soc handle
443  * @msdu_info: msdu descriptor
444  * @tso_seg_unmap: flag to show if dma unmap is necessary
445  *
446  * Return: void
447  */
448 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
449 					  struct dp_tx_msdu_info_s *msdu_info,
450 					  bool tso_seg_unmap)
451 {
452 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
453 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
454 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
455 					tso_info->tso_num_seg_list;
456 
457 	/* do dma unmap for each segment */
458 	if (tso_seg_unmap)
459 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
460 
461 	/* free all tso num seg descriptors; in practice there is only one */
462 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
463 
464 	/* free all tso segment descriptor */
465 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
466 }
467 
468 /**
469  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
470  * @vdev: virtual device handle
471  * @msdu: network buffer
472  * @msdu_info: meta data associated with the msdu
473  *
474  * Return: QDF_STATUS_SUCCESS success
475  */
476 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
477 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
478 {
479 	struct qdf_tso_seg_elem_t *tso_seg;
480 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
481 	struct dp_soc *soc = vdev->pdev->soc;
482 	struct qdf_tso_info_t *tso_info;
483 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
484 
485 	tso_info = &msdu_info->u.tso_info;
486 	tso_info->curr_seg = NULL;
487 	tso_info->tso_seg_list = NULL;
488 	tso_info->num_segs = num_seg;
489 	msdu_info->frm_type = dp_tx_frm_tso;
490 	tso_info->tso_num_seg_list = NULL;
491 
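	/*
	 * Pre-allocate one TSO segment descriptor per segment (plus a
	 * single num-seg descriptor below); qdf_nbuf_get_tso_info() then
	 * fills them with per-segment fragment info. Any failure rolls
	 * back everything allocated so far.
	 */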
492 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
493 
494 	while (num_seg) {
495 		tso_seg = dp_tx_tso_desc_alloc(
496 				soc, msdu_info->tx_queue.desc_pool_id);
497 		if (tso_seg) {
498 			tso_seg->next = tso_info->tso_seg_list;
499 			tso_info->tso_seg_list = tso_seg;
500 			num_seg--;
501 		} else {
502 			DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
503 				 __func__);
504 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
505 
506 			return QDF_STATUS_E_NOMEM;
507 		}
508 	}
509 
510 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
511 
512 	tso_num_seg = dp_tso_num_seg_alloc(soc,
513 			msdu_info->tx_queue.desc_pool_id);
514 
515 	if (tso_num_seg) {
516 		tso_num_seg->next = tso_info->tso_num_seg_list;
517 		tso_info->tso_num_seg_list = tso_num_seg;
518 	} else {
519 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
520 			 __func__);
521 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
522 
523 		return QDF_STATUS_E_NOMEM;
524 	}
525 
526 	msdu_info->num_seg =
527 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
528 
529 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
530 			msdu_info->num_seg);
531 
532 	if (!(msdu_info->num_seg)) {
533 		/*
534 		 * Free allocated TSO seg desc and number seg desc,
535 		 * do unmap for segments if dma map has done.
536 		 */
537 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
538 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
539 
540 		return QDF_STATUS_E_INVAL;
541 	}
542 
543 	tso_info->curr_seg = tso_info->tso_seg_list;
544 
545 	return QDF_STATUS_SUCCESS;
546 }
547 #else
548 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
549 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
550 {
551 	return QDF_STATUS_E_NOMEM;
552 }
553 #endif
554 
555 /**
556  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
557  * @vdev: DP Vdev handle
558  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
559  * @desc_pool_id: Descriptor Pool ID
560  *
561  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
562  */
563 static
564 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
565 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
566 {
567 	uint8_t i;
568 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
569 	struct dp_tx_seg_info_s *seg_info;
570 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
571 	struct dp_soc *soc = vdev->pdev->soc;
572 
573 	/* Allocate an extension descriptor */
574 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
575 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
576 
577 	if (!msdu_ext_desc) {
578 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
579 		return NULL;
580 	}
581 
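	/*
	 * For mesh frames going to FW, the HTT metadata is placed right
	 * after the extension descriptor fields in the cached buffer
	 * (offset HAL_TX_EXTENSION_DESC_LEN_BYTES), so it presumably gets
	 * written out together with the descriptor by the sync below.
	 */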
582 	if (msdu_info->exception_fw &&
583 			qdf_unlikely(vdev->mesh_vdev)) {
584 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
585 				&msdu_info->meta_data[0],
586 				sizeof(struct htt_tx_msdu_desc_ext2_t));
587 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
588 	}
589 
590 	switch (msdu_info->frm_type) {
591 	case dp_tx_frm_sg:
592 	case dp_tx_frm_me:
593 	case dp_tx_frm_raw:
594 		seg_info = msdu_info->u.sg_info.curr_seg;
595 		/* Update the buffer pointers in MSDU Extension Descriptor */
596 		for (i = 0; i < seg_info->frag_cnt; i++) {
597 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
598 				seg_info->frags[i].paddr_lo,
599 				seg_info->frags[i].paddr_hi,
600 				seg_info->frags[i].len);
601 		}
602 
603 		break;
604 
605 	case dp_tx_frm_tso:
606 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
607 				&cached_ext_desc[0]);
608 		break;
609 
610 
611 	default:
612 		break;
613 	}
614 
615 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
616 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
617 
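	/*
	 * The descriptor was assembled in a local cached buffer; copy it
	 * into the actual extension descriptor memory in one shot.
	 */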
618 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
619 			msdu_ext_desc->vaddr);
620 
621 	return msdu_ext_desc;
622 }
623 
624 /**
625  * dp_tx_trace_pkt() - Trace TX packet at DP layer
626  *
627  * @skb: skb to be traced
628  * @msdu_id: msdu_id of the packet
629  * @vdev_id: vdev_id of the packet
630  *
631  * Return: None
632  */
633 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
634 			    uint8_t vdev_id)
635 {
636 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
637 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
638 	DPTRACE(qdf_dp_trace_ptr(skb,
639 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
640 				 QDF_TRACE_DEFAULT_PDEV_ID,
641 				 qdf_nbuf_data_addr(skb),
642 				 sizeof(qdf_nbuf_data(skb)),
643 				 msdu_id, vdev_id));
644 
645 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
646 
647 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
648 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
649 				      msdu_id, QDF_TX));
650 }
651 
652 /**
653  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
654  * @vdev: DP vdev handle
655  * @nbuf: skb
656  * @desc_pool_id: Descriptor pool ID
657  * @msdu_info: MSDU info (TID, Tx queue, HTT metadata) for this frame
658  * @tx_exc_metadata: Handle that holds exception path metadata
659  * Allocate and prepare Tx descriptor with msdu information.
660  *
661  * Return: Pointer to Tx Descriptor on success,
662  *         NULL on failure
663  */
664 static
665 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
666 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
667 		struct dp_tx_msdu_info_s *msdu_info,
668 		struct cdp_tx_exception_metadata *tx_exc_metadata)
669 {
670 	uint8_t align_pad;
671 	uint8_t is_exception = 0;
672 	uint8_t htt_hdr_size;
673 	qdf_ether_header_t *eh;
674 	struct dp_tx_desc_s *tx_desc;
675 	struct dp_pdev *pdev = vdev->pdev;
676 	struct dp_soc *soc = pdev->soc;
677 
678 	/* Allocate software Tx descriptor */
679 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
680 	if (qdf_unlikely(!tx_desc)) {
681 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
682 		return NULL;
683 	}
684 
685 	/* Flow control/Congestion Control counters */
686 	qdf_atomic_inc(&pdev->num_tx_outstanding);
687 
688 	/* Initialize the SW tx descriptor */
689 	tx_desc->nbuf = nbuf;
690 	tx_desc->frm_type = dp_tx_frm_std;
691 	tx_desc->tx_encap_type = (tx_exc_metadata ?
692 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
693 	tx_desc->vdev = vdev;
694 	tx_desc->pdev = pdev;
695 	tx_desc->msdu_ext_desc = NULL;
696 	tx_desc->pkt_offset = 0;
697 
698 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
699 
700 	/*
701 	 * For special modes (vdev_type == ocb or mesh), data frames should be
702 	 * transmitted using varying transmit parameters (tx spec) which include
703 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
704 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
705 	 * These frames are sent as exception packets to firmware.
706 	 *
707 	 * HW requirement is that metadata should always point to a
708 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
709 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
710 	 *  to get 8-byte aligned start address along with align_pad added
711 	 *
712 	 *  |-----------------------------|
713 	 *  |                             |
714 	 *  |-----------------------------| <-----Buffer Pointer Address given
715 	 *  |                             |  ^    in HW descriptor (aligned)
716 	 *  |       HTT Metadata          |  |
717 	 *  |                             |  |
718 	 *  |                             |  | Packet Offset given in descriptor
719 	 *  |                             |  |
720 	 *  |-----------------------------|  |
721 	 *  |       Alignment Pad         |  v
722 	 *  |-----------------------------| <----- Actual buffer start address
723 	 *  |        SKB Data             |           (Unaligned)
724 	 *  |                             |
725 	 *  |                             |
726 	 *  |                             |
727 	 *  |                             |
728 	 *  |                             |
729 	 *  |-----------------------------|
730 	 */
731 	if (qdf_unlikely((msdu_info->exception_fw)) ||
732 				(vdev->opmode == wlan_op_mode_ocb)) {
733 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
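		/*
		 * Example: if qdf_nbuf_data() ends in 0x...6, align_pad is 6,
		 * so pushing the head by 6 bytes brings the buffer start back
		 * to an 8-byte boundary before the HTT metadata is added.
		 */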
734 
735 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
736 			DP_STATS_INC(vdev,
737 				     tx_i.dropped.headroom_insufficient, 1);
738 			goto failure;
739 		}
740 
741 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
742 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
743 					"qdf_nbuf_push_head failed");
744 			goto failure;
745 		}
746 
747 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
748 				msdu_info->meta_data);
749 		if (htt_hdr_size == 0)
750 			goto failure;
751 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
752 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
753 		is_exception = 1;
754 	}
755 
756 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
757 				qdf_nbuf_map(soc->osdev, nbuf,
758 					QDF_DMA_TO_DEVICE))) {
759 		/* Handle failure */
760 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
761 				"qdf_nbuf_map failed");
762 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
763 		goto failure;
764 	}
765 
766 	if (qdf_unlikely(vdev->nawds_enabled)) {
767 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
768 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
769 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
770 			is_exception = 1;
771 		}
772 	}
773 
774 #if !TQM_BYPASS_WAR
775 	if (is_exception || tx_exc_metadata)
776 #endif
777 	{
778 		/* Temporary WAR due to TQM VP issues */
779 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
780 		qdf_atomic_inc(&pdev->num_tx_exception);
781 	}
782 
783 	return tx_desc;
784 
785 failure:
786 	dp_tx_desc_release(tx_desc, desc_pool_id);
787 	return NULL;
788 }
789 
790 /**
791  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
792  * @vdev: DP vdev handle
793  * @nbuf: skb
794  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
795  * @desc_pool_id : Descriptor Pool ID
796  *
797  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
798  * information. For frames with fragments, allocate and prepare
799  * an MSDU extension descriptor
800  *
801  * Return: Pointer to Tx Descriptor on success,
802  *         NULL on failure
803  */
804 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
805 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
806 		uint8_t desc_pool_id)
807 {
808 	struct dp_tx_desc_s *tx_desc;
809 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
810 	struct dp_pdev *pdev = vdev->pdev;
811 	struct dp_soc *soc = pdev->soc;
812 
813 	/* Allocate software Tx descriptor */
814 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
815 	if (!tx_desc) {
816 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
817 		return NULL;
818 	}
819 
820 	/* Flow control/Congestion Control counters */
821 	qdf_atomic_inc(&pdev->num_tx_outstanding);
822 
823 	/* Initialize the SW tx descriptor */
824 	tx_desc->nbuf = nbuf;
825 	tx_desc->frm_type = msdu_info->frm_type;
826 	tx_desc->tx_encap_type = vdev->tx_encap_type;
827 	tx_desc->vdev = vdev;
828 	tx_desc->pdev = pdev;
829 	tx_desc->pkt_offset = 0;
830 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
831 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
832 
833 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
834 
835 	/* Handle scattered frames - TSO/SG/ME */
836 	/* Allocate and prepare an extension descriptor for scattered frames */
837 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
838 	if (!msdu_ext_desc) {
839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
840 				"%s Tx Extension Descriptor Alloc Fail",
841 				__func__);
842 		goto failure;
843 	}
844 
845 #if TQM_BYPASS_WAR
846 	/* Temporary WAR due to TQM VP issues */
847 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
848 	qdf_atomic_inc(&pdev->num_tx_exception);
849 #endif
850 	if (qdf_unlikely(msdu_info->exception_fw))
851 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
852 
853 	tx_desc->msdu_ext_desc = msdu_ext_desc;
854 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
855 
856 	return tx_desc;
857 failure:
858 	dp_tx_desc_release(tx_desc, desc_pool_id);
859 	return NULL;
860 }
861 
862 /**
863  * dp_tx_prepare_raw() - Prepare RAW packet TX
864  * @vdev: DP vdev handle
865  * @nbuf: buffer pointer
866  * @seg_info: Pointer to Segment info Descriptor to be prepared
867  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
868  *     descriptor
869  *
870  * Return: nbuf on success, NULL on failure
871  */
872 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
873 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
874 {
875 	qdf_nbuf_t curr_nbuf = NULL;
876 	uint16_t total_len = 0;
877 	qdf_dma_addr_t paddr;
878 	int32_t i;
879 	int32_t mapped_buf_num = 0;
880 
881 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
882 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
883 
884 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
885 
886 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
887 	if (vdev->raw_mode_war &&
888 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
889 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
890 
891 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
892 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
893 
894 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
895 					QDF_DMA_TO_DEVICE)) {
896 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
897 				"%s dma map error ", __func__);
898 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
899 			mapped_buf_num = i;
900 			goto error;
901 		}
902 
903 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
904 		seg_info->frags[i].paddr_lo = paddr;
905 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
906 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
907 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
908 		total_len += qdf_nbuf_len(curr_nbuf);
909 	}
910 
911 	seg_info->frag_cnt = i;
912 	seg_info->total_len = total_len;
913 	seg_info->next = NULL;
914 
915 	sg_info->curr_seg = seg_info;
916 
917 	msdu_info->frm_type = dp_tx_frm_raw;
918 	msdu_info->num_seg = 1;
919 
920 	return nbuf;
921 
922 error:
923 	i = 0;
924 	while (nbuf) {
925 		curr_nbuf = nbuf;
926 		if (i < mapped_buf_num) {
927 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
928 			i++;
929 		}
930 		nbuf = qdf_nbuf_next(nbuf);
931 		qdf_nbuf_free(curr_nbuf);
932 	}
933 	return NULL;
934 
935 }
936 
937 /**
938  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
939  * @soc: DP Soc Handle
940  * @vdev: DP vdev handle
941  * @tx_desc: Tx Descriptor Handle
942  * @tid: TID from HLOS for overriding default DSCP-TID mapping
943  * @fw_metadata: Metadata to send to Target Firmware along with frame
944  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
945  * @tx_exc_metadata: Handle that holds exception path meta data
946  *
947  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
948  *  from software Tx descriptor
949  *
950  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
951  */
952 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
953 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
954 				   uint16_t fw_metadata, uint8_t ring_id,
955 				   struct cdp_tx_exception_metadata
956 					*tx_exc_metadata)
957 {
958 	uint8_t type;
959 	uint16_t length;
960 	void *hal_tx_desc, *hal_tx_desc_cached;
961 	qdf_dma_addr_t dma_addr;
962 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
963 
964 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
965 			tx_exc_metadata->sec_type : vdev->sec_type);
966 
967 	/* Return Buffer Manager ID */
968 	uint8_t bm_id = ring_id;
969 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
970 
971 	hal_tx_desc_cached = (void *) cached_desc;
972 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
973 
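	/*
	 * For scattered frames (TSO/SG/ME/RAW) the HW descriptor points at
	 * the MSDU extension descriptor; otherwise it points directly at
	 * the DMA-mapped nbuf data.
	 */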
974 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
975 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
976 		type = HAL_TX_BUF_TYPE_EXT_DESC;
977 		dma_addr = tx_desc->msdu_ext_desc->paddr;
978 	} else {
979 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
980 		type = HAL_TX_BUF_TYPE_BUFFER;
981 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
982 	}
983 
984 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
985 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
986 					dma_addr, bm_id, tx_desc->id,
987 					type, soc->hal_soc);
988 
989 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
990 		return QDF_STATUS_E_RESOURCES;
991 
992 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
993 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
994 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
995 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
996 				vdev->pdev->lmac_id);
997 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
998 				    vdev->search_type);
999 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1000 				     vdev->bss_ast_hash);
1001 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1002 					  vdev->dscp_tid_map_id);
1003 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1004 			sec_type_map[sec_type]);
1005 
1006 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1007 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1008 			__func__, length, type, (uint64_t)dma_addr,
1009 			tx_desc->pkt_offset, tx_desc->id);
1010 
1011 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1012 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1013 
1014 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1015 			vdev->hal_desc_addr_search_flags);
1016 
1017 	/* verify checksum offload configuration*/
1018 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1019 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1020 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1021 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1022 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1023 	}
1024 
1025 	if (tid != HTT_TX_EXT_TID_INVALID)
1026 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1027 
1028 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1029 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
1030 
1031 
1032 	/* Sync cached descriptor with HW */
1033 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1034 
1035 	if (!hal_tx_desc) {
1036 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1037 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
1038 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1039 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1040 		return QDF_STATUS_E_RESOURCES;
1041 	}
1042 
1043 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1044 
1045 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1046 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1047 
1048 	return QDF_STATUS_SUCCESS;
1049 }
1050 
1051 
1052 /**
1053  * dp_cce_classify() - Classify the frame based on CCE rules
1054  * @vdev: DP vdev handle
1055  * @nbuf: skb
1056  *
1057  * Classify frames based on CCE rules
1058  * Return: bool (true if classified,
1059  *               else false)
1060  */
1061 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1062 {
1063 	qdf_ether_header_t *eh = NULL;
1064 	uint16_t   ether_type;
1065 	qdf_llc_t *llcHdr;
1066 	qdf_nbuf_t nbuf_clone = NULL;
1067 	qdf_dot3_qosframe_t *qos_wh = NULL;
1068 
1069 	/* for mesh packets don't do any classification */
1070 	if (qdf_unlikely(vdev->mesh_vdev))
1071 		return false;
1072 
1073 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1074 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1075 		ether_type = eh->ether_type;
1076 		llcHdr = (qdf_llc_t *)(nbuf->data +
1077 					sizeof(qdf_ether_header_t));
1078 	} else {
1079 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1080 		/* For encrypted packets don't do any classification */
1081 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1082 			return false;
1083 
1084 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1085 			if (qdf_unlikely(
1086 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1087 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1088 
1089 				ether_type = *(uint16_t *)(nbuf->data
1090 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1091 						+ sizeof(qdf_llc_t)
1092 						- sizeof(ether_type));
1093 				llcHdr = (qdf_llc_t *)(nbuf->data +
1094 						QDF_IEEE80211_4ADDR_HDR_LEN);
1095 			} else {
1096 				ether_type = *(uint16_t *)(nbuf->data
1097 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1098 						+ sizeof(qdf_llc_t)
1099 						- sizeof(ether_type));
1100 				llcHdr = (qdf_llc_t *)(nbuf->data +
1101 					QDF_IEEE80211_3ADDR_HDR_LEN);
1102 			}
1103 
1104 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1105 				&& (ether_type ==
1106 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1107 
1108 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1109 				return true;
1110 			}
1111 		}
1112 
1113 		return false;
1114 	}
1115 
1116 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1117 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1118 				sizeof(*llcHdr));
1119 		nbuf_clone = qdf_nbuf_clone(nbuf);
1120 		if (qdf_unlikely(nbuf_clone)) {
1121 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1122 
1123 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1124 				qdf_nbuf_pull_head(nbuf_clone,
1125 						sizeof(qdf_net_vlanhdr_t));
1126 			}
1127 		}
1128 	} else {
1129 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1130 			nbuf_clone = qdf_nbuf_clone(nbuf);
1131 			if (qdf_unlikely(nbuf_clone)) {
1132 				qdf_nbuf_pull_head(nbuf_clone,
1133 					sizeof(qdf_net_vlanhdr_t));
1134 			}
1135 		}
1136 	}
1137 
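	/*
	 * If a clone was made above (to strip LLC/VLAN headers without
	 * modifying the original frame), run the protocol checks on the
	 * clone; it is freed before returning in either case.
	 */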
1138 	if (qdf_unlikely(nbuf_clone))
1139 		nbuf = nbuf_clone;
1140 
1141 
1142 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1143 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1144 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1145 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1146 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1147 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1148 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1149 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1150 		if (qdf_unlikely(nbuf_clone != NULL))
1151 			qdf_nbuf_free(nbuf_clone);
1152 		return true;
1153 	}
1154 
1155 	if (qdf_unlikely(nbuf_clone != NULL))
1156 		qdf_nbuf_free(nbuf_clone);
1157 
1158 	return false;
1159 }
1160 
1161 /**
1162  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1163  * @vdev: DP vdev handle
1164  * @nbuf: skb
1165  *
1166  * Extract the DSCP or PCP information from frame and map into TID value.
1167  * Software based TID classification is required when more than 2 DSCP-TID
1168  * mapping tables are needed.
1169  * Hardware supports 2 DSCP-TID mapping tables
1170  *
1171  * Return: void
1172  */
1173 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1174 		struct dp_tx_msdu_info_s *msdu_info)
1175 {
1176 	uint8_t tos = 0, dscp_tid_override = 0;
1177 	uint8_t *hdr_ptr, *L3datap;
1178 	uint8_t is_mcast = 0;
1179 	qdf_ether_header_t *eh = NULL;
1180 	qdf_ethervlan_header_t *evh = NULL;
1181 	uint16_t   ether_type;
1182 	qdf_llc_t *llcHdr;
1183 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1184 
1185 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1186 
1187 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1188 		return;
1189 
1190 	/* for mesh packets don't do any classification */
1191 	if (qdf_unlikely(vdev->mesh_vdev))
1192 		return;
1193 
1194 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1195 		eh = (qdf_ether_header_t *)nbuf->data;
1196 		hdr_ptr = eh->ether_dhost;
1197 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1198 	} else {
1199 		qdf_dot3_qosframe_t *qos_wh =
1200 			(qdf_dot3_qosframe_t *) nbuf->data;
1201 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1202 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1203 		return;
1204 	}
1205 
1206 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1207 	ether_type = eh->ether_type;
1208 
1209 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1210 	/*
1211 	 * Check if packet is dot3 or eth2 type.
1212 	 */
1213 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1214 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1215 				sizeof(*llcHdr));
1216 
1217 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1218 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1219 				sizeof(*llcHdr);
1220 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1221 					+ sizeof(*llcHdr) +
1222 					sizeof(qdf_net_vlanhdr_t));
1223 		} else {
1224 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1225 				sizeof(*llcHdr);
1226 		}
1227 	} else {
1228 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1229 			evh = (qdf_ethervlan_header_t *) eh;
1230 			ether_type = evh->ether_type;
1231 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1232 		}
1233 	}
1234 
1235 	/*
1236 	 * Find priority from IP TOS DSCP field
1237 	 */
1238 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1239 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1240 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1241 			/* Only for unicast frames */
1242 			if (!is_mcast) {
1243 				/* send it on VO queue */
1244 				msdu_info->tid = DP_VO_TID;
1245 			}
1246 		} else {
1247 			/*
1248 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1249 			 * from TOS byte.
1250 			 */
1251 			tos = ip->ip_tos;
1252 			dscp_tid_override = 1;
1253 
1254 		}
1255 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1256 		/* TODO
1257 		 * use flowlabel
1258 		 * IGMP/MLD cases to be handled in phase 2
1259 		 */
1260 		unsigned long ver_pri_flowlabel;
1261 		unsigned long pri;
1262 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1263 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1264 			DP_IPV6_PRIORITY_SHIFT;
1265 		tos = pri;
1266 		dscp_tid_override = 1;
1267 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1268 		msdu_info->tid = DP_VO_TID;
1269 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1270 		/* Only for unicast frames */
1271 		if (!is_mcast) {
1272 			/* send ucast arp on VO queue */
1273 			msdu_info->tid = DP_VO_TID;
1274 		}
1275 	}
1276 
1277 	/*
1278 	 * Assign all MCAST packets to BE
1279 	 */
1280 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1281 		if (is_mcast) {
1282 			tos = 0;
1283 			dscp_tid_override = 1;
1284 		}
1285 	}
1286 
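	/*
	 * DSCP to TID: drop the 2 ECN bits from the TOS byte and use the
	 * 6-bit DSCP value as an index into this pdev's DSCP-TID map, e.g.
	 * TOS 0xB8 (DSCP 46, EF) indexes entry 46 (assuming the usual
	 * DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f).
	 */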
1287 	if (dscp_tid_override == 1) {
1288 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1289 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1290 	}
1291 	return;
1292 }
1293 
1294 #ifdef FEATURE_WLAN_TDLS
1295 /**
1296  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1297  * @tx_desc: TX descriptor
1298  *
1299  * Return: None
1300  */
1301 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1302 {
1303 	if (tx_desc->vdev) {
1304 		if (tx_desc->vdev->is_tdls_frame) {
1305 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1306 			tx_desc->vdev->is_tdls_frame = false;
1307 		}
1308 	}
1309 }
1310 
1311 /**
1312  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1313  * @tx_desc: TX descriptor
1314  * @vdev: datapath vdev handle
1315  *
1316  * Return: None
1317  */
1318 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1319 					 struct dp_vdev *vdev)
1320 {
1321 	struct hal_tx_completion_status ts = {0};
1322 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1323 
1324 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1325 	if (vdev->tx_non_std_data_callback.func) {
1326 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1327 		vdev->tx_non_std_data_callback.func(
1328 				vdev->tx_non_std_data_callback.ctxt,
1329 				nbuf, ts.status);
1330 		return;
1331 	}
1332 }
1333 #else
1334 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1335 {
1336 }
1337 
1338 static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1339 						struct dp_vdev *vdev)
1340 {
1341 }
1342 #endif
1343 
1344 /**
1345  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1346  * @vdev: DP vdev handle
1347  * @nbuf: skb
1348  * @msdu_info: MSDU info (TID, Tx queue, HTT metadata) to be used for this frame
1351  * @peer_id: peer_id of the peer in case of NAWDS frames
1352  * @tx_exc_metadata: Handle that holds exception path metadata
1353  *
1354  * Return: NULL on success,
1355  *         nbuf when it fails to send
1356  */
1357 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1358 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1359 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1360 {
1361 	struct dp_pdev *pdev = vdev->pdev;
1362 	struct dp_soc *soc = pdev->soc;
1363 	struct dp_tx_desc_s *tx_desc;
1364 	QDF_STATUS status;
1365 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1366 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1367 	uint16_t htt_tcl_metadata = 0;
1368 	uint8_t tid = msdu_info->tid;
1369 
1370 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1371 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1372 			msdu_info, tx_exc_metadata);
1373 	if (!tx_desc) {
1374 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1375 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1376 			  __func__, vdev, tx_q->desc_pool_id);
1377 		return nbuf;
1378 	}
1379 
1380 	if (qdf_unlikely(soc->cce_disable)) {
1381 		if (dp_cce_classify(vdev, nbuf) == true) {
1382 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1383 			tid = DP_VO_TID;
1384 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1385 		}
1386 	}
1387 
1388 	dp_tx_update_tdls_flags(tx_desc);
1389 
1390 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1391 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1392 				"%s %d : HAL RING Access Failed -- %pK",
1393 				__func__, __LINE__, hal_srng);
1394 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1395 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1396 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1397 		goto fail_return;
1398 	}
1399 
1400 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1401 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1402 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1403 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1404 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1405 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1406 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1407 				peer_id);
1408 	} else
1409 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1410 
1411 
1412 	if (msdu_info->exception_fw) {
1413 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1414 	}
1415 
1416 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1417 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1418 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1419 
1420 	if (status != QDF_STATUS_SUCCESS) {
1421 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1422 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1423 			  __func__, tx_desc, tx_q->ring_id);
1424 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1425 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1426 		goto fail_return;
1427 	}
1428 
1429 	nbuf = NULL;
1430 
1431 fail_return:
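	/*
	 * hif_pm_runtime_get() returning 0 is taken to mean the bus is
	 * awake: finish SRNG access normally (which rings the TCL doorbell)
	 * and drop the runtime-PM reference. Otherwise only reap the ring
	 * so no register writes happen while the bus may be suspended.
	 */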
1432 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1433 		hal_srng_access_end(soc->hal_soc, hal_srng);
1434 		hif_pm_runtime_put(soc->hif_handle);
1435 	} else {
1436 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1437 	}
1438 
1439 	return nbuf;
1440 }
1441 
1442 /**
1443  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1444  * @vdev: DP vdev handle
1445  * @nbuf: skb
1446  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1447  *
1448  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1449  *
1450  * Return: NULL on success,
1451  *         nbuf when it fails to send
1452  */
1453 #if QDF_LOCK_STATS
1454 static noinline
1455 #else
1456 static
1457 #endif
1458 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1459 				    struct dp_tx_msdu_info_s *msdu_info)
1460 {
1461 	uint8_t i;
1462 	struct dp_pdev *pdev = vdev->pdev;
1463 	struct dp_soc *soc = pdev->soc;
1464 	struct dp_tx_desc_s *tx_desc;
1465 	bool is_cce_classified = false;
1466 	QDF_STATUS status;
1467 	uint16_t htt_tcl_metadata = 0;
1468 
1469 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1470 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1471 
1472 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1473 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1474 				"%s %d : HAL RING Access Failed -- %pK",
1475 				__func__, __LINE__, hal_srng);
1476 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1477 		return nbuf;
1478 	}
1479 
1480 	if (qdf_unlikely(soc->cce_disable)) {
1481 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1482 		if (is_cce_classified) {
1483 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1484 			msdu_info->tid = DP_VO_TID;
1485 		}
1486 	}
1487 
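	/*
	 * For multicast-to-unicast (ME) frames the per-client copies live
	 * in the segment list, so start from the first segment's nbuf
	 * rather than the original frame.
	 */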
1488 	if (msdu_info->frm_type == dp_tx_frm_me)
1489 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1490 
1491 	i = 0;
1492 	/* Print statement to track i and num_seg */
1493 	/*
1494 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1495 	 * descriptors using information in msdu_info
1496 	 */
1497 	while (i < msdu_info->num_seg) {
1498 		/*
1499 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1500 		 * descriptor
1501 		 */
1502 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1503 				tx_q->desc_pool_id);
1504 
1505 		if (!tx_desc) {
1506 			if (msdu_info->frm_type == dp_tx_frm_me) {
1507 				dp_tx_me_free_buf(pdev,
1508 					(void *)(msdu_info->u.sg_info
1509 						.curr_seg->frags[0].vaddr));
1510 			}
1511 			goto done;
1512 		}
1513 
1514 		if (msdu_info->frm_type == dp_tx_frm_me) {
1515 			tx_desc->me_buffer =
1516 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1517 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1518 		}
1519 
1520 		if (is_cce_classified)
1521 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1522 
1523 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1524 		if (msdu_info->exception_fw) {
1525 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1526 		}
1527 
1528 		/*
1529 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1530 		 */
1531 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1532 			htt_tcl_metadata, tx_q->ring_id, NULL);
1533 
1534 		if (status != QDF_STATUS_SUCCESS) {
1535 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1536 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1537 				  __func__, tx_desc, tx_q->ring_id);
1538 
1539 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1540 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1541 
1542 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1543 			goto done;
1544 		}
1545 
1546 		/*
1547 		 * TODO
1548 		 * if tso_info structure can be modified to have curr_seg
1549 		 * as first element, following 2 blocks of code (for TSO and SG)
1550 		 * can be combined into 1
1551 		 */
1552 
1553 		/*
1554 		 * For frames with multiple segments (TSO, ME), jump to next
1555 		 * segment.
1556 		 */
1557 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1558 			if (msdu_info->u.tso_info.curr_seg->next) {
1559 				msdu_info->u.tso_info.curr_seg =
1560 					msdu_info->u.tso_info.curr_seg->next;
1561 
1562 				/*
1563 				 * If this is a jumbo nbuf, then increment the number of
1564 				 * nbuf users for each additional segment of the msdu.
1565 				 * This will ensure that the skb is freed only after
1566 				 * receiving tx completion for all segments of an nbuf
1567 				 */
1568 				qdf_nbuf_inc_users(nbuf);
1569 
1570 				/* Check with MCL if this is needed */
1571 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1572 			}
1573 		}
1574 
1575 		/*
1576 		 * For Multicast-Unicast converted packets,
1577 		 * each converted frame (for a client) is represented as
1578 		 * 1 segment
1579 		 */
1580 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1581 				(msdu_info->frm_type == dp_tx_frm_me)) {
1582 			if (msdu_info->u.sg_info.curr_seg->next) {
1583 				msdu_info->u.sg_info.curr_seg =
1584 					msdu_info->u.sg_info.curr_seg->next;
1585 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1586 			}
1587 		}
1588 		i++;
1589 	}
1590 
1591 	nbuf = NULL;
1592 
1593 done:
1594 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1595 		hal_srng_access_end(soc->hal_soc, hal_srng);
1596 		hif_pm_runtime_put(soc->hif_handle);
1597 	} else {
1598 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1599 	}
1600 
1601 	return nbuf;
1602 }
1603 
1604 /**
1605  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
1606  *                     for SG frames
1607  * @vdev: DP vdev handle
1608  * @nbuf: skb
1609  * @seg_info: Pointer to Segment info Descriptor to be prepared
1610  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1611  *
1612  * Return: NULL on success,
1613  *         nbuf when it fails to send
1614  */
1615 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1616 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1617 {
1618 	uint32_t cur_frag, nr_frags;
1619 	qdf_dma_addr_t paddr;
1620 	struct dp_tx_sg_info_s *sg_info;
1621 
1622 	sg_info = &msdu_info->u.sg_info;
1623 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1624 
1625 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1626 				QDF_DMA_TO_DEVICE)) {
1627 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1628 				"dma map error");
1629 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1630 
1631 		qdf_nbuf_free(nbuf);
1632 		return NULL;
1633 	}
1634 
1635 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1636 	seg_info->frags[0].paddr_lo = paddr;
1637 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1638 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1639 	seg_info->frags[0].vaddr = (void *) nbuf;
1640 
1641 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1642 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1643 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1644 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1645 					"frag dma map error");
1646 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1647 			qdf_nbuf_free(nbuf);
1648 			return NULL;
1649 		}
1650 
1651 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1652 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1653 		seg_info->frags[cur_frag + 1].paddr_hi =
1654 			((uint64_t) paddr) >> 32;
1655 		seg_info->frags[cur_frag + 1].len =
1656 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1657 	}
1658 
1659 	seg_info->frag_cnt = (cur_frag + 1);
1660 	seg_info->total_len = qdf_nbuf_len(nbuf);
1661 	seg_info->next = NULL;
1662 
1663 	sg_info->curr_seg = seg_info;
1664 
1665 	msdu_info->frm_type = dp_tx_frm_sg;
1666 	msdu_info->num_seg = 1;
1667 
1668 	return nbuf;
1669 }
1670 
1671 #ifdef MESH_MODE_SUPPORT
1672 
1673 /**
1674  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1675  *				and prepare msdu_info for mesh frames.
1676  * @vdev: DP vdev handle
1677  * @nbuf: skb
1678  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1679  *
1680  * Return: NULL on failure,
1681  *         nbuf when extracted successfully
1682  */
1683 static
1684 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1685 				struct dp_tx_msdu_info_s *msdu_info)
1686 {
1687 	struct meta_hdr_s *mhdr;
1688 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1689 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1690 
1691 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1692 
1693 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1694 		msdu_info->exception_fw = 0;
1695 		goto remove_meta_hdr;
1696 	}
1697 
1698 	msdu_info->exception_fw = 1;
1699 
1700 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
1701 
1702 	meta_data->host_tx_desc_pool = 1;
1703 	meta_data->update_peer_cache = 1;
1704 	meta_data->learning_frame = 1;
1705 
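	/*
	 * When auto-rate is not requested, carry the fixed rate, power and
	 * retry settings from the mesh meta header into the HTT descriptor
	 * and mark each field valid so FW applies them.
	 */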
1706 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1707 		meta_data->power = mhdr->power;
1708 
1709 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1710 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1711 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1712 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1713 
1714 		meta_data->dyn_bw = 1;
1715 
1716 		meta_data->valid_pwr = 1;
1717 		meta_data->valid_mcs_mask = 1;
1718 		meta_data->valid_nss_mask = 1;
1719 		meta_data->valid_preamble_type  = 1;
1720 		meta_data->valid_retries = 1;
1721 		meta_data->valid_bw_info = 1;
1722 	}
1723 
1724 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1725 		meta_data->encrypt_type = 0;
1726 		meta_data->valid_encrypt_type = 1;
1727 		meta_data->learning_frame = 0;
1728 	}
1729 
1730 	meta_data->valid_key_flags = 1;
1731 	meta_data->key_flags = (mhdr->keyix & 0x3);
1732 
1733 remove_meta_hdr:
1734 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1735 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1736 				"qdf_nbuf_pull_head failed");
1737 		qdf_nbuf_free(nbuf);
1738 		return NULL;
1739 	}
1740 
1741 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1742 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1743 	else
1744 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1745 
1746 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1747 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1748 			" tid %d to_fw %d",
1749 			__func__, msdu_info->meta_data[0],
1750 			msdu_info->meta_data[1],
1751 			msdu_info->meta_data[2],
1752 			msdu_info->meta_data[3],
1753 			msdu_info->meta_data[4],
1754 			msdu_info->meta_data[5],
1755 			msdu_info->tid, msdu_info->exception_fw);
1756 
1757 	return nbuf;
1758 }
1759 #else
1760 static
1761 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1762 				struct dp_tx_msdu_info_s *msdu_info)
1763 {
1764 	return nbuf;
1765 }
1766 
1767 #endif
1768 
1769 #ifdef DP_FEATURE_NAWDS_TX
1770 /**
1771  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1772  * @vdev: dp_vdev handle
1773  * @nbuf: skb
1774  * @msdu_info: MSDU info carrying the TID and Tx queue to be used
1775  *
1776  * Return: NULL on success, nbuf on failure
1780  */
1781 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1782 		struct dp_tx_msdu_info_s *msdu_info)
1783 {
1784 	struct dp_peer *peer = NULL;
1785 	struct dp_soc *soc = vdev->pdev->soc;
1786 	struct dp_ast_entry *ast_entry = NULL;
1787 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1788 	uint16_t peer_id = HTT_INVALID_PEER;
1789 
1790 	struct dp_peer *sa_peer = NULL;
1791 	qdf_nbuf_t nbuf_copy;
1792 
1793 	qdf_spin_lock_bh(&(soc->ast_lock));
1794 	ast_entry = dp_peer_ast_hash_find_by_pdevid
1795 				(soc,
1796 				 (uint8_t *)(eh->ether_shost),
1797 				 vdev->pdev->pdev_id);
1798 
1799 	if (ast_entry)
1800 		sa_peer = ast_entry->peer;
1801 
1802 	qdf_spin_unlock_bh(&(soc->ast_lock));
1803 
1804 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1805 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1806 				(peer->nawds_enabled)) {
1807 			if (sa_peer == peer) {
1808 				QDF_TRACE(QDF_MODULE_ID_DP,
1809 						QDF_TRACE_LEVEL_DEBUG,
1810 						" %s: broadcast multicast packet",
1811 						 __func__);
1812 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1813 				continue;
1814 			}
1815 
1816 			nbuf_copy = qdf_nbuf_copy(nbuf);
1817 			if (!nbuf_copy) {
1818 				QDF_TRACE(QDF_MODULE_ID_DP,
1819 						QDF_TRACE_LEVEL_ERROR,
1820 						"nbuf copy failed");
				continue;
1821 			}
1822 
1823 			peer_id = peer->peer_ids[0];
1824 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1825 					msdu_info, peer_id, NULL);
1826 			if (nbuf_copy != NULL) {
1827 				qdf_nbuf_free(nbuf_copy);
1828 				continue;
1829 			}
1830 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1831 						1, qdf_nbuf_len(nbuf));
1832 		}
1833 	}
1834 	if (peer_id == HTT_INVALID_PEER)
1835 		return nbuf;
1836 
1837 	return NULL;
1838 }
1839 #endif
1840 
1841 /**
1842  * dp_check_exc_metadata() - Checks if exception path parameters are valid
1843  * @tx_exc: holds all exception path parameters
1844  *
1845  * Return: true when all the parameters are valid, else false
1846  *
1847  */
1848 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1849 {
1850 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1851 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1852 	    tx_exc->sec_type > cdp_num_sec_types) {
1853 		return false;
1854 	}
1855 
1856 	return true;
1857 }
1858 
1859 /**
1860  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1861  * @vap_dev: DP vdev handle
1862  * @nbuf: skb
1863  * @tx_exc_metadata: Handle that holds exception path meta data
1864  *
1865  * Entry point for Core Tx layer (DP_TX) invoked from
1866  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1867  *
1868  * Return: NULL on success,
1869  *         nbuf when it fails to send
1870  */
1871 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1872 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1873 {
1874 	qdf_ether_header_t *eh = NULL;
1875 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1876 	struct dp_tx_msdu_info_s msdu_info;
1877 
1878 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
1879 
1880 	msdu_info.tid = tx_exc_metadata->tid;
1881 
1882 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1883 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1884 			"%s , skb %pM",
1885 			__func__, nbuf->data);
1886 
1887 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1888 
1889 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1890 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1891 			"Invalid parameters in exception path");
1892 		goto fail;
1893 	}
1894 
1895 	/* Basic sanity checks for unsupported packets */
1896 
1897 	/* MESH mode */
1898 	if (qdf_unlikely(vdev->mesh_vdev)) {
1899 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1900 			"Mesh mode is not supported in exception path");
1901 		goto fail;
1902 	}
1903 
1904 	/* TSO or SG */
1905 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1906 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1907 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1908 			  "TSO and SG are not supported in exception path");
1909 
1910 		goto fail;
1911 	}
1912 
1913 	/* RAW */
1914 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1915 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1916 			  "Raw frame is not supported in exception path");
1917 		goto fail;
1918 	}
1919 
1920 
1921 	/* Mcast enhancement*/
1922 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1923 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
1924 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
1925 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1926 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1927 		}
1928 	}
1929 
1930 	/*
1931 	 * Get HW Queue to use for this frame.
1932 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1933 	 * dedicated for data and 1 for command.
1934 	 * "queue_id" maps to one hardware ring.
1935 	 *  With each ring, we also associate a unique Tx descriptor pool
1936 	 *  to minimize lock contention for these resources.
1937 	 */
1938 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1939 
1940 	/*  Single linear frame */
1941 	/*
1942 	 * If nbuf is a simple linear frame, use send_single function to
1943 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1944 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1945 	 */
1946 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1947 			tx_exc_metadata->peer_id, tx_exc_metadata);
1948 
1949 	return nbuf;
1950 
1951 fail:
1952 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1953 			"pkt send failed");
1954 	return nbuf;
1955 }
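
/*
 * Illustrative sketch (editorial note, not part of the driver): a minimal
 * way an OSIF/HDD caller might populate the exception metadata before
 * calling dp_tx_send_exception(). Only the fields validated by
 * dp_check_exc_metadata() above (tid, tx_encap_type, sec_type) plus
 * peer_id are shown; remaining members of struct cdp_tx_exception_metadata
 * are left to the caller.
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.peer_id = HTT_INVALID_PEER;	(or a specific peer id)
 *	tx_exc.tid = HTT_INVALID_TID;		(no TID override)
 *	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	tx_exc.sec_type = cdp_sec_type_none;
 *
 *	nbuf = dp_tx_send_exception(vdev_handle, nbuf, &tx_exc);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);		(send failed, drop locally)
 */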
1956 
1957 /**
1958  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1959  * @vap_dev: DP vdev handle
1960  * @nbuf: skb
1961  *
1962  * Entry point for Core Tx layer (DP_TX) invoked from
1963  * hard_start_xmit in OSIF/HDD
1964  *
1965  * Return: NULL on success,
1966  *         nbuf when it fails to send
1967  */
1968 #ifdef MESH_MODE_SUPPORT
1969 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1970 {
1971 	struct meta_hdr_s *mhdr;
1972 	qdf_nbuf_t nbuf_mesh = NULL;
1973 	qdf_nbuf_t nbuf_clone = NULL;
1974 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1975 	uint8_t no_enc_frame = 0;
1976 
1977 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1978 	if (nbuf_mesh == NULL) {
1979 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1980 				"qdf_nbuf_unshare failed");
1981 		return nbuf;
1982 	}
1983 	nbuf = nbuf_mesh;
1984 
1985 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1986 
1987 	if ((vdev->sec_type != cdp_sec_type_none) &&
1988 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1989 		no_enc_frame = 1;
1990 
1991 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1992 		       !no_enc_frame) {
1993 		nbuf_clone = qdf_nbuf_clone(nbuf);
1994 		if (nbuf_clone == NULL) {
1995 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1996 				"qdf_nbuf_clone failed");
1997 			return nbuf;
1998 		}
1999 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2000 	}
2001 
2002 	if (nbuf_clone) {
2003 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
2004 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2005 		} else {
2006 			qdf_nbuf_free(nbuf_clone);
2007 		}
2008 	}
2009 
2010 	if (no_enc_frame)
2011 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2012 	else
2013 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2014 
2015 	nbuf = dp_tx_send(vap_dev, nbuf);
2016 	if ((nbuf == NULL) && no_enc_frame) {
2017 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2018 	}
2019 
2020 	return nbuf;
2021 }
2022 
2023 #else
2024 
2025 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
2026 {
2027 	return dp_tx_send(vap_dev, nbuf);
2028 }
2029 
2030 #endif
2031 
2032 /**
2033  * dp_tx_send() - Transmit a frame on a given VAP
2034  * @vap_dev: DP vdev handle
2035  * @nbuf: skb
2036  *
2037  * Entry point for Core Tx layer (DP_TX) invoked from
2038  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2039  * cases
2040  *
2041  * Return: NULL on success,
2042  *         nbuf when it fails to send
2043  */
2044 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
2045 {
2046 	qdf_ether_header_t *eh = NULL;
2047 	struct dp_tx_msdu_info_s msdu_info;
2048 	struct dp_tx_seg_info_s seg_info;
2049 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2050 	uint16_t peer_id = HTT_INVALID_PEER;
2051 	qdf_nbuf_t nbuf_mesh = NULL;
2052 
2053 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2054 	qdf_mem_zero(&seg_info, sizeof(seg_info));
2055 
2056 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2057 
2058 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2059 			"%s , skb %pM",
2060 			__func__, nbuf->data);
2061 
2062 	/*
2063 	 * Set Default Host TID value to invalid TID
2064 	 * (TID override disabled)
2065 	 */
2066 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2067 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2068 
2069 	if (qdf_unlikely(vdev->mesh_vdev)) {
2070 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2071 								&msdu_info);
2072 		if (nbuf_mesh == NULL) {
2073 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2074 					"Extracting mesh metadata failed");
2075 			return nbuf;
2076 		}
2077 		nbuf = nbuf_mesh;
2078 	}
2079 
2080 	/*
2081 	 * Get HW Queue to use for this frame.
2082 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2083 	 * dedicated for data and 1 for command.
2084 	 * "queue_id" maps to one hardware ring.
2085 	 *  With each ring, we also associate a unique Tx descriptor pool
2086 	 *  to minimize lock contention for these resources.
2087 	 */
2088 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2089 
2090 	/*
2091 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2092 	 *  Table 1 - Default DSCP-TID mapping table
2093 	 *  Table 2 - 1 DSCP-TID override table
2094 	 *
2095 	 * If we need a different DSCP-TID mapping for this vap,
2096 	 * call tid_classify to extract DSCP/ToS from frame and
2097 	 * map to a TID and store in msdu_info. This is later used
2098 	 * to fill in TCL Input descriptor (per-packet TID override).
2099 	 */
2100 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2101 
2102 	/*
2103 	 * Classify the frame and call corresponding
2104 	 * "prepare" function which extracts the segment (TSO)
2105 	 * and fragmentation information (for TSO, SG, ME, or Raw)
2106 	 * into MSDU_INFO structure which is later used to fill
2107 	 * SW and HW descriptors.
2108 	 */
2109 	if (qdf_nbuf_is_tso(nbuf)) {
2110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2111 			  "%s TSO frame %pK", __func__, vdev);
2112 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2113 				qdf_nbuf_len(nbuf));
2114 
2115 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2116 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2117 					 qdf_nbuf_len(nbuf));
2118 			return nbuf;
2119 		}
2120 
2121 		goto send_multiple;
2122 	}
2123 
2124 	/* SG */
2125 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2126 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2127 
2128 		if (!nbuf)
2129 			return NULL;
2130 
2131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2132 			 "%s non-TSO SG frame %pK", __func__, vdev);
2133 
2134 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2135 				qdf_nbuf_len(nbuf));
2136 
2137 		goto send_multiple;
2138 	}
2139 
2140 #ifdef ATH_SUPPORT_IQUE
2141 	/* Mcast to Ucast Conversion*/
2142 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2143 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2144 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2145 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2146 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2147 				  "%s Mcast frm for ME %pK", __func__, vdev);
2148 
2149 			DP_STATS_INC_PKT(vdev,
2150 					tx_i.mcast_en.mcast_pkt, 1,
2151 					qdf_nbuf_len(nbuf));
2152 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2153 					QDF_STATUS_SUCCESS) {
2154 				return NULL;
2155 			}
2156 		}
2157 	}
2158 #endif
2159 
2160 	/* RAW */
2161 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2162 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2163 		if (nbuf == NULL)
2164 			return NULL;
2165 
2166 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2167 			  "%s Raw frame %pK", __func__, vdev);
2168 
2169 		goto send_multiple;
2170 
2171 	}
2172 
2173 	/*  Single linear frame */
2174 	/*
2175 	 * If nbuf is a simple linear frame, use send_single function to
2176 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2177 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2178 	 */
2179 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2180 
2181 	return nbuf;
2182 
2183 send_multiple:
2184 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2185 
2186 	return nbuf;
2187 }
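
/*
 * Summary (editorial note) of the classification order applied by
 * dp_tx_send() above for a frame on a non-mesh vdev:
 *
 *	TSO frame		-> dp_tx_prepare_tso()  + dp_tx_send_msdu_multiple()
 *	non-linear (SG) frame	-> dp_tx_prepare_sg()   + dp_tx_send_msdu_multiple()
 *	mcast with ME enabled	-> dp_tx_prepare_send_me()
 *	raw encap vdev		-> dp_tx_prepare_raw()  + dp_tx_send_msdu_multiple()
 *	anything else		-> dp_tx_send_msdu_single()
 */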
2188 
2189 /**
2190  * dp_tx_reinject_handler() - Tx Reinject Handler
2191  * @tx_desc: software descriptor head pointer
2192  * @status : Tx completion status from HTT descriptor
2193  *
2194  * This function reinjects frames back to Target.
2195  * Todo - Host queue needs to be added
2196  *
2197  * Return: none
2198  */
2199 static
2200 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2201 {
2202 	struct dp_vdev *vdev;
2203 	struct dp_peer *peer = NULL;
2204 	uint32_t peer_id = HTT_INVALID_PEER;
2205 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2206 	qdf_nbuf_t nbuf_copy = NULL;
2207 	struct dp_tx_msdu_info_s msdu_info;
2208 	struct dp_peer *sa_peer = NULL;
2209 	struct dp_ast_entry *ast_entry = NULL;
2210 	struct dp_soc *soc = NULL;
2211 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2212 #ifdef WDS_VENDOR_EXTENSION
2213 	int is_mcast = 0, is_ucast = 0;
2214 	int num_peers_3addr = 0;
2215 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2216 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2217 #endif
2218 
2219 	vdev = tx_desc->vdev;
2220 	soc = vdev->pdev->soc;
2221 
2222 	qdf_assert(vdev);
2223 
2224 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2225 
2226 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2227 
2228 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2229 			"%s Tx reinject path", __func__);
2230 
2231 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2232 			qdf_nbuf_len(tx_desc->nbuf));
2233 
2234 	qdf_spin_lock_bh(&(soc->ast_lock));
2235 
2236 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2237 				(soc,
2238 				 (uint8_t *)(eh->ether_shost),
2239 				 vdev->pdev->pdev_id);
2240 
2241 	if (ast_entry)
2242 		sa_peer = ast_entry->peer;
2243 
2244 	qdf_spin_unlock_bh(&(soc->ast_lock));
2245 
2246 #ifdef WDS_VENDOR_EXTENSION
2247 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2248 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2249 	} else {
2250 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2251 	}
2252 	is_ucast = !is_mcast;
2253 
2254 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2255 		if (peer->bss_peer)
2256 			continue;
2257 
2258 		/* Detect wds peers that use 3-addr framing for mcast.
2259 		 * If there are any, the bss_peer is used to send the
2260 		 * mcast frame using 3-addr format. All wds enabled
2261 		 * peers that use 4-addr framing for mcast frames will
2262 		 * be duplicated and sent as 4-addr frames below.
2263 		 */
2264 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2265 			num_peers_3addr = 1;
2266 			break;
2267 		}
2268 	}
2269 #endif
2270 
2271 	if (qdf_unlikely(vdev->mesh_vdev)) {
2272 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2273 	} else {
2274 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2275 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2276 #ifdef WDS_VENDOR_EXTENSION
2277 			/*
2278 			 * . if 3-addr STA, then send on BSS Peer
2279 			 * . if Peer WDS enabled and accept 4-addr mcast,
2280 			 * send mcast on that peer only
2281 			 * . if Peer WDS enabled and accept 4-addr ucast,
2282 			 * send ucast on that peer only
2283 			 */
2284 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2285 			 (peer->wds_enabled &&
2286 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2287 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2288 #else
2289 			((peer->bss_peer &&
2290 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2291 				 peer->nawds_enabled)) {
2292 #endif
2293 				peer_id = DP_INVALID_PEER;
2294 
2295 				if (peer->nawds_enabled) {
2296 					peer_id = peer->peer_ids[0];
2297 					if (sa_peer == peer) {
2298 						QDF_TRACE(
2299 							QDF_MODULE_ID_DP,
2300 							QDF_TRACE_LEVEL_DEBUG,
2301 							" %s: multicast packet",
2302 							__func__);
2303 						DP_STATS_INC(peer,
2304 							tx.nawds_mcast_drop, 1);
2305 						continue;
2306 					}
2307 				}
2308 
2309 				nbuf_copy = qdf_nbuf_copy(nbuf);
2310 
2311 				if (!nbuf_copy) {
2312 					QDF_TRACE(QDF_MODULE_ID_DP,
2313 						QDF_TRACE_LEVEL_DEBUG,
2314 						FL("nbuf copy failed"));
2315 					break;
2316 				}
2317 
2318 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2319 						nbuf_copy,
2320 						&msdu_info,
2321 						peer_id,
2322 						NULL);
2323 
2324 				if (nbuf_copy) {
2325 					QDF_TRACE(QDF_MODULE_ID_DP,
2326 						QDF_TRACE_LEVEL_DEBUG,
2327 						FL("pkt send failed"));
2328 					qdf_nbuf_free(nbuf_copy);
2329 				} else {
2330 					if (peer_id != DP_INVALID_PEER)
2331 						DP_STATS_INC_PKT(peer,
2332 							tx.nawds_mcast,
2333 							1, qdf_nbuf_len(nbuf));
2334 				}
2335 			}
2336 		}
2337 	}
2338 
2339 	if (vdev->nawds_enabled) {
2340 		peer_id = DP_INVALID_PEER;
2341 
2342 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2343 					1, qdf_nbuf_len(nbuf));
2344 
2345 		nbuf = dp_tx_send_msdu_single(vdev,
2346 				nbuf,
2347 				&msdu_info,
2348 				peer_id, NULL);
2349 
2350 		if (nbuf) {
2351 			QDF_TRACE(QDF_MODULE_ID_DP,
2352 				QDF_TRACE_LEVEL_DEBUG,
2353 				FL("pkt send failed"));
2354 			qdf_nbuf_free(nbuf);
2355 		}
2356 	} else
2357 		qdf_nbuf_free(nbuf);
2358 
2359 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2360 }
2361 
2362 /**
2363  * dp_tx_inspect_handler() - Tx Inspect Handler
2364  * @tx_desc: software descriptor head pointer
2365  * @status : Tx completion status from HTT descriptor
2366  *
2367  * Handles Tx frames sent back to Host for inspection
2368  * (ProxyARP)
2369  *
2370  * Return: none
2371  */
2372 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2373 {
2374 
2375 	struct dp_soc *soc;
2376 	struct dp_pdev *pdev = tx_desc->pdev;
2377 
2378 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2379 			"%s Tx inspect path",
2380 			__func__);
2381 
2382 	qdf_assert(pdev);
2383 
2384 	soc = pdev->soc;
2385 
2386 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2387 			qdf_nbuf_len(tx_desc->nbuf));
2388 
2389 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2390 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2391 }
2392 
2393 #ifdef FEATURE_PERPKT_INFO
2394 /**
2395  * dp_get_completion_indication_for_stack() - send completion to stack
2396  * @soc : dp_soc handle
2397  * @pdev: dp_pdev handle
2398  * @peer: dp peer handle
2399  * @ts: transmit completion status structure
2400  * @netbuf: Buffer pointer for free
2401  *
2402  * This function indicates whether the buffer needs to be sent to the
2403  * stack for freeing.
2404  */
2405 QDF_STATUS
2406 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2407 				       struct dp_pdev *pdev,
2408 				       struct dp_peer *peer,
2409 				       struct hal_tx_completion_status *ts,
2410 				       qdf_nbuf_t netbuf)
2411 {
2412 	struct tx_capture_hdr *ppdu_hdr;
2413 	uint16_t peer_id = ts->peer_id;
2414 	uint32_t ppdu_id = ts->ppdu_id;
2415 	uint8_t first_msdu = ts->first_msdu;
2416 	uint8_t last_msdu = ts->last_msdu;
2417 
2418 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2419 		return QDF_STATUS_E_NOSUPPORT;
2420 
2421 	if (!peer) {
2422 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2423 				FL("Peer Invalid"));
2424 		return QDF_STATUS_E_INVAL;
2425 	}
2426 
2427 	if (pdev->mcopy_mode) {
2428 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2429 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2430 			return QDF_STATUS_E_INVAL;
2431 		}
2432 
2433 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2434 		pdev->m_copy_id.tx_peer_id = peer_id;
2435 	}
2436 
2437 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2438 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2439 				FL("No headroom"));
2440 		return QDF_STATUS_E_NOMEM;
2441 	}
2442 
2443 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2444 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2445 		     IEEE80211_ADDR_LEN);
2446 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2447 		     IEEE80211_ADDR_LEN);
2448 	ppdu_hdr->ppdu_id = ppdu_id;
2449 	ppdu_hdr->peer_id = peer_id;
2450 	ppdu_hdr->first_msdu = first_msdu;
2451 	ppdu_hdr->last_msdu = last_msdu;
2452 
2453 	return QDF_STATUS_SUCCESS;
2454 }
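
/*
 * Illustrative sketch (editorial note, not part of the driver): how a
 * WDI_EVENT_TX_DATA consumer could read back the capture header that
 * dp_get_completion_indication_for_stack() prepends to the nbuf above:
 *
 *	struct tx_capture_hdr *hdr =
 *		(struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
 *
 * hdr->ta / hdr->ra carry the transmitter / receiver MAC,
 * hdr->ppdu_id, hdr->peer_id, hdr->first_msdu and hdr->last_msdu mirror
 * the HAL completion status; the original frame data follows once the
 * header is pulled with
 * qdf_nbuf_pull_head(netbuf, sizeof(struct tx_capture_hdr)).
 */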
2455 
2456 
2457 /**
2458  * dp_send_completion_to_stack() - send completion to stack
2459  * @soc :  dp_soc handle
2460  * @pdev:  dp_pdev handle
2461  * @peer_id: peer_id of the peer for which completion came
2462  * @ppdu_id: ppdu_id
2463  * @netbuf: Buffer pointer for free
2464  *
2465  * This function is used to send a completion to the stack
2466  * to free the buffer
2467  */
2468 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2469 					uint16_t peer_id, uint32_t ppdu_id,
2470 					qdf_nbuf_t netbuf)
2471 {
2472 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2473 				netbuf, peer_id,
2474 				WDI_NO_VAL, pdev->pdev_id);
2475 }
2476 #else
2477 static QDF_STATUS
2478 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2479 				       struct dp_pdev *pdev,
2480 				       struct dp_peer *peer,
2481 				       struct hal_tx_completion_status *ts,
2482 				       qdf_nbuf_t netbuf)
2483 {
2484 	return QDF_STATUS_E_NOSUPPORT;
2485 }
2486 
2487 static void
2488 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2489 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2490 {
2491 }
2492 #endif
2493 
2494 /**
2495  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2496  * @soc: Soc handle
2497  * @desc: software Tx descriptor to be processed
2498  *
2499  * Return: none
2500  */
2501 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2502 		struct dp_tx_desc_s *desc)
2503 {
2504 	struct dp_vdev *vdev = desc->vdev;
2505 	qdf_nbuf_t nbuf = desc->nbuf;
2506 
2507 	/* If it is TDLS mgmt, don't unmap or free the frame */
2508 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2509 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2510 
2511 	/* 0 : MSDU buffer, 1 : MLE */
2512 	if (desc->msdu_ext_desc) {
2513 		/* TSO free */
2514 		if (hal_tx_ext_desc_get_tso_enable(
2515 					desc->msdu_ext_desc->vaddr)) {
2516 			/* unmap each TSO seg before freeing the nbuf */
2517 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2518 						desc->tso_num_desc);
2519 			qdf_nbuf_free(nbuf);
2520 			return;
2521 		}
2522 	}
2523 
2524 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2525 
2526 	if (qdf_likely(!vdev->mesh_vdev))
2527 		qdf_nbuf_free(nbuf);
2528 	else {
2529 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2530 			qdf_nbuf_free(nbuf);
2531 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2532 		} else
2533 			vdev->osif_tx_free_ext((nbuf));
2534 	}
2535 }
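
/*
 * Summary (editorial note) of the free path chosen by dp_tx_comp_free_buf()
 * above:
 *
 *	TDLS mgmt frame		-> dp_non_std_tx_comp_free_buff()
 *	TSO (ext desc present)	-> unmap each TSO segment, qdf_nbuf_free()
 *	otherwise the nbuf is unmapped and then:
 *	  non-mesh vdev		-> qdf_nbuf_free()
 *	  mesh vdev, TO_FW set	-> qdf_nbuf_free() + mesh.completion_fw stat
 *	  mesh vdev, otherwise	-> vdev->osif_tx_free_ext()
 */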
2536 
2537 /**
2538  * dp_tx_mec_handler() - Tx MEC Notify Handler
2539  * @vdev: pointer to dp vdev handle
2540  * @status : Tx completion status from HTT descriptor
2541  *
2542  * Handles MEC notify event sent from fw to Host
2543  *
2544  * Return: none
2545  */
2546 #ifdef FEATURE_WDS
2547 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2548 {
2549 
2550 	struct dp_soc *soc;
2551 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2552 	struct dp_peer *peer;
2553 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2554 
2555 	if (!vdev->mec_enabled)
2556 		return;
2557 
2558 	/* MEC required only in STA mode */
2559 	if (vdev->opmode != wlan_op_mode_sta)
2560 		return;
2561 
2562 	soc = vdev->pdev->soc;
2563 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2564 	peer = TAILQ_FIRST(&vdev->peer_list);
2565 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2566 
2567 	if (!peer) {
2568 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2569 				FL("peer is NULL"));
2570 		return;
2571 	}
2572 
2573 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2574 			"%s Tx MEC Handler",
2575 			__func__);
2576 
2577 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2578 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2579 					status[(DP_MAC_ADDR_LEN - 2) + i];
2580 
2581 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2582 		dp_peer_add_ast(soc,
2583 				peer,
2584 				mac_addr,
2585 				CDP_TXRX_AST_TYPE_MEC,
2586 				flags);
2587 }
2588 #endif
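
/*
 * Worked example (editorial note) of the MAC extraction performed in
 * dp_tx_mec_handler() above, assuming DP_MAC_ADDR_LEN == 6: the address
 * arrives in the HTT status bytes status[4..9] in reverse byte order, so
 *
 *	status[4..9] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }
 *
 * yields
 *
 *	mac_addr[]   = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }
 *
 * i.e. 11:22:33:44:55:66, which is compared against the vdev MAC before a
 * CDP_TXRX_AST_TYPE_MEC AST entry is added for it.
 */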
2589 
2590 #ifdef MESH_MODE_SUPPORT
2591 /**
2592  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2593  *                                         in mesh meta header
2594  * @tx_desc: software descriptor head pointer
2595  * @ts: pointer to tx completion stats
2596  * Return: none
2597  */
2598 static
2599 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2600 		struct hal_tx_completion_status *ts)
2601 {
2602 	struct meta_hdr_s *mhdr;
2603 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2604 
2605 	if (!tx_desc->msdu_ext_desc) {
2606 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2607 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2608 				"netbuf %pK offset %d",
2609 				netbuf, tx_desc->pkt_offset);
2610 			return;
2611 		}
2612 	}
2613 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2614 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2615 			"netbuf %pK offset %lu", netbuf,
2616 			sizeof(struct meta_hdr_s));
2617 		return;
2618 	}
2619 
2620 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2621 	mhdr->rssi = ts->ack_frame_rssi;
2622 	mhdr->channel = tx_desc->pdev->operating_channel;
2623 }
2624 
2625 #else
2626 static
2627 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2628 		struct hal_tx_completion_status *ts)
2629 {
2630 }
2631 
2632 #endif
2633 
2634 /**
2635  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2636  * @peer: Handle to DP peer
2637  * @ts: pointer to HAL Tx completion stats
2638  * @length: MSDU length in bytes
2639  * Return: None
2640  */
2641 static inline void
2642 dp_tx_update_peer_stats(struct dp_peer *peer,
2643 			struct hal_tx_completion_status *ts, uint32_t length)
2644 {
2645 	struct dp_pdev *pdev = peer->vdev->pdev;
2646 	struct dp_soc *soc = NULL;
2647 	uint8_t mcs, pkt_type;
2648 
2649 	if (!pdev)
2650 		return;
2651 
2652 	soc = pdev->soc;
2653 
2654 	mcs = ts->mcs;
2655 	pkt_type = ts->pkt_type;
2656 
2657 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2658 		dp_err("Release source is not from TQM");
2659 		return;
2660 	}
2661 
2662 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2663 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2664 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2665 
2666 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2667 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2668 
2669 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2670 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2671 
2672 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2673 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2674 
2675 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2676 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2677 
2678 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2679 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2680 
2681 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2682 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2683 
2684 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2685 		return;
2686 	}
2687 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2688 
2689 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2690 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2691 
2692 	/*
2693 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2694 	 * Return from here if HTT PPDU events are enabled.
2695 	 */
2696 	if (!(soc->process_tx_status))
2697 		return;
2698 
2699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2700 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2701 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2702 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2703 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2704 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2705 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2706 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2707 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2708 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2709 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2710 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2711 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2712 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2713 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2714 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2715 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2716 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2717 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2718 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2719 
2720 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2721 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2722 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2723 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2724 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2725 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2726 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2727 
2728 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
2729 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2730 			     &peer->stats, ts->peer_id,
2731 			     UPDATE_PEER_STATS, pdev->pdev_id);
2732 #endif
2733 }
2734 
2735 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2736 /**
2737  * dp_tx_flow_pool_lock() - take flow pool lock
2738  * @soc: core txrx main context
2739  * @tx_desc: tx desc
2740  *
2741  * Return: None
2742  */
2743 static inline
2744 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2745 			  struct dp_tx_desc_s *tx_desc)
2746 {
2747 	struct dp_tx_desc_pool_s *pool;
2748 	uint8_t desc_pool_id;
2749 
2750 	desc_pool_id = tx_desc->pool_id;
2751 	pool = &soc->tx_desc[desc_pool_id];
2752 
2753 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2754 }
2755 
2756 /**
2757  * dp_tx_flow_pool_unlock() - release flow pool lock
2758  * @soc: core txrx main context
2759  * @tx_desc: tx desc
2760  *
2761  * Return: None
2762  */
2763 static inline
2764 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2765 			    struct dp_tx_desc_s *tx_desc)
2766 {
2767 	struct dp_tx_desc_pool_s *pool;
2768 	uint8_t desc_pool_id;
2769 
2770 	desc_pool_id = tx_desc->pool_id;
2771 	pool = &soc->tx_desc[desc_pool_id];
2772 
2773 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2774 }
2775 #else
2776 static inline
2777 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2778 {
2779 }
2780 
2781 static inline
2782 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2783 {
2784 }
2785 #endif
2786 
2787 /**
2788  * dp_tx_notify_completion() - Notify tx completion for this desc
2789  * @soc: core txrx main context
2790  * @tx_desc: tx desc
2791  * @netbuf:  buffer
2792  *
2793  * Return: none
2794  */
2795 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2796 					   struct dp_tx_desc_s *tx_desc,
2797 					   qdf_nbuf_t netbuf)
2798 {
2799 	void *osif_dev;
2800 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2801 
2802 	qdf_assert(tx_desc);
2803 
2804 	dp_tx_flow_pool_lock(soc, tx_desc);
2805 
2806 	if (!tx_desc->vdev ||
2807 	    !tx_desc->vdev->osif_vdev) {
2808 		dp_tx_flow_pool_unlock(soc, tx_desc);
2809 		return;
2810 	}
2811 
2812 	osif_dev = tx_desc->vdev->osif_vdev;
2813 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2814 	dp_tx_flow_pool_unlock(soc, tx_desc);
2815 
2816 	if (tx_compl_cbk)
2817 		tx_compl_cbk(netbuf, osif_dev);
2818 }
2819 
2820 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
2821  * @pdev: pdev handle
2822  * @tid: tid value
2823  * @txdesc_ts: timestamp from txdesc
2824  * @ppdu_id: ppdu id
2825  *
2826  * Return: none
2827  */
2828 #ifdef FEATURE_PERPKT_INFO
2829 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2830 					       uint8_t tid,
2831 					       uint64_t txdesc_ts,
2832 					       uint32_t ppdu_id)
2833 {
2834 	uint64_t delta_ms;
2835 	struct cdp_tx_sojourn_stats *sojourn_stats;
2836 
2837 	if (pdev->enhanced_stats_en == 0)
2838 		return;
2839 
2840 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2841 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2842 
2843 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2844 		if (!pdev->sojourn_buf)
2845 			return;
2846 
2847 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2848 					qdf_nbuf_data(pdev->sojourn_buf);
2849 
2850 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2851 			     sizeof(struct cdp_tx_sojourn_stats));
2852 
2853 		qdf_mem_zero(&pdev->sojourn_stats,
2854 			     sizeof(struct cdp_tx_sojourn_stats));
2855 
2856 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2857 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2858 				     WDI_NO_VAL, pdev->pdev_id);
2859 
2860 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2861 	}
2862 
2863 	if (tid == HTT_INVALID_TID)
2864 		return;
2865 
2866 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2867 				txdesc_ts;
2868 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2869 			    delta_ms);
2870 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2871 	pdev->sojourn_stats.num_msdus[tid]++;
2872 }
2873 #else
2874 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2875 					       uint8_t tid,
2876 					       uint64_t txdesc_ts,
2877 					       uint32_t ppdu_id)
2878 {
2879 }
2880 #endif
2881 
2882 /**
2883  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
2884  * @soc: DP Soc handle
2885  * @desc: software Tx descriptor
2886  * @ts: Tx completion status from HAL/HTT descriptor
2887  * @peer: DP peer handle
2888  * Return: none
2889  */
2890 static inline void
2891 dp_tx_comp_process_desc(struct dp_soc *soc,
2892 			struct dp_tx_desc_s *desc,
2893 			struct hal_tx_completion_status *ts,
2894 			struct dp_peer *peer)
2895 {
2896 	/*
2897 	 * m_copy/tx_capture modes are not supported for
2898 	 * scatter gather packets
2899 	 */
2900 	if (!(desc->msdu_ext_desc) &&
2901 	    (dp_get_completion_indication_for_stack(soc, desc->pdev,
2902 						    peer, ts, desc->nbuf)
2903 			== QDF_STATUS_SUCCESS)) {
2904 		qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2905 			       QDF_DMA_TO_DEVICE);
2906 
2907 		dp_send_completion_to_stack(soc, desc->pdev, ts->peer_id,
2908 					    ts->ppdu_id, desc->nbuf);
2909 	} else {
2910 		dp_tx_comp_free_buf(soc, desc);
2911 	}
2912 }
2913 
2914 /**
2915  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2916  * @tx_desc: software descriptor head pointer
2917  * @ts: Tx completion status
2918  * @peer: peer handle
2919  *
2920  * Return: none
2921  */
2922 static inline
2923 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2924 				  struct hal_tx_completion_status *ts,
2925 				  struct dp_peer *peer)
2926 {
2927 	uint32_t length;
2928 	struct dp_soc *soc = NULL;
2929 	struct dp_vdev *vdev = tx_desc->vdev;
2930 	qdf_ether_header_t *eh =
2931 		(qdf_ether_header_t *)qdf_nbuf_data(tx_desc->nbuf);
2932 
2933 	if (!vdev) {
2934 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2935 				"invalid vdev");
2936 		goto out;
2937 	}
2938 
2939 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
2940 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2941 				 QDF_TRACE_DEFAULT_PDEV_ID,
2942 				 qdf_nbuf_data_addr(tx_desc->nbuf),
2943 				 sizeof(qdf_nbuf_data(tx_desc->nbuf)),
2944 				 tx_desc->id,
2945 				 ts->status));
2946 
2947 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2948 				"-------------------- \n"
2949 				"Tx Completion Stats: \n"
2950 				"-------------------- \n"
2951 				"ack_frame_rssi = %d \n"
2952 				"first_msdu = %d \n"
2953 				"last_msdu = %d \n"
2954 				"msdu_part_of_amsdu = %d \n"
2955 				"rate_stats valid = %d \n"
2956 				"bw = %d \n"
2957 				"pkt_type = %d \n"
2958 				"stbc = %d \n"
2959 				"ldpc = %d \n"
2960 				"sgi = %d \n"
2961 				"mcs = %d \n"
2962 				"ofdma = %d \n"
2963 				"tones_in_ru = %d \n"
2964 				"tsf = %d \n"
2965 				"ppdu_id = %d \n"
2966 				"transmit_cnt = %d \n"
2967 				"tid = %d \n"
2968 				"peer_id = %d\n",
2969 				ts->ack_frame_rssi, ts->first_msdu,
2970 				ts->last_msdu, ts->msdu_part_of_amsdu,
2971 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
2972 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
2973 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
2974 				ts->transmit_cnt, ts->tid, ts->peer_id);
2975 
2976 	soc = vdev->pdev->soc;
2977 
2978 	/* Update SoC level stats */
2979 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2980 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2981 
2982 	/* Update per-packet stats for mesh mode */
2983 	if (qdf_unlikely(vdev->mesh_vdev) &&
2984 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2985 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
2986 
2987 	length = qdf_nbuf_len(tx_desc->nbuf);
2988 	/* Update peer level stats */
2989 	if (!peer) {
2990 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2991 				   "peer is null or deletion in progress");
2992 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2993 		goto out;
2994 	}
2995 
2996 	if (qdf_likely(!peer->bss_peer)) {
2997 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2998 
2999 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3000 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3001 	} else {
3002 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3003 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3004 
3005 			if ((peer->vdev->tx_encap_type ==
3006 				htt_cmn_pkt_type_ethernet) &&
3007 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3008 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3009 			}
3010 		}
3011 	}
3012 
3013 	dp_tx_update_peer_stats(peer, ts, length);
3014 
3015 out:
3016 	return;
3017 }
3018 /**
3019  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3020  * @soc: core txrx main context
3021  * @comp_head: software descriptor head pointer
3022  *
3023  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
3024  * and release the software descriptors after processing is complete
3025  *
3026  * Return: none
3027  */
3028 static void
3029 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3030 			     struct dp_tx_desc_s *comp_head)
3031 {
3032 	struct dp_tx_desc_s *desc;
3033 	struct dp_tx_desc_s *next;
3034 	struct hal_tx_completion_status ts = {0};
3035 	struct dp_peer *peer;
3036 
3037 	DP_HIST_INIT();
3038 	desc = comp_head;
3039 
3040 	while (desc) {
3041 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3042 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3043 		dp_tx_comp_process_tx_status(desc, &ts, peer);
3044 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3045 
3046 		if (peer)
3047 			dp_peer_unref_del_find_by_id(peer);
3048 
3049 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
3050 
3051 		next = desc->next;
3052 
3053 		dp_tx_desc_release(desc, desc->pool_id);
3054 		desc = next;
3055 	}
3056 
3057 	DP_TX_HIST_STATS_PER_PDEV();
3058 }
3059 
3060 /**
3061  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3062  * @tx_desc: software descriptor head pointer
3063  * @status : Tx completion status from HTT descriptor
3064  *
3065  * This function will process HTT Tx indication messages from Target
3066  *
3067  * Return: none
3068  */
3069 static
3070 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
3071 {
3072 	uint8_t tx_status;
3073 	struct dp_pdev *pdev;
3074 	struct dp_vdev *vdev;
3075 	struct dp_soc *soc;
3076 	struct hal_tx_completion_status ts = {0};
3077 	uint32_t *htt_desc = (uint32_t *)status;
3078 	struct dp_peer *peer;
3079 
3080 	qdf_assert(tx_desc->pdev);
3081 
3082 	pdev = tx_desc->pdev;
3083 	vdev = tx_desc->vdev;
3084 	soc = pdev->soc;
3085 
3086 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3087 
3088 	switch (tx_status) {
3089 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3090 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3091 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3092 	{
3093 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3094 			ts.peer_id =
3095 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3096 						htt_desc[2]);
3097 			ts.tid =
3098 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3099 						htt_desc[2]);
3100 		} else {
3101 			ts.peer_id = HTT_INVALID_PEER;
3102 			ts.tid = HTT_INVALID_TID;
3103 		}
3104 		ts.ppdu_id =
3105 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3106 					htt_desc[1]);
3107 		ts.ack_frame_rssi =
3108 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3109 					htt_desc[1]);
3110 
3111 		ts.first_msdu = 1;
3112 		ts.last_msdu = 1;
3113 
3114 		if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
3115 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
3116 
3117 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3118 
3119 		dp_tx_comp_process_tx_status(tx_desc, &ts, peer);
3120 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3121 
3122 		if (qdf_likely(peer))
3123 			dp_peer_unref_del_find_by_id(peer);
3124 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3125 
3126 		break;
3127 	}
3128 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3129 	{
3130 		dp_tx_reinject_handler(tx_desc, status);
3131 		break;
3132 	}
3133 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3134 	{
3135 		dp_tx_inspect_handler(tx_desc, status);
3136 		break;
3137 	}
3138 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3139 	{
3140 		dp_tx_mec_handler(vdev, status);
3141 		break;
3142 	}
3143 	default:
3144 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3145 			  "%s Invalid HTT tx_status %d\n",
3146 			  __func__, tx_status);
3147 		break;
3148 	}
3149 }
3150 
3151 /**
3152  * dp_tx_comp_handler() - Tx completion handler
3153  * @soc: core txrx main context
3154  * @ring_id: completion ring id
3155  * @quota: No. of packets/descriptors that can be serviced in one loop
3156  *
3157  * This function will collect hardware release ring element contents and
3158  * handle descriptor contents. Based on contents, free packet or handle error
3159  * conditions
3160  *
3161  * Return: none
3162  */
3163 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3164 {
3165 	void *tx_comp_hal_desc;
3166 	uint8_t buffer_src;
3167 	uint8_t pool_id;
3168 	uint32_t tx_desc_id;
3169 	struct dp_tx_desc_s *tx_desc = NULL;
3170 	struct dp_tx_desc_s *head_desc = NULL;
3171 	struct dp_tx_desc_s *tail_desc = NULL;
3172 	uint32_t num_processed;
3173 	uint32_t count;
3174 
3175 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3176 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3177 				"%s %d : HAL RING Access Failed -- %pK",
3178 				__func__, __LINE__, hal_srng);
3179 		return 0;
3180 	}
3181 
3182 	num_processed = 0;
3183 	count = 0;
3184 
3185 	/* Find head descriptor from completion ring */
3186 	while (qdf_likely(tx_comp_hal_desc =
3187 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3188 
3189 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3190 
3191 		/* If this buffer was not released by TQM or FW, then it is
3192 		 * not a Tx completion indication; assert */
3193 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3194 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3195 
3196 			QDF_TRACE(QDF_MODULE_ID_DP,
3197 					QDF_TRACE_LEVEL_FATAL,
3198 					"Tx comp release_src != TQM | FW");
3199 
3200 			qdf_assert_always(0);
3201 		}
3202 
3203 		/* Get descriptor id */
3204 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3205 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3206 			DP_TX_DESC_ID_POOL_OS;
3207 
3208 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3209 			continue;
3210 
3211 		/* Find Tx descriptor */
3212 		tx_desc = dp_tx_desc_find(soc, pool_id,
3213 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3214 				DP_TX_DESC_ID_PAGE_OS,
3215 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3216 				DP_TX_DESC_ID_OFFSET_OS);
3217 
3218 		/*
3219 		 * If the descriptor is already freed in vdev_detach,
3220 		 * continue to next descriptor
3221 		 */
3222 		if (!tx_desc->vdev) {
3223 			QDF_TRACE(QDF_MODULE_ID_DP,
3224 				  QDF_TRACE_LEVEL_INFO,
3225 				  "Descriptor freed in vdev_detach %d",
3226 				  tx_desc_id);
3227 
3228 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3229 			count++;
3230 			continue;
3231 		}
3232 
3233 		/*
3234 		 * If the release source is FW, process the HTT status
3235 		 */
3236 		if (qdf_unlikely(buffer_src ==
3237 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3238 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3239 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3240 					htt_tx_status);
3241 			dp_tx_process_htt_completion(tx_desc,
3242 					htt_tx_status);
3243 		} else {
3244 			/* Pool id is not matching. Error */
3245 			if (tx_desc->pool_id != pool_id) {
3246 				QDF_TRACE(QDF_MODULE_ID_DP,
3247 					QDF_TRACE_LEVEL_FATAL,
3248 					"Tx Comp pool id %d not matched %d",
3249 					pool_id, tx_desc->pool_id);
3250 
3251 				qdf_assert_always(0);
3252 			}
3253 
3254 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3255 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3256 				QDF_TRACE(QDF_MODULE_ID_DP,
3257 					QDF_TRACE_LEVEL_FATAL,
3258 					"Txdesc invalid, flgs = %x,id = %d",
3259 					tx_desc->flags,	tx_desc_id);
3260 				qdf_assert_always(0);
3261 			}
3262 
3263 			/* First ring descriptor on the cycle */
3264 			if (!head_desc) {
3265 				head_desc = tx_desc;
3266 				tail_desc = tx_desc;
3267 			}
3268 
3269 			tail_desc->next = tx_desc;
3270 			tx_desc->next = NULL;
3271 			tail_desc = tx_desc;
3272 
3273 			/* Collect hw completion contents */
3274 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3275 					&tx_desc->comp, 1);
3276 
3277 		}
3278 
3279 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3280 
3281 		/*
3282 		 * Processed packet count is more than the given quota,
3283 		 * stop processing
3284 		 */
3285 		if ((num_processed >= quota))
3286 			break;
3287 
3288 		count++;
3289 	}
3290 
3291 	hal_srng_access_end(soc->hal_soc, hal_srng);
3292 
3293 	/* Process the reaped descriptors */
3294 	if (head_desc)
3295 		dp_tx_comp_process_desc_list(soc, head_desc);
3296 
3297 	return num_processed;
3298 }
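
/*
 * Illustrative sketch (editorial note, not part of the driver): how the
 * cookie returned by hal_tx_comp_get_desc_id() is decomposed in the loop
 * above. Pool, page and offset are packed into one 32-bit id and recovered
 * with the DP_TX_DESC_ID_*_MASK / _OS definitions:
 *
 *	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *			DP_TX_DESC_ID_POOL_OS;
 *	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *			DP_TX_DESC_ID_PAGE_OS;
 *	offset  = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *			DP_TX_DESC_ID_OFFSET_OS;
 *
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */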
3299 
3300 #ifdef FEATURE_WLAN_TDLS
3301 /**
3302  * dp_tx_non_std() - Allow the control-path SW to send data frames
3303  *
3304  * @vdev_handle: which vdev should transmit the tx data frames
3305  * @tx_spec: what non-standard handling to apply to the tx data frames
3306  * @msdu_list: NULL-terminated list of tx MSDUs
3307  *
3308  * Return: NULL on success,
3309  *         nbuf when it fails to send
3310  */
3311 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3312 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3313 {
3314 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3315 
3316 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3317 		vdev->is_tdls_frame = true;
3318 	return dp_tx_send(vdev_handle, msdu_list);
3319 }
3320 #endif
3321 
3322 /**
3323  * dp_tx_vdev_attach() - attach vdev to dp tx
3324  * @vdev: virtual device instance
3325  *
3326  * Return: QDF_STATUS_SUCCESS: success
3327  *         QDF_STATUS_E_RESOURCES: Error return
3328  */
3329 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3330 {
3331 	/*
3332 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3333 	 */
3334 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3335 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3336 
3337 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3338 			vdev->vdev_id);
3339 
3340 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3341 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3342 
3343 	/*
3344 	 * Set HTT Extension Valid bit to 0 by default
3345 	 */
3346 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3347 
3348 	dp_tx_vdev_update_search_flags(vdev);
3349 
3350 	return QDF_STATUS_SUCCESS;
3351 }
3352 
3353 #ifdef FEATURE_WDS
3354 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3355 {
3356 	struct dp_soc *soc = vdev->pdev->soc;
3357 
3358 	/*
3359 	 * If AST index override support is available (HKv2 etc),
3360 	 * the DA search flag should always be enabled
3361 	 *
3362 	 * If AST index override support is not available (HKv1),
3363 	 * DA search flag should be used for all modes except QWRAP
3364 	 */
3365 	if (soc->ast_override_support || !vdev->proxysta_vdev)
3366 		return true;
3367 
3368 	return false;
3369 }
3370 #else
3371 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3372 {
3373 	return false;
3374 }
3375 #endif
3376 
3377 /**
3378  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3379  * @vdev: virtual device instance
3380  *
3381  * Return: void
3382  *
3383  */
3384 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3385 {
3386 	struct dp_soc *soc = vdev->pdev->soc;
3387 
3388 	/*
3389 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3390 	 * for TDLS link
3391 	 *
3392 	 * Enable AddrY (SA based search) only for non-WDS STA and
3393 	 * ProxySTA VAP (in HKv1) modes.
3394 	 *
3395 	 * In all other VAP modes, only DA based search should be
3396 	 * enabled
3397 	 */
3398 	if (vdev->opmode == wlan_op_mode_sta &&
3399 	    vdev->tdls_link_connected)
3400 		vdev->hal_desc_addr_search_flags =
3401 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3402 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3403 		 !dp_tx_da_search_override(vdev))
3404 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3405 	else
3406 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3407 
3408 	/* Set search type only when peer map v2 messaging is enabled
3409 	 * as we will have the search index (AST hash) only when v2 is
3410 	 * enabled
3411 	 */
3412 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3413 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3414 	else
3415 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3416 }
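
/*
 * Summary (editorial note) of the address-search configuration set by
 * dp_tx_vdev_update_search_flags() above:
 *
 *	STA with TDLS link up			: ADDRX_EN | ADDRY_EN
 *	STA without DA search override		: ADDRY_EN
 *	all other modes				: ADDRX_EN
 *
 *	search_type = HAL_TX_ADDR_INDEX_SEARCH	 (STA with peer map v2)
 *	search_type = HAL_TX_ADDR_SEARCH_DEFAULT (otherwise)
 */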
3417 
3418 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3419 /**
3420  * dp_tx_desc_flush() - release resources associated with a tx_desc
3421  * @vdev: virtual device instance
3422  *
3423  * This function will free all outstanding Tx buffers,
3424  * including ME buffers for which either the free during
3425  * completion did not happen or the completion was never
3426  * received.
3427  */
3428 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3429 {
3430 	uint8_t i;
3431 	uint32_t j;
3432 	uint32_t num_desc, page_id, offset;
3433 	uint16_t num_desc_per_page;
3434 	struct dp_soc *soc = vdev->pdev->soc;
3435 	struct dp_tx_desc_s *tx_desc = NULL;
3436 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3437 
3438 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3439 		tx_desc_pool = &soc->tx_desc[i];
3440 		if (!(tx_desc_pool->pool_size) ||
3441 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3442 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3443 			continue;
3444 
3445 		num_desc = tx_desc_pool->pool_size;
3446 		num_desc_per_page =
3447 			tx_desc_pool->desc_pages.num_element_per_page;
3448 		for (j = 0; j < num_desc; j++) {
3449 			page_id = j / num_desc_per_page;
3450 			offset = j % num_desc_per_page;
3451 
3452 			if (qdf_unlikely(!(tx_desc_pool->
3453 					 desc_pages.cacheable_pages)))
3454 				break;
3455 
3456 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3457 			if (tx_desc && (tx_desc->vdev == vdev) &&
3458 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3459 				dp_tx_comp_free_buf(soc, tx_desc);
3460 				dp_tx_desc_release(tx_desc, i);
3461 			}
3462 		}
3463 	}
3464 }
3465 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3466 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3467 {
3468 	uint8_t i, num_pool;
3469 	uint32_t j;
3470 	uint32_t num_desc, page_id, offset;
3471 	uint16_t num_desc_per_page;
3472 	struct dp_soc *soc = vdev->pdev->soc;
3473 	struct dp_tx_desc_s *tx_desc = NULL;
3474 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3475 
3476 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3477 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3478 
3479 	for (i = 0; i < num_pool; i++) {
3480 		tx_desc_pool = &soc->tx_desc[i];
3481 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3482 			continue;
3483 
3484 		num_desc_per_page =
3485 			tx_desc_pool->desc_pages.num_element_per_page;
3486 		for (j = 0; j < num_desc; j++) {
3487 			page_id = j / num_desc_per_page;
3488 			offset = j % num_desc_per_page;
3489 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3490 
3491 			if (tx_desc && (tx_desc->vdev == vdev) &&
3492 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3493 				dp_tx_comp_free_buf(soc, tx_desc);
3494 				dp_tx_desc_release(tx_desc, i);
3495 			}
3496 		}
3497 	}
3498 }
3499 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3500 
3501 /**
3502  * dp_tx_vdev_detach() - detach vdev from dp tx
3503  * @vdev: virtual device instance
3504  *
3505  * Return: QDF_STATUS_SUCCESS: success
3506  *         QDF_STATUS_E_RESOURCES: Error return
3507  */
3508 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3509 {
3510 	dp_tx_desc_flush(vdev);
3511 	return QDF_STATUS_SUCCESS;
3512 }
3513 
3514 /**
3515  * dp_tx_pdev_attach() - attach pdev to dp tx
3516  * @pdev: physical device instance
3517  *
3518  * Return: QDF_STATUS_SUCCESS: success
3519  *         QDF_STATUS_E_RESOURCES: Error return
3520  */
3521 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3522 {
3523 	struct dp_soc *soc = pdev->soc;
3524 
3525 	/* Initialize Flow control counters */
3526 	qdf_atomic_init(&pdev->num_tx_exception);
3527 	qdf_atomic_init(&pdev->num_tx_outstanding);
3528 
3529 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3530 		/* Initialize descriptors in TCL Ring */
3531 		hal_tx_init_data_ring(soc->hal_soc,
3532 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3533 	}
3534 
3535 	return QDF_STATUS_SUCCESS;
3536 }
3537 
3538 /**
3539  * dp_tx_pdev_detach() - detach pdev from dp tx
3540  * @pdev: physical device instance
3541  *
3542  * Return: QDF_STATUS_SUCCESS: success
3543  *         QDF_STATUS_E_RESOURCES: Error return
3544  */
3545 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3546 {
3547 	dp_tx_me_exit(pdev);
3548 	return QDF_STATUS_SUCCESS;
3549 }
3550 
3551 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3552 /* Pools will be allocated dynamically */
3553 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3554 					int num_desc)
3555 {
3556 	uint8_t i;
3557 
3558 	for (i = 0; i < num_pool; i++) {
3559 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3560 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3567 {
3568 	uint8_t i;
3569 
3570 	for (i = 0; i < num_pool; i++)
3571 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3572 }
3573 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3574 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3575 					int num_desc)
3576 {
3577 	uint8_t i;
3578 
3579 	/* Allocate software Tx descriptor pools */
3580 	for (i = 0; i < num_pool; i++) {
3581 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3582 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3583 					"%s Tx Desc Pool alloc %d failed %pK",
3584 					__func__, i, soc);
3585 			return ENOMEM;
3586 		}
3587 	}
3588 	return 0;
3589 }
3590 
3591 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3592 {
3593 	uint8_t i;
3594 
3595 	for (i = 0; i < num_pool; i++) {
3596 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3597 		if (dp_tx_desc_pool_free(soc, i)) {
3598 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3599 				"%s Tx Desc Pool Free failed", __func__);
3600 		}
3601 	}
3602 }
3603 
3604 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
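
/*
 * Both variants of dp_tx_alloc_static_pools() report failure with a
 * non-zero return value, which is how dp_tx_soc_attach() below consumes
 * them:
 *
 *	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
 *		goto fail;
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2 defined, the call only creates the
 * per-pool flow_pool_lock and marks each pool FLOW_POOL_INACTIVE; the
 * descriptors themselves are allocated dynamically by the flow-control
 * code rather than up front.
 */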
3605 
3606 #ifndef QCA_MEM_ATTACH_ON_WIFI3
3607 /**
3608  * dp_tso_attach_wifi3() - TSO attach handler
3609  * @txrx_soc: Opaque Dp handle
3610  *
3611  * Reserve TSO descriptor buffers
3612  *
3613  * Return: QDF_STATUS_E_FAILURE on failure or
3614  * QDF_STATUS_SUCCESS on success
3615  */
3616 static
3617 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3618 {
3619 	return dp_tso_soc_attach(txrx_soc);
3620 }
3621 
3622 /**
3623  * dp_tso_detach_wifi3() - TSO Detach handler
3624  * @txrx_soc: Opaque Dp handle
3625  *
3626  * Deallocate TSO descriptor buffers
3627  *
3628  * Return: QDF_STATUS_E_FAILURE on failure or
3629  * QDF_STATUS_SUCCESS on success
3630  */
3631 static
3632 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3633 {
3634 	return dp_tso_soc_detach(txrx_soc);
3635 }
3636 #else
3637 static
3638 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3639 {
3640 	return QDF_STATUS_SUCCESS;
3641 }
3642 
3643 static
3644 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3645 {
3646 	return QDF_STATUS_SUCCESS;
3647 }
3648 #endif
3649 
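/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_SUCCESS
 */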
3650 QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
3651 {
3652 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3653 	uint8_t i;
3654 	uint8_t num_pool;
3655 	uint32_t num_desc;
3656 
3657 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3658 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3659 
3660 	for (i = 0; i < num_pool; i++)
3661 		dp_tx_tso_desc_pool_free(soc, i);
3662 
3663 	dp_info("%s TSO Desc Pool %d Free descs = %d",
3664 		__func__, num_pool, num_desc);
3665 
3666 	for (i = 0; i < num_pool; i++)
3667 		dp_tx_tso_num_seg_pool_free(soc, i);
3668 
3669 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
3670 		__func__, num_pool, num_desc);
3671 
3672 	return QDF_STATUS_SUCCESS;
3673 }
3674 
3675 /**
3676  * dp_tso_soc_attach() - TSO attach handler
3677  * @txrx_soc: Opaque Dp handle
3678  *
3679  * Reserve TSO descriptor buffers
3680  *
3681  * Return: QDF_STATUS_E_FAILURE on failure or
3682  * QDF_STATUS_SUCCESS on success
3683  */
3684 QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
3685 {
3686 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3687 	uint8_t i;
3688 	uint8_t num_pool;
3689 	uint32_t num_desc;
3690 
3691 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3692 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3693 
3694 	for (i = 0; i < num_pool; i++) {
3695 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3696 			dp_err("TSO Desc Pool alloc %d failed %pK",
3697 			       i, soc);
3698 
3699 			return QDF_STATUS_E_FAILURE;
3700 		}
3701 	}
3702 
3703 	dp_info("%s TSO Desc Alloc %d, descs = %d",
3704 		__func__, num_pool, num_desc);
3705 
3706 	for (i = 0; i < num_pool; i++) {
3707 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3708 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
3709 			       i, soc);
3710 
3711 			return QDF_STATUS_E_FAILURE;
3712 		}
3713 	}
3714 	return QDF_STATUS_SUCCESS;
3715 }
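
/*
 * Error-handling note: dp_tso_soc_attach() returns QDF_STATUS_E_FAILURE
 * as soon as one pool allocation fails and does not unwind the pools it
 * has already allocated; cleanup is left to the caller.  A minimal
 * caller sketch (illustrative only):
 *
 *	if (dp_tso_soc_attach((void *)soc) != QDF_STATUS_SUCCESS)
 *		dp_tso_soc_detach((void *)soc);
 *
 * In this file that unwinding happens through the fail path of
 * dp_tx_soc_attach(), which calls dp_tx_soc_detach() and, from there,
 * dp_tso_detach_wifi3().
 */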
3716 
3717 /**
3718  * dp_tx_soc_detach() - detach soc from dp tx
3719  * @soc: core txrx main context
3720  *
3721  * This function detaches dp tx from the main device context;
3722  * it frees dp tx resources and de-initializes them
3723  *
3724  * Return: QDF_STATUS_SUCCESS: success
3725  *         QDF_STATUS_E_RESOURCES: Error return
3726  */
3727 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3728 {
3729 	uint8_t num_pool;
3730 	uint16_t num_desc;
3731 	uint16_t num_ext_desc;
3732 	uint8_t i;
3733 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3734 
3735 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3736 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3737 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3738 
3739 	dp_tx_flow_control_deinit(soc);
3740 	dp_tx_delete_static_pools(soc, num_pool);
3741 
3742 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3743 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3744 			__func__, num_pool, num_desc);
3745 
3746 	for (i = 0; i < num_pool; i++) {
3747 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3748 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3749 					"%s Tx Ext Desc Pool Free failed",
3750 					__func__);
3751 			return QDF_STATUS_E_RESOURCES;
3752 		}
3753 	}
3754 
3755 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3756 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3757 			__func__, num_pool, num_ext_desc);
3758 
3759 	status = dp_tso_detach_wifi3(soc);
3760 	if (status != QDF_STATUS_SUCCESS)
3761 		return status;
3762 
3763 	return QDF_STATUS_SUCCESS;
3764 }
3765 
3766 /**
3767  * dp_tx_soc_attach() - attach soc to dp tx
3768  * @soc: core txrx main context
3769  *
3770  * This function attaches dp tx to the main device context;
3771  * it allocates dp tx resources and initializes them
3772  *
3773  * Return: QDF_STATUS_SUCCESS: success
3774  *         QDF_STATUS_E_RESOURCES: Error return
3775  */
3776 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3777 {
3778 	uint8_t i;
3779 	uint8_t num_pool;
3780 	uint32_t num_desc;
3781 	uint32_t num_ext_desc;
3782 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3783 
3784 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3785 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3786 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3787 
3788 	if (num_pool > MAX_TXDESC_POOLS)
3789 		goto fail;
3790 
3791 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3792 		goto fail;
3793 
3794 	dp_tx_flow_control_init(soc);
3795 
3796 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3797 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3798 			__func__, num_pool, num_desc);
3799 
3800 	/* Allocate extension tx descriptor pools */
3801 	for (i = 0; i < num_pool; i++) {
3802 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3803 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3804 				"MSDU Ext Desc Pool alloc %d failed %pK",
3805 				i, soc);
3806 
3807 			goto fail;
3808 		}
3809 	}
3810 
3811 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3812 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3813 			__func__, num_pool, num_ext_desc);
3814 
3815 	status = dp_tso_attach_wifi3((void *)soc);
3816 	if (status != QDF_STATUS_SUCCESS)
3817 		goto fail;
3818 
3819 
3820 	/* Initialize descriptors in TCL Rings */
3821 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3822 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3823 			hal_tx_init_data_ring(soc->hal_soc,
3824 					soc->tcl_data_ring[i].hal_srng);
3825 		}
3826 	}
3827 
3828 	/*
3829 	 * TODO: Add a runtime config option to enable this.
3830 	 */
3831 	/*
3832 	 * Due to multiple issues on NPR EMU, enable it selectively
3833 	 * only for NPR EMU; it should be removed once NPR platforms
3834 	 * are stable.
3835 	 */
3836 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3837 
3838 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3839 			"%s HAL Tx init Success", __func__);
3840 
3841 	return QDF_STATUS_SUCCESS;
3842 
3843 fail:
3844 	/* Detach will take care of freeing only allocated resources */
3845 	dp_tx_soc_detach(soc);
3846 	return QDF_STATUS_E_RESOURCES;
3847 }
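
/*
 * Bring-up sketch (illustrative; the ordering below is an assumption
 * about the caller, not something this file enforces): soc-level Tx
 * resources are attached once, per-pdev state is attached on top of
 * them, and teardown runs in the reverse order:
 *
 *	if (dp_tx_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	if (dp_tx_pdev_attach(pdev) != QDF_STATUS_SUCCESS) {
 *		dp_tx_soc_detach(soc);
 *		return QDF_STATUS_E_RESOURCES;
 *	}
 *	...
 *	dp_tx_pdev_detach(pdev);
 *	dp_tx_soc_detach(soc);
 */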
3848 
3849 /**
3850  * dp_tx_me_mem_free() - Free memory allocated for multicast enhancement
3851  * @pdev: pointer to DP PDEV structure
3852  * @seg_info_head: Pointer to the head of the segment info list
3853  *
3854  * Return: void
3855  */
3856 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3857 		struct dp_tx_seg_info_s *seg_info_head)
3858 {
3859 	struct dp_tx_me_buf_t *mc_uc_buf;
3860 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3861 	qdf_nbuf_t nbuf = NULL;
3862 	uint64_t phy_addr;
3863 
3864 	while (seg_info_head) {
3865 		nbuf = seg_info_head->nbuf;
3866 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3867 			seg_info_head->frags[0].vaddr;
3868 		phy_addr = seg_info_head->frags[0].paddr_hi;
3869 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3870 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3871 				phy_addr,
3872 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3873 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3874 		qdf_nbuf_free(nbuf);
3875 		seg_info_new = seg_info_head;
3876 		seg_info_head = seg_info_head->next;
3877 		qdf_mem_free(seg_info_new);
3878 	}
3879 }
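
/*
 * The physical address of each ME header fragment is carried as a
 * 32-bit hi/lo pair in the frag info, so dp_tx_me_mem_free() above (and
 * the mapping code below) reassembles or splits it explicitly.  Worked
 * example with an assumed address of 0x1_2345_6780:
 *
 *	paddr_hi = 0x00000001;
 *	paddr_lo = 0x23456780;
 *	phy_addr = ((uint64_t)paddr_hi << 32) | paddr_lo;  // 0x123456780
 */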
3880 
3881 /**
3882  * dp_tx_me_send_convert_ucast() - Convert a multicast frame to per-client unicast
3883  * @vdev_handle: DP VDEV handle
3884  * @nbuf: Multicast nbuf
3885  * @newmac: Table of the clients to which packets have to be sent
3886  * @new_mac_cnt: No of clients
3887  *
3888  * Return: no of converted packets
3889  */
3890 uint16_t
3891 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3892 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3893 {
3894 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3895 	struct dp_pdev *pdev = vdev->pdev;
3896 	qdf_ether_header_t *eh;
3897 	uint8_t *data;
3898 	uint16_t len;
3899 
3900 	/* reference to frame dst addr */
3901 	uint8_t *dstmac;
3902 	/* copy of original frame src addr */
3903 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3904 
3905 	/* local index into newmac */
3906 	uint8_t new_mac_idx = 0;
3907 	struct dp_tx_me_buf_t *mc_uc_buf;
3908 	qdf_nbuf_t  nbuf_clone;
3909 	struct dp_tx_msdu_info_s msdu_info;
3910 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3911 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3912 	struct dp_tx_seg_info_s *seg_info_new;
3913 	struct dp_tx_frag_info_s data_frag;
3914 	qdf_dma_addr_t paddr_data;
3915 	qdf_dma_addr_t paddr_mcbuf = 0;
3916 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3917 	QDF_STATUS status;
3918 
3919 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3920 
3921 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3922 
3923 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3924 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3925 
3926 	len = qdf_nbuf_len(nbuf);
3927 
3928 	data = qdf_nbuf_data(nbuf);
3929 
3930 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3931 			QDF_DMA_TO_DEVICE);
3932 
3933 	if (status) {
3934 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3935 				"Mapping failure Error:%d", status);
3936 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3937 		qdf_nbuf_free(nbuf);
3938 		return 1;
3939 	}
3940 
3941 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3942 
3943 	/*preparing data fragment*/
3944 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3945 	data_frag.paddr_lo = (uint32_t)paddr_data;
3946 	data_frag.paddr_hi = (((uint64_t) paddr_data)  >> 32);
3947 	data_frag.len = len - DP_MAC_ADDR_LEN;
3948 
3949 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3950 		dstmac = newmac[new_mac_idx];
3951 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3952 				"added mac addr (%pM)", dstmac);
3953 
3954 		/* Check for NULL Mac Address */
3955 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3956 			continue;
3957 
3958 		/* frame to self mac. skip */
3959 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3960 			continue;
3961 
3962 		/*
3963 		 * TODO: optimize to avoid malloc in per-packet path
3964 		 * For eg. seg_pool can be made part of vdev structure
3965 		 */
3966 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3967 
3968 		if (!seg_info_new) {
3969 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3970 					"alloc failed");
3971 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3972 			goto fail_seg_alloc;
3973 		}
3974 
3975 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3976 		if (mc_uc_buf == NULL)
3977 			goto fail_buf_alloc;
3978 
3979 		/*
3980 		 * TODO: Check if we need to clone the nbuf
3981 		 * Or can we just use the reference for all cases
3982 		 */
3983 		if (new_mac_idx < (new_mac_cnt - 1)) {
3984 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3985 			if (nbuf_clone == NULL) {
3986 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3987 				goto fail_clone;
3988 			}
3989 		} else {
3990 			/*
3991 			 * Update the ref
3992 			 * to account for frame sent without cloning
3993 			 */
3994 			qdf_nbuf_ref(nbuf);
3995 			nbuf_clone = nbuf;
3996 		}
3997 
3998 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3999 
4000 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
4001 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
4002 				&paddr_mcbuf);
4003 
4004 		if (status) {
4005 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4006 					"Mapping failure Error:%d", status);
4007 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
4008 			goto fail_map;
4009 		}
4010 
4011 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
4012 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
4013 		seg_info_new->frags[0].paddr_hi =
4014 			((uint64_t) paddr_mcbuf >> 32);
4015 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
4016 
4017 		seg_info_new->frags[1] = data_frag;
4018 		seg_info_new->nbuf = nbuf_clone;
4019 		seg_info_new->frag_cnt = 2;
4020 		seg_info_new->total_len = len;
4021 
4022 		seg_info_new->next = NULL;
4023 
4024 		if (seg_info_head == NULL)
4025 			seg_info_head = seg_info_new;
4026 		else
4027 			seg_info_tail->next = seg_info_new;
4028 
4029 		seg_info_tail = seg_info_new;
4030 	}
4031 
4032 	if (!seg_info_head) {
4033 		goto free_return;
4034 	}
4035 
4036 	msdu_info.u.sg_info.curr_seg = seg_info_head;
4037 	msdu_info.num_seg = new_mac_cnt;
4038 	msdu_info.frm_type = dp_tx_frm_me;
4039 
4040 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
4041 	    qdf_unlikely(pdev->hmmc_tid_override_en))
4042 		msdu_info.tid = pdev->hmmc_tid;
4043 
4044 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
4045 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4046 
4047 	while (seg_info_head->next) {
4048 		seg_info_new = seg_info_head;
4049 		seg_info_head = seg_info_head->next;
4050 		qdf_mem_free(seg_info_new);
4051 	}
4052 	qdf_mem_free(seg_info_head);
4053 
4054 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4055 	qdf_nbuf_free(nbuf);
4056 	return new_mac_cnt;
4057 
4058 fail_map:
4059 	qdf_nbuf_free(nbuf_clone);
4060 
4061 fail_clone:
4062 	dp_tx_me_free_buf(pdev, mc_uc_buf);
4063 
4064 fail_buf_alloc:
4065 	qdf_mem_free(seg_info_new);
4066 
4067 fail_seg_alloc:
4068 	dp_tx_me_mem_free(pdev, seg_info_head);
4069 
4070 free_return:
4071 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4072 	qdf_nbuf_free(nbuf);
4073 	return 1;
4074 }
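
/*
 * Caller sketch (illustrative only; the client table and vdev below are
 * made up): dp_tx_me_send_convert_ucast() takes one multicast frame plus
 * a table of client MAC addresses, transmits one unicast copy per usable
 * entry, and returns new_mac_cnt on success, or 1 when the frame could
 * not be converted and had to be freed:
 *
 *	uint8_t clients[2][DP_MAC_ADDR_LEN] = {
 *		{0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
 *		{0x00, 0x11, 0x22, 0x33, 0x44, 0x66},
 *	};
 *	uint16_t sent;
 *
 *	sent = dp_tx_me_send_convert_ucast((struct cdp_vdev *)vdev,
 *					   mcast_nbuf, clients, 2);
 */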
4075 
4076