xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision fa47688f04ef001a6dcafaebdcc3c031f15ee75e)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
41 
42 /* invalid peer id for reinject */
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /* mapping from cdp_sec_type to hal encrypt type */
46 #define MAX_CDP_SEC_TYPE 12
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
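
/*
 * Illustrative note (not part of the driver): sec_type_map is indexed by the
 * peer/vdev cdp_sec_type value to pick the HAL encrypt type programmed into
 * the TCL descriptor (see dp_tx_hw_enqueue() below). Assuming the
 * cdp_sec_type enum starts at cdp_sec_type_none = 0 and follows the same
 * order as this table, a lookup behaves roughly like:
 *
 *	uint8_t hal_enc = sec_type_map[cdp_sec_type_aes_ccmp];
 *	// hal_enc == HAL_TX_ENCRYPT_TYPE_AES_CCMP_128
 */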
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * A TX packet queue is identified by two ids: the software descriptor pool
68  * id and the DMA ring id. The id combination used for a frame depends on
69  * the enabled tx features and the hardware configuration.
70  * For example -
71  * With XPS enabled, all TX descriptor pools and DMA rings are assigned per
72  * cpu id; with no XPS (lock based resource protection), descriptor pool ids
73  * differ per vdev and the DMA ring id is the same as the single pdev id.
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
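
/*
 * Illustrative sketch (not part of the driver): a per-frame send routine is
 * expected to resolve the Tx queue before preparing descriptors. Hypothetical
 * usage, assuming a dp_tx_msdu_info_s on the caller's stack:
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *	// msdu_info.tx_queue.desc_pool_id selects the SW descriptor pool,
 *	// msdu_info.tx_queue.ring_id selects the TCL data ring.
 */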
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc: core txrx main context
112  * @seg_desc: tso segment descriptor
113  * @num_seg_desc: tso number segment descriptor
114  */
115 static void dp_tx_tso_unmap_segment(
116 		struct dp_soc *soc,
117 		struct qdf_tso_seg_elem_t *seg_desc,
118 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
119 {
120 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
121 	if (qdf_unlikely(!seg_desc)) {
122 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
123 			 __func__, __LINE__);
124 		qdf_assert(0);
125 	} else if (qdf_unlikely(!num_seg_desc)) {
126 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
127 			 __func__, __LINE__);
128 		qdf_assert(0);
129 	} else {
130 		bool is_last_seg;
131 		/* no tso segment left to do dma unmap */
132 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
133 			return;
134 
135 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
136 					true : false;
137 		qdf_nbuf_unmap_tso_segment(soc->osdev,
138 					   seg_desc, is_last_seg);
139 		num_seg_desc->num_seg.tso_cmn_num_seg--;
140 	}
141 }
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  *
147  * @soc: soc device handle
148  * @tx_desc: Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list */
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(
183 		struct dp_soc *soc,
184 		struct qdf_tso_seg_elem_t *seg_desc,
185 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
186 
187 {
188 }
189 
190 static void dp_tx_tso_desc_release(struct dp_soc *soc,
191 				   struct dp_tx_desc_s *tx_desc)
192 {
193 }
194 #endif
195 /**
196  * dp_tx_desc_release() - Release Tx Descriptor
197  * @tx_desc: Tx Descriptor
198  * @desc_pool_id: Descriptor Pool ID
199  *
200  * Deallocate all resources attached to Tx descriptor and free the Tx
201  * descriptor.
202  *
203  * Return: None
204  */
205 static void
206 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
207 {
208 	struct dp_pdev *pdev = tx_desc->pdev;
209 	struct dp_soc *soc;
210 	uint8_t comp_status = 0;
211 
212 	qdf_assert(pdev);
213 
214 	soc = pdev->soc;
215 
216 	if (tx_desc->frm_type == dp_tx_frm_tso)
217 		dp_tx_tso_desc_release(soc, tx_desc);
218 
219 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
220 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
221 
222 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
223 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
224 
225 	qdf_atomic_dec(&pdev->num_tx_outstanding);
226 
227 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
228 		qdf_atomic_dec(&pdev->num_tx_exception);
229 
230 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
231 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
232 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
233 							     soc->hal_soc);
234 	else
235 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
236 
237 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
238 		"Tx Completion Release desc %d status %d outstanding %d",
239 		tx_desc->id, comp_status,
240 		qdf_atomic_read(&pdev->num_tx_outstanding));
241 
242 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
243 	return;
244 }
245 
246 /**
247  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
248  * @vdev: DP vdev Handle
249  * @nbuf: skb
250  *
251  * Prepares and fills HTT metadata in the frame pre-header for special frames
252  * that should be transmitted using varying transmit parameters.
253  * There are 2 VDEV modes that currently need this special metadata -
254  *  1) Mesh Mode
255  *  2) DSRC Mode
256  *
257  * Return: HTT metadata size
258  *
259  */
260 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
261 		uint32_t *meta_data)
262 {
263 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
264 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
265 
266 	uint8_t htt_desc_size;
267 
268 	/* Size rounded off to a multiple of 8 bytes */
269 	uint8_t htt_desc_size_aligned;
270 
271 	uint8_t *hdr = NULL;
272 
273 	/*
274 	 * Metadata - HTT MSDU Extension header
275 	 */
276 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
277 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
278 
279 	if (vdev->mesh_vdev) {
280 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
281 					htt_desc_size_aligned)) {
282 			DP_STATS_INC(vdev,
283 				     tx_i.dropped.headroom_insufficient, 1);
284 			return 0;
285 		}
286 		/* Fill and add HTT metaheader */
287 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
288 		if (hdr == NULL) {
289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
290 					"Error in filling HTT metadata");
291 
292 			return 0;
293 		}
294 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
295 
296 	} else if (vdev->opmode == wlan_op_mode_ocb) {
297 		/* Todo - Add support for DSRC */
298 	}
299 
300 	return htt_desc_size_aligned;
301 }
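
/*
 * Worked example (illustrative only) of the 8-byte rounding used above: if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were, say, 26 bytes, then
 * htt_desc_size_aligned = (26 + 7) & ~0x7 = 32, so 32 bytes of headroom are
 * pushed and the metadata copied into them, and 32 is returned so the caller
 * can account for the pre-header in pkt_offset. A size that is already a
 * multiple of 8 (e.g. 24) is left unchanged.
 */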
302 
303 /**
304  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
305  * @tso_seg: TSO segment to process
306  * @ext_desc: Pointer to MSDU extension descriptor
307  *
308  * Return: void
309  */
310 #if defined(FEATURE_TSO)
311 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
312 		void *ext_desc)
313 {
314 	uint8_t num_frag;
315 	uint32_t tso_flags;
316 
317 	/*
318 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
319 	 * tcp_flag_mask
320 	 *
321 	 * Checksum enable flags are set in TCL descriptor and not in Extension
322 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
323 	 */
324 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
325 
326 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
327 
328 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
329 		tso_seg->tso_flags.ip_len);
330 
331 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
332 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
333 
334 
335 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
336 		uint32_t lo = 0;
337 		uint32_t hi = 0;
338 
339 		qdf_dmaaddr_to_32s(
340 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
341 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
342 			tso_seg->tso_frags[num_frag].length);
343 	}
344 
345 	return;
346 }
347 #else
348 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
349 		void *ext_desc)
350 {
351 	return;
352 }
353 #endif
354 
355 #if defined(FEATURE_TSO)
356 /**
357  * dp_tx_free_tso_seg_list() - Loop through the tso segments
358  *                             allocated and free them
359  *
360  * @soc: soc handle
361  * @free_seg: list of tso segments
362  * @msdu_info: msdu descriptor
363  *
364  * Return: void
365  */
366 static void dp_tx_free_tso_seg_list(
367 		struct dp_soc *soc,
368 		struct qdf_tso_seg_elem_t *free_seg,
369 		struct dp_tx_msdu_info_s *msdu_info)
370 {
371 	struct qdf_tso_seg_elem_t *next_seg;
372 
373 	while (free_seg) {
374 		next_seg = free_seg->next;
375 		dp_tx_tso_desc_free(soc,
376 				    msdu_info->tx_queue.desc_pool_id,
377 				    free_seg);
378 		free_seg = next_seg;
379 	}
380 }
381 
382 /**
383  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
384  *                                 allocated and free them
385  *
386  * @soc:  soc handle
387  * @free_num_seg: list of tso number segments
388  * @msdu_info: msdu descriptor
389  * Return: void
390  */
391 static void dp_tx_free_tso_num_seg_list(
392 		struct dp_soc *soc,
393 		struct qdf_tso_num_seg_elem_t *free_num_seg,
394 		struct dp_tx_msdu_info_s *msdu_info)
395 {
396 	struct qdf_tso_num_seg_elem_t *next_num_seg;
397 
398 	while (free_num_seg) {
399 		next_num_seg = free_num_seg->next;
400 		dp_tso_num_seg_free(soc,
401 				    msdu_info->tx_queue.desc_pool_id,
402 				    free_num_seg);
403 		free_num_seg = next_num_seg;
404 	}
405 }
406 
407 /**
408  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
409  *                              do dma unmap for each segment
410  *
411  * @soc: soc handle
412  * @free_seg: list of tso segments
413  * @num_seg_desc: tso number segment descriptor
414  *
415  * Return: void
416  */
417 static void dp_tx_unmap_tso_seg_list(
418 		struct dp_soc *soc,
419 		struct qdf_tso_seg_elem_t *free_seg,
420 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
421 {
422 	struct qdf_tso_seg_elem_t *next_seg;
423 
424 	if (qdf_unlikely(!num_seg_desc)) {
425 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
426 		return;
427 	}
428 
429 	while (free_seg) {
430 		next_seg = free_seg->next;
431 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
432 		free_seg = next_seg;
433 	}
434 }
435 
436 /**
437  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
438  *				     free the tso segments descriptor and
439  *				     tso num segments descriptor
440  *
441  * @soc:  soc handle
442  * @msdu_info: msdu descriptor
443  * @tso_seg_unmap: flag to show if dma unmap is necessary
444  *
445  * Return: void
446  */
447 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
448 					  struct dp_tx_msdu_info_s *msdu_info,
449 					  bool tso_seg_unmap)
450 {
451 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
452 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
453 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
454 					tso_info->tso_num_seg_list;
455 
456 	/* do dma unmap for each segment */
457 	if (tso_seg_unmap)
458 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
459 
460 	/* free all tso num segment descriptors (there is typically only one) */
461 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
462 
463 	/* free all tso segment descriptor */
464 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
465 }
466 
467 /**
468  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
469  * @vdev: virtual device handle
470  * @msdu: network buffer
471  * @msdu_info: meta data associated with the msdu
472  *
473  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
474  */
475 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
476 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
477 {
478 	struct qdf_tso_seg_elem_t *tso_seg;
479 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
480 	struct dp_soc *soc = vdev->pdev->soc;
481 	struct qdf_tso_info_t *tso_info;
482 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
483 
484 	tso_info = &msdu_info->u.tso_info;
485 	tso_info->curr_seg = NULL;
486 	tso_info->tso_seg_list = NULL;
487 	tso_info->num_segs = num_seg;
488 	msdu_info->frm_type = dp_tx_frm_tso;
489 	tso_info->tso_num_seg_list = NULL;
490 
491 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
492 
493 	while (num_seg) {
494 		tso_seg = dp_tx_tso_desc_alloc(
495 				soc, msdu_info->tx_queue.desc_pool_id);
496 		if (tso_seg) {
497 			tso_seg->next = tso_info->tso_seg_list;
498 			tso_info->tso_seg_list = tso_seg;
499 			num_seg--;
500 		} else {
501 			DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
502 				 __func__);
503 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
504 
505 			return QDF_STATUS_E_NOMEM;
506 		}
507 	}
508 
509 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
510 
511 	tso_num_seg = dp_tso_num_seg_alloc(soc,
512 			msdu_info->tx_queue.desc_pool_id);
513 
514 	if (tso_num_seg) {
515 		tso_num_seg->next = tso_info->tso_num_seg_list;
516 		tso_info->tso_num_seg_list = tso_num_seg;
517 	} else {
518 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
519 			 __func__);
520 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
521 
522 		return QDF_STATUS_E_NOMEM;
523 	}
524 
525 	msdu_info->num_seg =
526 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
527 
528 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
529 			msdu_info->num_seg);
530 
531 	if (!(msdu_info->num_seg)) {
532 		/*
533 		 * Free allocated TSO seg desc and number seg desc,
534 		 * do unmap for segments if dma map has been done.
535 		 */
536 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
537 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
538 
539 		return QDF_STATUS_E_INVAL;
540 	}
541 
542 	tso_info->curr_seg = tso_info->tso_seg_list;
543 
544 	return QDF_STATUS_SUCCESS;
545 }
546 #else
547 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
548 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
549 {
550 	return QDF_STATUS_E_NOMEM;
551 }
552 #endif
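
/*
 * Illustrative sketch (not part of the driver): for a TSO-marked jumbo nbuf
 * the send path is expected to populate msdu_info via dp_tx_prepare_tso() and
 * then hand the frame to the multi-segment enqueue routine, roughly:
 *
 *	if (qdf_nbuf_is_tso(nbuf)) {
 *		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
 *			// allocation or dma mapping failed; drop the nbuf
 *		} else {
 *			nbuf = dp_tx_send_msdu_multiple(vdev, nbuf,
 *							&msdu_info);
 *		}
 *	}
 *
 * msdu_info.u.tso_info.curr_seg then walks the segment list, one hardware
 * enqueue per segment (see dp_tx_send_msdu_multiple() below).
 */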
553 
554 /**
555  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
556  * @vdev: DP Vdev handle
557  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
558  * @desc_pool_id: Descriptor Pool ID
559  *
560  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
561  */
562 static
563 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
564 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
565 {
566 	uint8_t i;
567 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
568 	struct dp_tx_seg_info_s *seg_info;
569 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
570 	struct dp_soc *soc = vdev->pdev->soc;
571 
572 	/* Allocate an extension descriptor */
573 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
574 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
575 
576 	if (!msdu_ext_desc) {
577 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
578 		return NULL;
579 	}
580 
581 	if (msdu_info->exception_fw &&
582 			qdf_unlikely(vdev->mesh_vdev)) {
583 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
584 				&msdu_info->meta_data[0],
585 				sizeof(struct htt_tx_msdu_desc_ext2_t));
586 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
587 	}
588 
589 	switch (msdu_info->frm_type) {
590 	case dp_tx_frm_sg:
591 	case dp_tx_frm_me:
592 	case dp_tx_frm_raw:
593 		seg_info = msdu_info->u.sg_info.curr_seg;
594 		/* Update the buffer pointers in MSDU Extension Descriptor */
595 		for (i = 0; i < seg_info->frag_cnt; i++) {
596 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
597 				seg_info->frags[i].paddr_lo,
598 				seg_info->frags[i].paddr_hi,
599 				seg_info->frags[i].len);
600 		}
601 
602 		break;
603 
604 	case dp_tx_frm_tso:
605 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
606 				&cached_ext_desc[0]);
607 		break;
608 
609 
610 	default:
611 		break;
612 	}
613 
614 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
615 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
616 
617 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
618 			msdu_ext_desc->vaddr);
619 
620 	return msdu_ext_desc;
621 }
622 
623 /**
624  * dp_tx_trace_pkt() - Trace TX packet at DP layer
625  *
626  * @skb: skb to be traced
627  * @msdu_id: msdu_id of the packet
628  * @vdev_id: vdev_id of the packet
629  *
630  * Return: None
631  */
632 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
633 			    uint8_t vdev_id)
634 {
635 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
636 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
637 	DPTRACE(qdf_dp_trace_ptr(skb,
638 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
639 				 QDF_TRACE_DEFAULT_PDEV_ID,
640 				 qdf_nbuf_data_addr(skb),
641 				 sizeof(qdf_nbuf_data(skb)),
642 				 msdu_id, vdev_id));
643 
644 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
645 
646 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
647 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
648 				      msdu_id, QDF_TX));
649 }
650 
651 /**
652  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
653  * @vdev: DP vdev handle
654  * @nbuf: skb
655  * @desc_pool_id: Descriptor pool ID
656  * @msdu_info: MSDU info to be setup in MSDU descriptor
657  * @tx_exc_metadata: Handle that holds exception path metadata
658  * Allocate and prepare Tx descriptor with msdu information.
659  *
660  * Return: Pointer to Tx Descriptor on success,
661  *         NULL on failure
662  */
663 static
664 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
665 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
666 		struct dp_tx_msdu_info_s *msdu_info,
667 		struct cdp_tx_exception_metadata *tx_exc_metadata)
668 {
669 	uint8_t align_pad;
670 	uint8_t is_exception = 0;
671 	uint8_t htt_hdr_size;
672 	struct ether_header *eh;
673 	struct dp_tx_desc_s *tx_desc;
674 	struct dp_pdev *pdev = vdev->pdev;
675 	struct dp_soc *soc = pdev->soc;
676 
677 	/* Allocate software Tx descriptor */
678 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
679 	if (qdf_unlikely(!tx_desc)) {
680 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
681 		return NULL;
682 	}
683 
684 	/* Flow control/Congestion Control counters */
685 	qdf_atomic_inc(&pdev->num_tx_outstanding);
686 
687 	/* Initialize the SW tx descriptor */
688 	tx_desc->nbuf = nbuf;
689 	tx_desc->frm_type = dp_tx_frm_std;
690 	tx_desc->tx_encap_type = (tx_exc_metadata ?
691 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
692 	tx_desc->vdev = vdev;
693 	tx_desc->pdev = pdev;
694 	tx_desc->msdu_ext_desc = NULL;
695 	tx_desc->pkt_offset = 0;
696 
697 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
698 
699 	/*
700 	 * For special modes (vdev_type == ocb or mesh), data frames should be
701 	 * transmitted using varying transmit parameters (tx spec) which include
702 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
703 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
704 	 * These frames are sent as exception packets to firmware.
705 	 *
706 	 * The HW requires the metadata to start at an 8-byte aligned
707 	 * address, so an alignment pad is added at the start of the buffer.
708 	 * The HTT metadata itself must be a multiple of 8 bytes so that,
709 	 * together with align_pad, the buffer pointer stays 8-byte aligned.
710 	 *
711 	 *  |-----------------------------|
712 	 *  |                             |
713 	 *  |-----------------------------| <-----Buffer Pointer Address given
714 	 *  |                             |  ^    in HW descriptor (aligned)
715 	 *  |       HTT Metadata          |  |
716 	 *  |                             |  |
717 	 *  |                             |  | Packet Offset given in descriptor
718 	 *  |                             |  |
719 	 *  |-----------------------------|  |
720 	 *  |       Alignment Pad         |  v
721 	 *  |-----------------------------| <----- Actual buffer start address
722 	 *  |        SKB Data             |           (Unaligned)
723 	 *  |                             |
724 	 *  |                             |
725 	 *  |                             |
726 	 *  |                             |
727 	 *  |                             |
728 	 *  |-----------------------------|
729 	 */
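	/*
	 * Worked example (illustrative only): if qdf_nbuf_data(nbuf) ends in
	 * 0x...c, then align_pad = 0xc & 0x7 = 4, so 4 pad bytes plus the
	 * 8-byte aligned HTT metadata are pushed, and pkt_offset is set to
	 * align_pad + htt_hdr_size so HW can locate the actual payload.
	 */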
730 	if (qdf_unlikely((msdu_info->exception_fw)) ||
731 				(vdev->opmode == wlan_op_mode_ocb)) {
732 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
733 
734 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
735 			DP_STATS_INC(vdev,
736 				     tx_i.dropped.headroom_insufficient, 1);
737 			goto failure;
738 		}
739 
740 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
741 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
742 					"qdf_nbuf_push_head failed");
743 			goto failure;
744 		}
745 
746 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
747 				msdu_info->meta_data);
748 		if (htt_hdr_size == 0)
749 			goto failure;
750 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
751 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
752 		is_exception = 1;
753 	}
754 
755 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
756 				qdf_nbuf_map(soc->osdev, nbuf,
757 					QDF_DMA_TO_DEVICE))) {
758 		/* Handle failure */
759 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
760 				"qdf_nbuf_map failed");
761 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
762 		goto failure;
763 	}
764 
765 	if (qdf_unlikely(vdev->nawds_enabled)) {
766 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
767 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
768 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
769 			is_exception = 1;
770 		}
771 	}
772 
773 #if !TQM_BYPASS_WAR
774 	if (is_exception || tx_exc_metadata)
775 #endif
776 	{
777 		/* Temporary WAR due to TQM VP issues */
778 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
779 		qdf_atomic_inc(&pdev->num_tx_exception);
780 	}
781 
782 	return tx_desc;
783 
784 failure:
785 	dp_tx_desc_release(tx_desc, desc_pool_id);
786 	return NULL;
787 }
788 
789 /**
790  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
791  * @vdev: DP vdev handle
792  * @nbuf: skb
793  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
794  * @desc_pool_id: Descriptor Pool ID
795  *
796  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
797  * information. For frames with fragments, allocate and prepare
798  * an MSDU extension descriptor
799  *
800  * Return: Pointer to Tx Descriptor on success,
801  *         NULL on failure
802  */
803 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
804 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
805 		uint8_t desc_pool_id)
806 {
807 	struct dp_tx_desc_s *tx_desc;
808 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
809 	struct dp_pdev *pdev = vdev->pdev;
810 	struct dp_soc *soc = pdev->soc;
811 
812 	/* Allocate software Tx descriptor */
813 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
814 	if (!tx_desc) {
815 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
816 		return NULL;
817 	}
818 
819 	/* Flow control/Congestion Control counters */
820 	qdf_atomic_inc(&pdev->num_tx_outstanding);
821 
822 	/* Initialize the SW tx descriptor */
823 	tx_desc->nbuf = nbuf;
824 	tx_desc->frm_type = msdu_info->frm_type;
825 	tx_desc->tx_encap_type = vdev->tx_encap_type;
826 	tx_desc->vdev = vdev;
827 	tx_desc->pdev = pdev;
828 	tx_desc->pkt_offset = 0;
829 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
830 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
831 
832 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
833 
834 	/* Handle scattered frames - TSO/SG/ME */
835 	/* Allocate and prepare an extension descriptor for scattered frames */
836 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
837 	if (!msdu_ext_desc) {
838 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
839 				"%s Tx Extension Descriptor Alloc Fail",
840 				__func__);
841 		goto failure;
842 	}
843 
844 #if TQM_BYPASS_WAR
845 	/* Temporary WAR due to TQM VP issues */
846 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
847 	qdf_atomic_inc(&pdev->num_tx_exception);
848 #endif
849 	if (qdf_unlikely(msdu_info->exception_fw))
850 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
851 
852 	tx_desc->msdu_ext_desc = msdu_ext_desc;
853 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
854 
855 	return tx_desc;
856 failure:
857 	dp_tx_desc_release(tx_desc, desc_pool_id);
858 	return NULL;
859 }
860 
861 /**
862  * dp_tx_prepare_raw() - Prepare RAW packet TX
863  * @vdev: DP vdev handle
864  * @nbuf: buffer pointer
865  * @seg_info: Pointer to Segment info Descriptor to be prepared
866  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
867  *     descriptor
868  *
869  * Return: nbuf on success, NULL on failure
870  */
871 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
872 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
873 {
874 	qdf_nbuf_t curr_nbuf = NULL;
875 	uint16_t total_len = 0;
876 	qdf_dma_addr_t paddr;
877 	int32_t i;
878 	int32_t mapped_buf_num = 0;
879 
880 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
881 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
882 
883 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
884 
885 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
886 	if (vdev->raw_mode_war &&
887 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
888 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
889 
890 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
891 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
892 
893 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
894 					QDF_DMA_TO_DEVICE)) {
895 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
896 				"%s dma map error ", __func__);
897 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
898 			mapped_buf_num = i;
899 			goto error;
900 		}
901 
902 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
903 		seg_info->frags[i].paddr_lo = paddr;
904 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
905 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
906 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
907 		total_len += qdf_nbuf_len(curr_nbuf);
908 	}
909 
910 	seg_info->frag_cnt = i;
911 	seg_info->total_len = total_len;
912 	seg_info->next = NULL;
913 
914 	sg_info->curr_seg = seg_info;
915 
916 	msdu_info->frm_type = dp_tx_frm_raw;
917 	msdu_info->num_seg = 1;
918 
919 	return nbuf;
920 
921 error:
922 	i = 0;
923 	while (nbuf) {
924 		curr_nbuf = nbuf;
925 		if (i < mapped_buf_num) {
926 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
927 			i++;
928 		}
929 		nbuf = qdf_nbuf_next(nbuf);
930 		qdf_nbuf_free(curr_nbuf);
931 	}
932 	return NULL;
933 
934 }
935 
936 /**
937  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
938  * @soc: DP Soc Handle
939  * @vdev: DP vdev handle
940  * @tx_desc: Tx Descriptor Handle
941  * @tid: TID from HLOS for overriding default DSCP-TID mapping
942  * @fw_metadata: Metadata to send to Target Firmware along with frame
943  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
944  * @tx_exc_metadata: Handle that holds exception path meta data
945  *
946  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
947  *  from software Tx descriptor
948  *
949  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
950  */
951 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
952 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
953 				   uint16_t fw_metadata, uint8_t ring_id,
954 				   struct cdp_tx_exception_metadata
955 					*tx_exc_metadata)
956 {
957 	uint8_t type;
958 	uint16_t length;
959 	void *hal_tx_desc, *hal_tx_desc_cached;
960 	qdf_dma_addr_t dma_addr;
961 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
962 
963 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
964 			tx_exc_metadata->sec_type : vdev->sec_type);
965 
966 	/* Return Buffer Manager ID */
967 	uint8_t bm_id = ring_id;
968 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
969 
970 	hal_tx_desc_cached = (void *) cached_desc;
971 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
972 
973 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
974 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
975 		type = HAL_TX_BUF_TYPE_EXT_DESC;
976 		dma_addr = tx_desc->msdu_ext_desc->paddr;
977 	} else {
978 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
979 		type = HAL_TX_BUF_TYPE_BUFFER;
980 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
981 	}
982 
983 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
984 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
985 					dma_addr, bm_id, tx_desc->id,
986 					type, soc->hal_soc);
987 
988 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
989 		return QDF_STATUS_E_RESOURCES;
990 
991 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
992 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
993 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
994 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
995 				vdev->pdev->lmac_id);
996 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
997 				    vdev->search_type);
998 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
999 				     vdev->bss_ast_hash);
1000 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1001 					  vdev->dscp_tid_map_id);
1002 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1003 			sec_type_map[sec_type]);
1004 
1005 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1006 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1007 			__func__, length, type, (uint64_t)dma_addr,
1008 			tx_desc->pkt_offset, tx_desc->id);
1009 
1010 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1011 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1012 
1013 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1014 			vdev->hal_desc_addr_search_flags);
1015 
1016 	/* verify checksum offload configuration*/
1017 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1018 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1019 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1020 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1021 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1022 	}
1023 
1024 	if (tid != HTT_TX_EXT_TID_INVALID)
1025 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1026 
1027 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1028 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
1029 
1030 
1031 	/* Sync cached descriptor with HW */
1032 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1033 
1034 	if (!hal_tx_desc) {
1035 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1036 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
1037 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1038 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1039 		return QDF_STATUS_E_RESOURCES;
1040 	}
1041 
1042 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1043 
1044 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1045 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1046 
1047 	return QDF_STATUS_SUCCESS;
1048 }
1049 
1050 
1051 /**
1052  * dp_cce_classify() - Classify the frame based on CCE rules
1053  * @vdev: DP vdev handle
1054  * @nbuf: skb
1055  *
1056  * Classify frames based on CCE rules
1057  * Return: bool (true if classified,
1058  *               else false)
1059  */
1060 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1061 {
1062 	struct ether_header *eh = NULL;
1063 	uint16_t   ether_type;
1064 	qdf_llc_t *llcHdr;
1065 	qdf_nbuf_t nbuf_clone = NULL;
1066 	qdf_dot3_qosframe_t *qos_wh = NULL;
1067 
1068 	/* for mesh packets don't do any classification */
1069 	if (qdf_unlikely(vdev->mesh_vdev))
1070 		return false;
1071 
1072 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1073 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1074 		ether_type = eh->ether_type;
1075 		llcHdr = (qdf_llc_t *)(nbuf->data +
1076 					sizeof(struct ether_header));
1077 	} else {
1078 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1079 		/* For encrypted packets don't do any classification */
1080 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1081 			return false;
1082 
1083 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1084 			if (qdf_unlikely(
1085 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1086 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1087 
1088 				ether_type = *(uint16_t *)(nbuf->data
1089 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1090 						+ sizeof(qdf_llc_t)
1091 						- sizeof(ether_type));
1092 				llcHdr = (qdf_llc_t *)(nbuf->data +
1093 						QDF_IEEE80211_4ADDR_HDR_LEN);
1094 			} else {
1095 				ether_type = *(uint16_t *)(nbuf->data
1096 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1097 						+ sizeof(qdf_llc_t)
1098 						- sizeof(ether_type));
1099 				llcHdr = (qdf_llc_t *)(nbuf->data +
1100 					QDF_IEEE80211_3ADDR_HDR_LEN);
1101 			}
1102 
1103 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1104 				&& (ether_type ==
1105 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1106 
1107 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1108 				return true;
1109 			}
1110 		}
1111 
1112 		return false;
1113 	}
1114 
1115 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1116 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1117 				sizeof(*llcHdr));
1118 		nbuf_clone = qdf_nbuf_clone(nbuf);
1119 		if (qdf_unlikely(nbuf_clone)) {
1120 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1121 
1122 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1123 				qdf_nbuf_pull_head(nbuf_clone,
1124 						sizeof(qdf_net_vlanhdr_t));
1125 			}
1126 		}
1127 	} else {
1128 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1129 			nbuf_clone = qdf_nbuf_clone(nbuf);
1130 			if (qdf_unlikely(nbuf_clone)) {
1131 				qdf_nbuf_pull_head(nbuf_clone,
1132 					sizeof(qdf_net_vlanhdr_t));
1133 			}
1134 		}
1135 	}
1136 
1137 	if (qdf_unlikely(nbuf_clone))
1138 		nbuf = nbuf_clone;
1139 
1140 
1141 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1142 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1143 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1144 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1145 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1146 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1147 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1148 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1149 		if (qdf_unlikely(nbuf_clone != NULL))
1150 			qdf_nbuf_free(nbuf_clone);
1151 		return true;
1152 	}
1153 
1154 	if (qdf_unlikely(nbuf_clone != NULL))
1155 		qdf_nbuf_free(nbuf_clone);
1156 
1157 	return false;
1158 }
1159 
1160 /**
1161  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1162  * @vdev: DP vdev handle
1163  * @nbuf: skb
1164  *
1165  * Extract the DSCP or PCP information from frame and map into TID value.
1166  * Software based TID classification is required when more than 2 DSCP-TID
1167  * mapping tables are needed.
1168  * Hardware supports 2 DSCP-TID mapping tables
1169  *
1170  * Return: void
1171  */
1172 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1173 		struct dp_tx_msdu_info_s *msdu_info)
1174 {
1175 	uint8_t tos = 0, dscp_tid_override = 0;
1176 	uint8_t *hdr_ptr, *L3datap;
1177 	uint8_t is_mcast = 0;
1178 	struct ether_header *eh = NULL;
1179 	qdf_ethervlan_header_t *evh = NULL;
1180 	uint16_t   ether_type;
1181 	qdf_llc_t *llcHdr;
1182 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1183 
1184 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1185 
1186 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1187 		return;
1188 
1189 	/* for mesh packets don't do any classification */
1190 	if (qdf_unlikely(vdev->mesh_vdev))
1191 		return;
1192 
1193 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1194 		eh = (struct ether_header *) nbuf->data;
1195 		hdr_ptr = eh->ether_dhost;
1196 		L3datap = hdr_ptr + sizeof(struct ether_header);
1197 	} else {
1198 		qdf_dot3_qosframe_t *qos_wh =
1199 			(qdf_dot3_qosframe_t *) nbuf->data;
1200 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1201 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1202 		return;
1203 	}
1204 
1205 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1206 	ether_type = eh->ether_type;
1207 
1208 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1209 	/*
1210 	 * Check if packet is dot3 or eth2 type.
1211 	 */
1212 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1213 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1214 				sizeof(*llcHdr));
1215 
1216 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1217 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1218 				sizeof(*llcHdr);
1219 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1220 					+ sizeof(*llcHdr) +
1221 					sizeof(qdf_net_vlanhdr_t));
1222 		} else {
1223 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1224 				sizeof(*llcHdr);
1225 		}
1226 	} else {
1227 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1228 			evh = (qdf_ethervlan_header_t *) eh;
1229 			ether_type = evh->ether_type;
1230 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1231 		}
1232 	}
1233 
1234 	/*
1235 	 * Find priority from IP TOS DSCP field
1236 	 */
1237 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1238 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1239 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1240 			/* Only for unicast frames */
1241 			if (!is_mcast) {
1242 				/* send it on VO queue */
1243 				msdu_info->tid = DP_VO_TID;
1244 			}
1245 		} else {
1246 			/*
1247 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1248 			 * from TOS byte.
1249 			 */
1250 			tos = ip->ip_tos;
1251 			dscp_tid_override = 1;
1252 
1253 		}
1254 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1255 		/* TODO
1256 		 * use flowlabel
1257 		 * igmpmld cases to be handled in phase 2
1258 		 */
1259 		unsigned long ver_pri_flowlabel;
1260 		unsigned long pri;
1261 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1262 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1263 			DP_IPV6_PRIORITY_SHIFT;
1264 		tos = pri;
1265 		dscp_tid_override = 1;
1266 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1267 		msdu_info->tid = DP_VO_TID;
1268 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1269 		/* Only for unicast frames */
1270 		if (!is_mcast) {
1271 			/* send ucast arp on VO queue */
1272 			msdu_info->tid = DP_VO_TID;
1273 		}
1274 	}
1275 
1276 	/*
1277 	 * Assign all MCAST packets to BE
1278 	 */
1279 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1280 		if (is_mcast) {
1281 			tos = 0;
1282 			dscp_tid_override = 1;
1283 		}
1284 	}
1285 
1286 	if (dscp_tid_override == 1) {
1287 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1288 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1289 	}
1290 	return;
1291 }
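
/*
 * Worked example (illustrative only), assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3f as defined in the DP headers: an IPv4 TOS byte of
 * 0xb8 (DSCP 46, Expedited Forwarding) gives tos = (0xb8 >> 2) & 0x3f = 46,
 * and the TID is then read from pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */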
1292 
1293 #ifdef CONVERGED_TDLS_ENABLE
1294 /**
1295  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1296  * @tx_desc: TX descriptor
1297  *
1298  * Return: None
1299  */
1300 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1301 {
1302 	if (tx_desc->vdev) {
1303 		if (tx_desc->vdev->is_tdls_frame) {
1304 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1305 			tx_desc->vdev->is_tdls_frame = false;
1306 		}
1307 	}
1308 }
1309 
1310 /**
1311  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1312  * @tx_desc: TX descriptor
1313  * @vdev: datapath vdev handle
1314  *
1315  * Return: None
1316  */
1317 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1318 				  struct dp_vdev *vdev)
1319 {
1320 	struct hal_tx_completion_status ts = {0};
1321 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1322 
1323 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1324 	if (vdev->tx_non_std_data_callback.func) {
1325 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1326 		vdev->tx_non_std_data_callback.func(
1327 				vdev->tx_non_std_data_callback.ctxt,
1328 				nbuf, ts.status);
1329 		return;
1330 	}
1331 }
1332 #endif
1333 
1334 /**
1335  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1336  * @vdev: DP vdev handle
1337  * @nbuf: skb
1338  * @msdu_info: MSDU info to be setup in the MSDU descriptor; it carries
1339  *             the TID from HLOS, the metadata to the fw and the Tx queue
1340  *             to be used for this Tx frame
1341  * @peer_id: peer_id of the peer in case of NAWDS frames
1342  * @tx_exc_metadata: Handle that holds exception path metadata
1343  *
1344  * Return: NULL on success,
1345  *         nbuf when it fails to send
1346  */
1347 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1348 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1349 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1350 {
1351 	struct dp_pdev *pdev = vdev->pdev;
1352 	struct dp_soc *soc = pdev->soc;
1353 	struct dp_tx_desc_s *tx_desc;
1354 	QDF_STATUS status;
1355 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1356 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1357 	uint16_t htt_tcl_metadata = 0;
1358 	uint8_t tid = msdu_info->tid;
1359 
1360 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1361 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1362 			msdu_info, tx_exc_metadata);
1363 	if (!tx_desc) {
1364 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1365 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1366 			  __func__, vdev, tx_q->desc_pool_id);
1367 		return nbuf;
1368 	}
1369 
1370 	if (qdf_unlikely(soc->cce_disable)) {
1371 		if (dp_cce_classify(vdev, nbuf) == true) {
1372 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1373 			tid = DP_VO_TID;
1374 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1375 		}
1376 	}
1377 
1378 	dp_tx_update_tdls_flags(tx_desc);
1379 
1380 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1381 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1382 				"%s %d : HAL RING Access Failed -- %pK",
1383 				__func__, __LINE__, hal_srng);
1384 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1385 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1386 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1387 		goto fail_return;
1388 	}
1389 
1390 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1391 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1392 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1393 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1394 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1395 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1396 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1397 				peer_id);
1398 	} else
1399 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1400 
1401 
1402 	if (msdu_info->exception_fw) {
1403 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1404 	}
1405 
1406 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1407 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1408 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1409 
1410 	if (status != QDF_STATUS_SUCCESS) {
1411 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1412 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1413 			  __func__, tx_desc, tx_q->ring_id);
1414 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1415 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1416 		goto fail_return;
1417 	}
1418 
1419 	nbuf = NULL;
1420 
1421 fail_return:
1422 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1423 		hal_srng_access_end(soc->hal_soc, hal_srng);
1424 		hif_pm_runtime_put(soc->hif_handle);
1425 	} else {
1426 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1427 	}
1428 
1429 	return nbuf;
1430 }
1431 
1432 /**
1433  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1434  * @vdev: DP vdev handle
1435  * @nbuf: skb
1436  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1437  *
1438  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1439  *
1440  * Return: NULL on success,
1441  *         nbuf when it fails to send
1442  */
1443 #if QDF_LOCK_STATS
1444 static noinline
1445 #else
1446 static
1447 #endif
1448 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1449 				    struct dp_tx_msdu_info_s *msdu_info)
1450 {
1451 	uint8_t i;
1452 	struct dp_pdev *pdev = vdev->pdev;
1453 	struct dp_soc *soc = pdev->soc;
1454 	struct dp_tx_desc_s *tx_desc;
1455 	bool is_cce_classified = false;
1456 	QDF_STATUS status;
1457 	uint16_t htt_tcl_metadata = 0;
1458 
1459 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1460 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1461 
1462 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1463 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1464 				"%s %d : HAL RING Access Failed -- %pK",
1465 				__func__, __LINE__, hal_srng);
1466 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1467 		return nbuf;
1468 	}
1469 
1470 	if (qdf_unlikely(soc->cce_disable)) {
1471 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1472 		if (is_cce_classified) {
1473 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1474 			msdu_info->tid = DP_VO_TID;
1475 		}
1476 	}
1477 
1478 	if (msdu_info->frm_type == dp_tx_frm_me)
1479 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1480 
1481 	i = 0;
1482 	/* Print statement to track i and num_seg */
1483 	/*
1484 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1485 	 * descriptors using information in msdu_info
1486 	 */
1487 	while (i < msdu_info->num_seg) {
1488 		/*
1489 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1490 		 * descriptor
1491 		 */
1492 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1493 				tx_q->desc_pool_id);
1494 
1495 		if (!tx_desc) {
1496 			if (msdu_info->frm_type == dp_tx_frm_me) {
1497 				dp_tx_me_free_buf(pdev,
1498 					(void *)(msdu_info->u.sg_info
1499 						.curr_seg->frags[0].vaddr));
1500 			}
1501 			goto done;
1502 		}
1503 
1504 		if (msdu_info->frm_type == dp_tx_frm_me) {
1505 			tx_desc->me_buffer =
1506 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1507 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1508 		}
1509 
1510 		if (is_cce_classified)
1511 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1512 
1513 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1514 		if (msdu_info->exception_fw) {
1515 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1516 		}
1517 
1518 		/*
1519 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1520 		 */
1521 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1522 			htt_tcl_metadata, tx_q->ring_id, NULL);
1523 
1524 		if (status != QDF_STATUS_SUCCESS) {
1525 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1526 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1527 				  __func__, tx_desc, tx_q->ring_id);
1528 
1529 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1530 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1531 
1532 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1533 			goto done;
1534 		}
1535 
1536 		/*
1537 		 * TODO
1538 		 * if tso_info structure can be modified to have curr_seg
1539 		 * as first element, following 2 blocks of code (for TSO and SG)
1540 		 * can be combined into 1
1541 		 */
1542 
1543 		/*
1544 		 * For frames with multiple segments (TSO, ME), jump to next
1545 		 * segment.
1546 		 */
1547 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1548 			if (msdu_info->u.tso_info.curr_seg->next) {
1549 				msdu_info->u.tso_info.curr_seg =
1550 					msdu_info->u.tso_info.curr_seg->next;
1551 
1552 				/*
1553 				 * If this is a jumbo nbuf, then increment the number of
1554 				 * nbuf users for each additional segment of the msdu.
1555 				 * This will ensure that the skb is freed only after
1556 				 * receiving tx completion for all segments of an nbuf
1557 				 */
1558 				qdf_nbuf_inc_users(nbuf);
1559 
1560 				/* Check with MCL if this is needed */
1561 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1562 			}
1563 		}
1564 
1565 		/*
1566 		 * For Multicast-Unicast converted packets,
1567 		 * each converted frame (for a client) is represented as
1568 		 * 1 segment
1569 		 */
1570 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1571 				(msdu_info->frm_type == dp_tx_frm_me)) {
1572 			if (msdu_info->u.sg_info.curr_seg->next) {
1573 				msdu_info->u.sg_info.curr_seg =
1574 					msdu_info->u.sg_info.curr_seg->next;
1575 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1576 			}
1577 		}
1578 		i++;
1579 	}
1580 
1581 	nbuf = NULL;
1582 
1583 done:
1584 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1585 		hal_srng_access_end(soc->hal_soc, hal_srng);
1586 		hif_pm_runtime_put(soc->hif_handle);
1587 	} else {
1588 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1589 	}
1590 
1591 	return nbuf;
1592 }
1593 
1594 /**
1595  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1596  *                     for SG frames
1597  * @vdev: DP vdev handle
1598  * @nbuf: skb
1599  * @seg_info: Pointer to Segment info Descriptor to be prepared
1600  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1601  *
1602  * Return: nbuf on success,
1603  *         NULL on failure
1604  */
1605 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1606 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1607 {
1608 	uint32_t cur_frag, nr_frags;
1609 	qdf_dma_addr_t paddr;
1610 	struct dp_tx_sg_info_s *sg_info;
1611 
1612 	sg_info = &msdu_info->u.sg_info;
1613 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1614 
1615 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1616 				QDF_DMA_TO_DEVICE)) {
1617 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1618 				"dma map error");
1619 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1620 
1621 		qdf_nbuf_free(nbuf);
1622 		return NULL;
1623 	}
1624 
1625 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1626 	seg_info->frags[0].paddr_lo = paddr;
1627 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1628 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1629 	seg_info->frags[0].vaddr = (void *) nbuf;
1630 
1631 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1632 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1633 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1634 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1635 					"frag dma map error");
1636 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1637 			qdf_nbuf_free(nbuf);
1638 			return NULL;
1639 		}
1640 
1641 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1642 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1643 		seg_info->frags[cur_frag + 1].paddr_hi =
1644 			((uint64_t) paddr) >> 32;
1645 		seg_info->frags[cur_frag + 1].len =
1646 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1647 	}
1648 
1649 	seg_info->frag_cnt = (cur_frag + 1);
1650 	seg_info->total_len = qdf_nbuf_len(nbuf);
1651 	seg_info->next = NULL;
1652 
1653 	sg_info->curr_seg = seg_info;
1654 
1655 	msdu_info->frm_type = dp_tx_frm_sg;
1656 	msdu_info->num_seg = 1;
1657 
1658 	return nbuf;
1659 }
1660 
1661 #ifdef MESH_MODE_SUPPORT
1662 
1663 /**
1664  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1665  *				and prepare msdu_info for mesh frames.
1666  * @vdev: DP vdev handle
1667  * @nbuf: skb
1668  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1669  *
1670  * Return: NULL on failure,
1671  *         nbuf when extracted successfully
1672  */
1673 static
1674 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1675 				struct dp_tx_msdu_info_s *msdu_info)
1676 {
1677 	struct meta_hdr_s *mhdr;
1678 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1679 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1680 
1681 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1682 
1683 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1684 		msdu_info->exception_fw = 0;
1685 		goto remove_meta_hdr;
1686 	}
1687 
1688 	msdu_info->exception_fw = 1;
1689 
1690 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1691 
1692 	meta_data->host_tx_desc_pool = 1;
1693 	meta_data->update_peer_cache = 1;
1694 	meta_data->learning_frame = 1;
1695 
1696 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1697 		meta_data->power = mhdr->power;
1698 
1699 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1700 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1701 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1702 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1703 
1704 		meta_data->dyn_bw = 1;
1705 
1706 		meta_data->valid_pwr = 1;
1707 		meta_data->valid_mcs_mask = 1;
1708 		meta_data->valid_nss_mask = 1;
1709 		meta_data->valid_preamble_type  = 1;
1710 		meta_data->valid_retries = 1;
1711 		meta_data->valid_bw_info = 1;
1712 	}
1713 
1714 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1715 		meta_data->encrypt_type = 0;
1716 		meta_data->valid_encrypt_type = 1;
1717 		meta_data->learning_frame = 0;
1718 	}
1719 
1720 	meta_data->valid_key_flags = 1;
1721 	meta_data->key_flags = (mhdr->keyix & 0x3);
1722 
1723 remove_meta_hdr:
1724 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1725 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1726 				"qdf_nbuf_pull_head failed");
1727 		qdf_nbuf_free(nbuf);
1728 		return NULL;
1729 	}
1730 
1731 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1732 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1733 	else
1734 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1735 
1736 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1737 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1738 			" tid %d to_fw %d",
1739 			__func__, msdu_info->meta_data[0],
1740 			msdu_info->meta_data[1],
1741 			msdu_info->meta_data[2],
1742 			msdu_info->meta_data[3],
1743 			msdu_info->meta_data[4],
1744 			msdu_info->meta_data[5],
1745 			msdu_info->tid, msdu_info->exception_fw);
1746 
1747 	return nbuf;
1748 }
1749 #else
1750 static
1751 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1752 				struct dp_tx_msdu_info_s *msdu_info)
1753 {
1754 	return nbuf;
1755 }
1756 
1757 #endif
1758 
1759 #ifdef DP_FEATURE_NAWDS_TX
1760 /**
1761  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1762  * @vdev: dp_vdev handle
1763  * @nbuf: skb
1764  * @msdu_info: MSDU info to be setup in the MSDU descriptor (carries the
1765  *	       TID, Tx queue and mesh meta data used for this frame)
1766  *
1767  * Return: NULL on success,
1768  *         nbuf on failure
1770  */
1771 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1772 		struct dp_tx_msdu_info_s *msdu_info)
1773 {
1774 	struct dp_peer *peer = NULL;
1775 	struct dp_soc *soc = vdev->pdev->soc;
1776 	struct dp_ast_entry *ast_entry = NULL;
1777 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1778 	uint16_t peer_id = HTT_INVALID_PEER;
1779 
1780 	struct dp_peer *sa_peer = NULL;
1781 	qdf_nbuf_t nbuf_copy;
1782 
1783 	qdf_spin_lock_bh(&(soc->ast_lock));
1784 	ast_entry = dp_peer_ast_hash_find_by_pdevid
1785 				(soc,
1786 				 (uint8_t *)(eh->ether_shost),
1787 				 vdev->pdev->pdev_id);
1788 
1789 	if (ast_entry)
1790 		sa_peer = ast_entry->peer;
1791 
1792 	qdf_spin_unlock_bh(&(soc->ast_lock));
1793 
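	/*
	 * Replicate the frame to every NAWDS-enabled peer with a valid
	 * peer id, skipping the peer the frame originated from (sa_peer)
	 * so it is not looped back.
	 */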
1794 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1795 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1796 				(peer->nawds_enabled)) {
1797 			if (sa_peer == peer) {
1798 				QDF_TRACE(QDF_MODULE_ID_DP,
1799 						QDF_TRACE_LEVEL_DEBUG,
1800 						" %s: broadcast multicast packet",
1801 						 __func__);
1802 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1803 				continue;
1804 			}
1805 
1806 			nbuf_copy = qdf_nbuf_copy(nbuf);
1807 			if (!nbuf_copy) {
1808 				QDF_TRACE(QDF_MODULE_ID_DP,
1809 						QDF_TRACE_LEVEL_ERROR,
1810 						"nbuf copy failed");
				break;
1811 			}
1812 
1813 			peer_id = peer->peer_ids[0];
1814 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1815 					msdu_info, peer_id, NULL);
1816 			if (nbuf_copy != NULL) {
1817 				qdf_nbuf_free(nbuf_copy);
1818 				continue;
1819 			}
1820 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1821 						1, qdf_nbuf_len(nbuf));
1822 		}
1823 	}
1824 	if (peer_id == HTT_INVALID_PEER)
1825 		return nbuf;
1826 
1827 	return NULL;
1828 }
1829 #endif
1830 
1831 /**
1832  * dp_check_exc_metadata() - Checks if parameters are valid
1833  * @tx_exc: holds all exception path parameters
1834  *
1835  * Return: true when all the parameters are valid, else false
1836  *
1837  */
1838 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1839 {
1840 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1841 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1842 	    tx_exc->sec_type > cdp_num_sec_types) {
1843 		return false;
1844 	}
1845 
1846 	return true;
1847 }
1848 
1849 /**
1850  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1851  * @vap_dev: DP vdev handle
1852  * @nbuf: skb
1853  * @tx_exc_metadata: Handle that holds exception path meta data
1854  *
1855  * Entry point for Core Tx layer (DP_TX) invoked from
1856  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1857  *
1858  * Return: NULL on success,
1859  *         nbuf when it fails to send
1860  */
1861 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1862 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1863 {
1864 	struct ether_header *eh = NULL;
1865 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1866 	struct dp_tx_msdu_info_s msdu_info;
1867 
1868 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1869 
1870 	msdu_info.tid = tx_exc_metadata->tid;
1871 
1872 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1873 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1874 			"%s , skb %pM",
1875 			__func__, nbuf->data);
1876 
1877 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1878 
1879 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1880 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1881 			"Invalid parameters in exception path");
1882 		goto fail;
1883 	}
1884 
1885 	/* Basic sanity checks for unsupported packets */
1886 
1887 	/* MESH mode */
1888 	if (qdf_unlikely(vdev->mesh_vdev)) {
1889 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1890 			"Mesh mode is not supported in exception path");
1891 		goto fail;
1892 	}
1893 
1894 	/* TSO or SG */
1895 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1896 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1898 			  "TSO and SG are not supported in exception path");
1899 
1900 		goto fail;
1901 	}
1902 
1903 	/* RAW */
1904 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1906 			  "Raw frame is not supported in exception path");
1907 		goto fail;
1908 	}
1909 
1910 
1911 	/* Mcast enhancement*/
1912 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1913 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
1914 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
1915 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1916 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1917 		}
1918 	}
1919 
1920 	/*
1921 	 * Get HW Queue to use for this frame.
1922 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1923 	 * dedicated for data and 1 for command.
1924 	 * "queue_id" maps to one hardware ring.
1925 	 *  With each ring, we also associate a unique Tx descriptor pool
1926 	 *  to minimize lock contention for these resources.
1927 	 */
1928 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1929 
1930 	/*  Single linear frame */
1931 	/*
1932 	 * If nbuf is a simple linear frame, use send_single function to
1933 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1934 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1935 	 */
1936 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1937 			tx_exc_metadata->peer_id, tx_exc_metadata);
1938 
1939 	return nbuf;
1940 
1941 fail:
1942 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1943 			"pkt send failed");
1944 	return nbuf;
1945 }
1946 
1947 /**
1948  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1949  * @vap_dev: DP vdev handle
1950  * @nbuf: skb
1951  *
1952  * Entry point for Core Tx layer (DP_TX) invoked from
1953  * hard_start_xmit in OSIF/HDD
1954  *
1955  * Return: NULL on success,
1956  *         nbuf when it fails to send
1957  */
1958 #ifdef MESH_MODE_SUPPORT
1959 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1960 {
1961 	struct meta_hdr_s *mhdr;
1962 	qdf_nbuf_t nbuf_mesh = NULL;
1963 	qdf_nbuf_t nbuf_clone = NULL;
1964 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1965 	uint8_t no_enc_frame = 0;
1966 
1967 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1968 	if (nbuf_mesh == NULL) {
1969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1970 				"qdf_nbuf_unshare failed");
1971 		return nbuf;
1972 	}
1973 	nbuf = nbuf_mesh;
1974 
1975 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1976 
1977 	if ((vdev->sec_type != cdp_sec_type_none) &&
1978 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1979 		no_enc_frame = 1;
1980 
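	/*
	 * If the meta header carries updated Tx info and the frame is not a
	 * no-encrypt frame, send a clone tagged as mesh Tx info through the
	 * exception path so the info reaches the firmware.
	 */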
1981 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1982 		       !no_enc_frame) {
1983 		nbuf_clone = qdf_nbuf_clone(nbuf);
1984 		if (nbuf_clone == NULL) {
1985 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1986 				"qdf_nbuf_clone failed");
1987 			return nbuf;
1988 		}
1989 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1990 	}
1991 
1992 	if (nbuf_clone) {
1993 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1994 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1995 		} else {
1996 			qdf_nbuf_free(nbuf_clone);
1997 		}
1998 	}
1999 
2000 	if (no_enc_frame)
2001 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2002 	else
2003 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2004 
2005 	nbuf = dp_tx_send(vap_dev, nbuf);
2006 	if ((nbuf == NULL) && no_enc_frame) {
2007 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2008 	}
2009 
2010 	return nbuf;
2011 }
2012 
2013 #else
2014 
2015 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
2016 {
2017 	return dp_tx_send(vap_dev, nbuf);
2018 }
2019 
2020 #endif
2021 
2022 /**
2023  * dp_tx_send() - Transmit a frame on a given VAP
2024  * @vap_dev: DP vdev handle
2025  * @nbuf: skb
2026  *
2027  * Entry point for Core Tx layer (DP_TX) invoked from
2028  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2029  * cases
2030  *
2031  * Return: NULL on success,
2032  *         nbuf when it fails to send
2033  */
2034 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
2035 {
2036 	struct ether_header *eh = NULL;
2037 	struct dp_tx_msdu_info_s msdu_info;
2038 	struct dp_tx_seg_info_s seg_info;
2039 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2040 	uint16_t peer_id = HTT_INVALID_PEER;
2041 	qdf_nbuf_t nbuf_mesh = NULL;
2042 
2043 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2044 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
2045 
2046 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2047 
2048 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2049 			"%s , skb %pM",
2050 			__func__, nbuf->data);
2051 
2052 	/*
2053 	 * Set Default Host TID value to invalid TID
2054 	 * (TID override disabled)
2055 	 */
2056 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2057 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2058 
2059 	if (qdf_unlikely(vdev->mesh_vdev)) {
2060 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2061 								&msdu_info);
2062 		if (nbuf_mesh == NULL) {
2063 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2064 					"Extracting mesh metadata failed");
2065 			return nbuf;
2066 		}
2067 		nbuf = nbuf_mesh;
2068 	}
2069 
2070 	/*
2071 	 * Get HW Queue to use for this frame.
2072 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2073 	 * dedicated for data and 1 for command.
2074 	 * "queue_id" maps to one hardware ring.
2075 	 *  With each ring, we also associate a unique Tx descriptor pool
2076 	 *  to minimize lock contention for these resources.
2077 	 */
2078 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2079 
2080 	/*
2081 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2082 	 *  Table 1 - Default DSCP-TID mapping table
2083 	 *  Table 2 - 1 DSCP-TID override table
2084 	 *
2085 	 * If we need a different DSCP-TID mapping for this vap,
2086 	 * call tid_classify to extract DSCP/ToS from frame and
2087 	 * map to a TID and store in msdu_info. This is later used
2088 	 * to fill in TCL Input descriptor (per-packet TID override).
2089 	 */
2090 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2091 
2092 	/*
2093 	 * Classify the frame and call corresponding
2094 	 * "prepare" function which extracts the segment (TSO)
2095 	 * and fragmentation information (for TSO, SG, ME, or Raw)
2096 	 * into MSDU_INFO structure which is later used to fill
2097 	 * SW and HW descriptors.
2098 	 */
2099 	if (qdf_nbuf_is_tso(nbuf)) {
2100 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2101 			  "%s TSO frame %pK", __func__, vdev);
2102 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2103 				qdf_nbuf_len(nbuf));
2104 
2105 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2106 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2107 					 qdf_nbuf_len(nbuf));
2108 			return nbuf;
2109 		}
2110 
2111 		goto send_multiple;
2112 	}
2113 
2114 	/* SG */
2115 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2116 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2117 
2118 		if (!nbuf)
2119 			return NULL;
2120 
2121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2122 			 "%s non-TSO SG frame %pK", __func__, vdev);
2123 
2124 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2125 				qdf_nbuf_len(nbuf));
2126 
2127 		goto send_multiple;
2128 	}
2129 
2130 #ifdef ATH_SUPPORT_IQUE
2131 	/* Mcast to Ucast Conversion*/
2132 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2133 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2134 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2135 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2136 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2137 				  "%s Mcast frm for ME %pK", __func__, vdev);
2138 
2139 			DP_STATS_INC_PKT(vdev,
2140 					tx_i.mcast_en.mcast_pkt, 1,
2141 					qdf_nbuf_len(nbuf));
2142 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2143 					QDF_STATUS_SUCCESS) {
2144 				return NULL;
2145 			}
2146 		}
2147 	}
2148 #endif
2149 
2150 	/* RAW */
2151 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2152 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2153 		if (nbuf == NULL)
2154 			return NULL;
2155 
2156 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2157 			  "%s Raw frame %pK", __func__, vdev);
2158 
2159 		goto send_multiple;
2160 
2161 	}
2162 
2163 	/*  Single linear frame */
2164 	/*
2165 	 * If nbuf is a simple linear frame, use send_single function to
2166 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2167 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2168 	 */
2169 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2170 
2171 	return nbuf;
2172 
2173 send_multiple:
2174 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2175 
2176 	return nbuf;
2177 }
2178 
2179 /**
2180  * dp_tx_reinject_handler() - Tx Reinject Handler
2181  * @tx_desc: software descriptor head pointer
2182  * @status : Tx completion status from HTT descriptor
2183  *
2184  * This function reinjects frames back to Target.
2185  * Todo - Host queue needs to be added
2186  *
2187  * Return: none
2188  */
2189 static
2190 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2191 {
2192 	struct dp_vdev *vdev;
2193 	struct dp_peer *peer = NULL;
2194 	uint32_t peer_id = HTT_INVALID_PEER;
2195 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2196 	qdf_nbuf_t nbuf_copy = NULL;
2197 	struct dp_tx_msdu_info_s msdu_info;
2198 	struct dp_peer *sa_peer = NULL;
2199 	struct dp_ast_entry *ast_entry = NULL;
2200 	struct dp_soc *soc = NULL;
2201 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2202 #ifdef WDS_VENDOR_EXTENSION
2203 	int is_mcast = 0, is_ucast = 0;
2204 	int num_peers_3addr = 0;
2205 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2206 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2207 #endif
2208 
2209 	vdev = tx_desc->vdev;
2210 	soc = vdev->pdev->soc;
2211 
2212 	qdf_assert(vdev);
2213 
2214 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2215 
2216 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2217 
2218 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2219 			"%s Tx reinject path", __func__);
2220 
2221 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2222 			qdf_nbuf_len(tx_desc->nbuf));
2223 
2224 	qdf_spin_lock_bh(&(soc->ast_lock));
2225 
2226 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2227 				(soc,
2228 				 (uint8_t *)(eh->ether_shost),
2229 				 vdev->pdev->pdev_id);
2230 
2231 	if (ast_entry)
2232 		sa_peer = ast_entry->peer;
2233 
2234 	qdf_spin_unlock_bh(&(soc->ast_lock));
2235 
2236 #ifdef WDS_VENDOR_EXTENSION
2237 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2238 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2239 	} else {
2240 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2241 	}
2242 	is_ucast = !is_mcast;
2243 
2244 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2245 		if (peer->bss_peer)
2246 			continue;
2247 
2248 		/* Detect wds peers that use 3-addr framing for mcast.
2249 		 * if there are any, the bss_peer is used to send the
2250 		 * mcast frame using 3-addr format. All wds enabled
2251 		 * peers that use 4-addr framing for mcast frames will
2252 		 * be duplicated and sent as 4-addr frames below.
2253 		 */
2254 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2255 			num_peers_3addr = 1;
2256 			break;
2257 		}
2258 	}
2259 #endif
2260 
2261 	if (qdf_unlikely(vdev->mesh_vdev)) {
2262 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2263 	} else {
2264 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2265 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2266 #ifdef WDS_VENDOR_EXTENSION
2267 			/*
2268 			 * . if 3-addr STA, then send on BSS Peer
2269 			 * . if Peer WDS enabled and accept 4-addr mcast,
2270 			 * send mcast on that peer only
2271 			 * . if Peer WDS enabled and accept 4-addr ucast,
2272 			 * send ucast on that peer only
2273 			 */
2274 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2275 			 (peer->wds_enabled &&
2276 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2277 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2278 #else
2279 			((peer->bss_peer &&
2280 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2281 				 peer->nawds_enabled)) {
2282 #endif
2283 				peer_id = DP_INVALID_PEER;
2284 
2285 				if (peer->nawds_enabled) {
2286 					peer_id = peer->peer_ids[0];
2287 					if (sa_peer == peer) {
2288 						QDF_TRACE(
2289 							QDF_MODULE_ID_DP,
2290 							QDF_TRACE_LEVEL_DEBUG,
2291 							" %s: multicast packet",
2292 							__func__);
2293 						DP_STATS_INC(peer,
2294 							tx.nawds_mcast_drop, 1);
2295 						continue;
2296 					}
2297 				}
2298 
2299 				nbuf_copy = qdf_nbuf_copy(nbuf);
2300 
2301 				if (!nbuf_copy) {
2302 					QDF_TRACE(QDF_MODULE_ID_DP,
2303 						QDF_TRACE_LEVEL_DEBUG,
2304 						FL("nbuf copy failed"));
2305 					break;
2306 				}
2307 
2308 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2309 						nbuf_copy,
2310 						&msdu_info,
2311 						peer_id,
2312 						NULL);
2313 
2314 				if (nbuf_copy) {
2315 					QDF_TRACE(QDF_MODULE_ID_DP,
2316 						QDF_TRACE_LEVEL_DEBUG,
2317 						FL("pkt send failed"));
2318 					qdf_nbuf_free(nbuf_copy);
2319 				} else {
2320 					if (peer_id != DP_INVALID_PEER)
2321 						DP_STATS_INC_PKT(peer,
2322 							tx.nawds_mcast,
2323 							1, qdf_nbuf_len(nbuf));
2324 				}
2325 			}
2326 		}
2327 	}
2328 
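	/*
	 * For NAWDS-enabled vdevs the original frame is also reinjected
	 * through the regular send path; otherwise it is simply freed.
	 */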
2329 	if (vdev->nawds_enabled) {
2330 		peer_id = DP_INVALID_PEER;
2331 
2332 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2333 					1, qdf_nbuf_len(nbuf));
2334 
2335 		nbuf = dp_tx_send_msdu_single(vdev,
2336 				nbuf,
2337 				&msdu_info,
2338 				peer_id, NULL);
2339 
2340 		if (nbuf) {
2341 			QDF_TRACE(QDF_MODULE_ID_DP,
2342 				QDF_TRACE_LEVEL_DEBUG,
2343 				FL("pkt send failed"));
2344 			qdf_nbuf_free(nbuf);
2345 		}
2346 	} else
2347 		qdf_nbuf_free(nbuf);
2348 
2349 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2350 }
2351 
2352 /**
2353  * dp_tx_inspect_handler() - Tx Inspect Handler
2354  * @tx_desc: software descriptor head pointer
2355  * @status : Tx completion status from HTT descriptor
2356  *
2357  * Handles Tx frames sent back to Host for inspection
2358  * (ProxyARP)
2359  *
2360  * Return: none
2361  */
2362 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2363 {
2364 
2365 	struct dp_soc *soc;
2366 	struct dp_pdev *pdev = tx_desc->pdev;
2367 
2368 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2369 			"%s Tx inspect path",
2370 			__func__);
2371 
2372 	qdf_assert(pdev);
2373 
2374 	soc = pdev->soc;
2375 
2376 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2377 			qdf_nbuf_len(tx_desc->nbuf));
2378 
2379 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2380 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2381 }
2382 
2383 #ifdef FEATURE_PERPKT_INFO
2384 /**
2385  * dp_get_completion_indication_for_stack() - send completion to stack
2386  * @soc : dp_soc handle
2387  * @pdev: dp_pdev handle
2388  * @peer: dp peer handle
2389  * @ts: transmit completion status structure
2390  * @netbuf: Buffer pointer for free
2391  *
2392  * This function indicates whether the buffer needs to be handed to the
2393  * stack for freeing.
2394  *
 * Return: QDF_STATUS
 */
2395 QDF_STATUS
2396 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2397 				       struct dp_pdev *pdev,
2398 				       struct dp_peer *peer,
2399 				       struct hal_tx_completion_status *ts,
2400 				       qdf_nbuf_t netbuf)
2401 {
2402 	struct tx_capture_hdr *ppdu_hdr;
2403 	uint16_t peer_id = ts->peer_id;
2404 	uint32_t ppdu_id = ts->ppdu_id;
2405 	uint8_t first_msdu = ts->first_msdu;
2406 	uint8_t last_msdu = ts->last_msdu;
2407 
2408 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2409 		return QDF_STATUS_E_NOSUPPORT;
2410 
2411 	if (!peer) {
2412 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2413 				FL("Peer Invalid"));
2414 		return QDF_STATUS_E_INVAL;
2415 	}
2416 
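	/*
	 * In m_copy mode indicate only one MSDU per (ppdu_id, peer_id)
	 * pair; further completions for the same pair are not sent up.
	 */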
2417 	if (pdev->mcopy_mode) {
2418 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2419 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2420 			return QDF_STATUS_E_INVAL;
2421 		}
2422 
2423 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2424 		pdev->m_copy_id.tx_peer_id = peer_id;
2425 	}
2426 
2427 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2428 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2429 				FL("No headroom"));
2430 		return QDF_STATUS_E_NOMEM;
2431 	}
2432 
2433 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2434 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2435 		     IEEE80211_ADDR_LEN);
2436 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2437 		     IEEE80211_ADDR_LEN);
2438 	ppdu_hdr->ppdu_id = ppdu_id;
2439 	ppdu_hdr->peer_id = peer_id;
2440 	ppdu_hdr->first_msdu = first_msdu;
2441 	ppdu_hdr->last_msdu = last_msdu;
2442 
2443 	return QDF_STATUS_SUCCESS;
2444 }
2445 
2446 
2447 /**
2448  * dp_send_completion_to_stack() - send completion to stack
2449  * @soc :  dp_soc handle
2450  * @pdev:  dp_pdev handle
2451  * @peer_id: peer_id of the peer for which completion came
2452  * @ppdu_id: ppdu_id
2453  * @netbuf: Buffer pointer for free
2454  *
2455  * This function is used to send a completion to the stack
2456  * so that it can free the buffer
2457  *
 * Return: none
 */
2458 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2459 					uint16_t peer_id, uint32_t ppdu_id,
2460 					qdf_nbuf_t netbuf)
2461 {
2462 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2463 				netbuf, peer_id,
2464 				WDI_NO_VAL, pdev->pdev_id);
2465 }
2466 #else
2467 static QDF_STATUS
2468 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2469 				       struct dp_pdev *pdev,
2470 				       struct dp_peer *peer,
2471 				       struct hal_tx_completion_status *ts,
2472 				       qdf_nbuf_t netbuf)
2473 {
2474 	return QDF_STATUS_E_NOSUPPORT;
2475 }
2476 
2477 static void
2478 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2479 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2480 {
2481 }
2482 #endif
2483 
2484 /**
2485  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2486  * @soc: Soc handle
2487  * @desc: software Tx descriptor to be processed
2488  *
2489  * Return: none
2490  */
2491 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2492 		struct dp_tx_desc_s *desc)
2493 {
2494 	struct dp_vdev *vdev = desc->vdev;
2495 	qdf_nbuf_t nbuf = desc->nbuf;
2496 
2497 	/* If it is TDLS mgmt, don't unmap or free the frame */
2498 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2499 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2500 
2501 	/* 0 : MSDU buffer, 1 : MLE */
2502 	if (desc->msdu_ext_desc) {
2503 		/* TSO free */
2504 		if (hal_tx_ext_desc_get_tso_enable(
2505 					desc->msdu_ext_desc->vaddr)) {
2506 			/* unmap each TSO seg before freeing the nbuf */
2507 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2508 						desc->tso_num_desc);
2509 			qdf_nbuf_free(nbuf);
2510 			return;
2511 		}
2512 	}
2513 
2514 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2515 
2516 	if (qdf_likely(!vdev->mesh_vdev))
2517 		qdf_nbuf_free(nbuf);
2518 	else {
2519 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2520 			qdf_nbuf_free(nbuf);
2521 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2522 		} else
2523 			vdev->osif_tx_free_ext((nbuf));
2524 	}
2525 }
2526 
2527 /**
2528  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2529  * @vdev: pointer to dp dev handler
2530  * @status : Tx completion status from HTT descriptor
2531  *
2532  * Handles MEC notify event sent from fw to Host
2533  *
2534  * Return: none
2535  */
2536 #ifdef FEATURE_WDS
2537 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2538 {
2539 
2540 	struct dp_soc *soc;
2541 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2542 	struct dp_peer *peer;
2543 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2544 
2545 	if (!vdev->mec_enabled)
2546 		return;
2547 
2548 	/* MEC required only in STA mode */
2549 	if (vdev->opmode != wlan_op_mode_sta)
2550 		return;
2551 
2552 	soc = vdev->pdev->soc;
2553 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2554 	peer = TAILQ_FIRST(&vdev->peer_list);
2555 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2556 
2557 	if (!peer) {
2558 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2559 				FL("peer is NULL"));
2560 		return;
2561 	}
2562 
2563 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2564 			"%s Tx MEC Handler",
2565 			__func__);
2566 
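	/*
	 * The MEC MAC address is carried byte-reversed in the HTT status
	 * words; restore its byte order before adding the MEC AST entry.
	 */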
2567 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2568 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2569 					status[(DP_MAC_ADDR_LEN - 2) + i];
2570 
2571 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2572 		dp_peer_add_ast(soc,
2573 				peer,
2574 				mac_addr,
2575 				CDP_TXRX_AST_TYPE_MEC,
2576 				flags);
2577 }
2578 #endif
2579 
2580 #ifdef MESH_MODE_SUPPORT
2581 /**
2582  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2583  *                                         in mesh meta header
2584  * @tx_desc: software descriptor head pointer
2585  * @ts: pointer to tx completion stats
2586  * Return: none
2587  */
2588 static
2589 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2590 		struct hal_tx_completion_status *ts)
2591 {
2592 	struct meta_hdr_s *mhdr;
2593 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2594 
2595 	if (!tx_desc->msdu_ext_desc) {
2596 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2597 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2598 				"netbuf %pK offset %d",
2599 				netbuf, tx_desc->pkt_offset);
2600 			return;
2601 		}
2602 	}
2603 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2604 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2605 			"netbuf %pK offset %d", netbuf,
2606 			sizeof(struct meta_hdr_s));
2607 		return;
2608 	}
2609 
2610 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2611 	mhdr->rssi = ts->ack_frame_rssi;
2612 	mhdr->channel = tx_desc->pdev->operating_channel;
2613 }
2614 
2615 #else
2616 static
2617 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2618 		struct hal_tx_completion_status *ts)
2619 {
2620 }
2621 
2622 #endif
2623 
2624 /**
2625  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2626  * @peer: Handle to DP peer
2627  * @ts: pointer to HAL Tx completion stats
 * @length: MSDU length of the completed frame
2628  *
2629  * Return: None
2630  */
2631 static inline void
2632 dp_tx_update_peer_stats(struct dp_peer *peer,
2633 			struct hal_tx_completion_status *ts, uint32_t length)
2634 {
2635 	struct dp_pdev *pdev = peer->vdev->pdev;
2636 	struct dp_soc *soc = NULL;
2637 	uint8_t mcs, pkt_type;
2638 
2639 	if (!pdev)
2640 		return;
2641 
2642 	soc = pdev->soc;
2643 
2644 	mcs = ts->mcs;
2645 	pkt_type = ts->pkt_type;
2646 
2647 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2648 		dp_err("Release source is not from TQM");
2649 		return;
2650 	}
2651 
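	/*
	 * Count the completion and bucket any TQM removal by its release
	 * reason; acked-frame statistics are updated further below.
	 */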
2652 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2653 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2654 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2655 
2656 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2657 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2658 
2659 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2660 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2661 
2662 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2663 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2664 
2665 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2666 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2667 
2668 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2669 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2670 
2671 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2672 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2673 
2674 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2675 		return;
2676 	}
2677 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2678 
2679 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2680 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2681 
2682 	/*
2683 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2684 	 * Return from here if HTT PPDU events are enabled.
2685 	 */
2686 	if (!(soc->process_tx_status))
2687 		return;
2688 
2689 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2690 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2691 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2692 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2693 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2694 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2695 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2696 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2697 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2698 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2700 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2701 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2702 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2703 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2704 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2705 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2706 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2707 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2708 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2709 
2710 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2711 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2712 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2713 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2714 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2715 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2716 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2717 
2718 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
2719 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2720 			     &peer->stats, ts->peer_id,
2721 			     UPDATE_PEER_STATS, pdev->pdev_id);
2722 #endif
2723 }
2724 
2725 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2726 /**
2727  * dp_tx_flow_pool_lock() - take flow pool lock
2728  * @soc: core txrx main context
2729  * @tx_desc: tx desc
2730  *
2731  * Return: None
2732  */
2733 static inline
2734 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2735 			  struct dp_tx_desc_s *tx_desc)
2736 {
2737 	struct dp_tx_desc_pool_s *pool;
2738 	uint8_t desc_pool_id;
2739 
2740 	desc_pool_id = tx_desc->pool_id;
2741 	pool = &soc->tx_desc[desc_pool_id];
2742 
2743 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2744 }
2745 
2746 /**
2747  * dp_tx_flow_pool_unlock() - release flow pool lock
2748  * @soc: core txrx main context
2749  * @tx_desc: tx desc
2750  *
2751  * Return: None
2752  */
2753 static inline
2754 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2755 			    struct dp_tx_desc_s *tx_desc)
2756 {
2757 	struct dp_tx_desc_pool_s *pool;
2758 	uint8_t desc_pool_id;
2759 
2760 	desc_pool_id = tx_desc->pool_id;
2761 	pool = &soc->tx_desc[desc_pool_id];
2762 
2763 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2764 }
2765 #else
2766 static inline
2767 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2768 {
2769 }
2770 
2771 static inline
2772 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2773 {
2774 }
2775 #endif
2776 
2777 /**
2778  * dp_tx_notify_completion() - Notify tx completion for this desc
2779  * @soc: core txrx main context
2780  * @tx_desc: tx desc
2781  * @netbuf:  buffer
2782  *
2783  * Return: none
2784  */
2785 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2786 					   struct dp_tx_desc_s *tx_desc,
2787 					   qdf_nbuf_t netbuf)
2788 {
2789 	void *osif_dev;
2790 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2791 
2792 	qdf_assert(tx_desc);
2793 
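	/*
	 * Read the osif device and completion callback under the flow pool
	 * lock (guarding against concurrent pool/vdev teardown); the
	 * callback itself is invoked after the lock is released.
	 */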
2794 	dp_tx_flow_pool_lock(soc, tx_desc);
2795 
2796 	if (!tx_desc->vdev ||
2797 	    !tx_desc->vdev->osif_vdev) {
2798 		dp_tx_flow_pool_unlock(soc, tx_desc);
2799 		return;
2800 	}
2801 
2802 	osif_dev = tx_desc->vdev->osif_vdev;
2803 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2804 	dp_tx_flow_pool_unlock(soc, tx_desc);
2805 
2806 	if (tx_compl_cbk)
2807 		tx_compl_cbk(netbuf, osif_dev);
2808 }
2809 
2810 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
2811  * @pdev: pdev handle
2812  * @tid: tid value
2813  * @txdesc_ts: timestamp from txdesc
2814  * @ppdu_id: ppdu id
2815  *
2816  * Return: none
2817  */
2818 #ifdef FEATURE_PERPKT_INFO
2819 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2820 					       uint8_t tid,
2821 					       uint64_t txdesc_ts,
2822 					       uint32_t ppdu_id)
2823 {
2824 	uint64_t delta_ms;
2825 	struct cdp_tx_sojourn_stats *sojourn_stats;
2826 
2827 	if (pdev->enhanced_stats_en == 0)
2828 		return;
2829 
2830 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2831 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2832 
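	/*
	 * On a PPDU boundary publish the accumulated sojourn stats through
	 * WDI and reset the accumulators for the new PPDU.
	 */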
2833 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2834 		if (!pdev->sojourn_buf)
2835 			return;
2836 
2837 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2838 					qdf_nbuf_data(pdev->sojourn_buf);
2839 
2840 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2841 			     sizeof(struct cdp_tx_sojourn_stats));
2842 
2843 		qdf_mem_zero(&pdev->sojourn_stats,
2844 			     sizeof(struct cdp_tx_sojourn_stats));
2845 
2846 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2847 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2848 				     WDI_NO_VAL, pdev->pdev_id);
2849 
2850 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2851 	}
2852 
2853 	if (tid == HTT_INVALID_TID)
2854 		return;
2855 
2856 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2857 				txdesc_ts;
2858 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2859 			    delta_ms);
2860 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2861 	pdev->sojourn_stats.num_msdus[tid]++;
2862 }
2863 #else
2864 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2865 					       uint8_t tid,
2866 					       uint64_t txdesc_ts,
2867 					       uint32_t ppdu_id)
2868 {
2869 }
2870 #endif
2871 
2872 /**
2873  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
2874  * @soc: DP Soc handle
2875  * @desc: software Tx descriptor to be processed
2876  * @ts : Tx completion status from HAL/HTT descriptor
 * @peer: DP peer handle corresponding to the completion
2877  *
2878  * Return: none
2879  */
2880 static inline void
2881 dp_tx_comp_process_desc(struct dp_soc *soc,
2882 			struct dp_tx_desc_s *desc,
2883 			struct hal_tx_completion_status *ts,
2884 			struct dp_peer *peer)
2885 {
2886 	/*
2887 	 * m_copy/tx_capture modes are not supported for
2888 	 * scatter gather packets
2889 	 */
2890 	if (!(desc->msdu_ext_desc) &&
2891 	    (dp_get_completion_indication_for_stack(soc, desc->pdev,
2892 						    peer, ts, desc->nbuf)
2893 			== QDF_STATUS_SUCCESS)) {
2894 		qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2895 			       QDF_DMA_TO_DEVICE);
2896 
2897 		dp_send_completion_to_stack(soc, desc->pdev, ts->peer_id,
2898 					    ts->ppdu_id, desc->nbuf);
2899 	} else {
2900 		dp_tx_comp_free_buf(soc, desc);
2901 	}
2902 }
2903 
2904 /**
2905  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2906  * @tx_desc: software descriptor head pointer
2907  * @ts: Tx completion status
2908  * @peer: peer handle
2909  *
2910  * Return: none
2911  */
2912 static inline
2913 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2914 				  struct hal_tx_completion_status *ts,
2915 				  struct dp_peer *peer)
2916 {
2917 	uint32_t length;
2918 	struct dp_soc *soc = NULL;
2919 	struct dp_vdev *vdev = tx_desc->vdev;
2920 	struct ether_header *eh =
2921 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2922 
2923 	if (!vdev) {
2924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2925 				"invalid vdev");
2926 		goto out;
2927 	}
2928 
2929 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
2930 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2931 				 QDF_TRACE_DEFAULT_PDEV_ID,
2932 				 qdf_nbuf_data_addr(tx_desc->nbuf),
2933 				 sizeof(qdf_nbuf_data(tx_desc->nbuf)),
2934 				 tx_desc->id,
2935 				 ts->status));
2936 
2937 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2938 				"-------------------- \n"
2939 				"Tx Completion Stats: \n"
2940 				"-------------------- \n"
2941 				"ack_frame_rssi = %d \n"
2942 				"first_msdu = %d \n"
2943 				"last_msdu = %d \n"
2944 				"msdu_part_of_amsdu = %d \n"
2945 				"rate_stats valid = %d \n"
2946 				"bw = %d \n"
2947 				"pkt_type = %d \n"
2948 				"stbc = %d \n"
2949 				"ldpc = %d \n"
2950 				"sgi = %d \n"
2951 				"mcs = %d \n"
2952 				"ofdma = %d \n"
2953 				"tones_in_ru = %d \n"
2954 				"tsf = %d \n"
2955 				"ppdu_id = %d \n"
2956 				"transmit_cnt = %d \n"
2957 				"tid = %d \n"
2958 				"peer_id = %d\n",
2959 				ts->ack_frame_rssi, ts->first_msdu,
2960 				ts->last_msdu, ts->msdu_part_of_amsdu,
2961 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
2962 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
2963 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
2964 				ts->transmit_cnt, ts->tid, ts->peer_id);
2965 
2966 	soc = vdev->pdev->soc;
2967 
2968 	/* Update SoC level stats */
2969 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2970 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2971 
2972 	/* Update per-packet stats for mesh mode */
2973 	if (qdf_unlikely(vdev->mesh_vdev) &&
2974 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2975 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
2976 
2977 	length = qdf_nbuf_len(tx_desc->nbuf);
2978 	/* Update peer level stats */
2979 	if (!peer) {
2980 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2981 				   "peer is null or deletion in progress");
2982 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2983 		goto out;
2984 	}
2985 
2986 	if (qdf_likely(!peer->bss_peer)) {
2987 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2988 
2989 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
2990 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2991 	} else {
2992 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
2993 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2994 
2995 			if ((peer->vdev->tx_encap_type ==
2996 				htt_cmn_pkt_type_ethernet) &&
2997 				IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
2998 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2999 			}
3000 		}
3001 	}
3002 
3003 	dp_tx_update_peer_stats(peer, ts, length);
3004 
3005 out:
3006 	return;
3007 }
3008 /**
3009  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3010  * @soc: core txrx main context
3011  * @comp_head: software descriptor head pointer
3012  *
3013  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3014  * and release the software descriptors after processing is complete
3015  *
3016  * Return: none
3017  */
3018 static void
3019 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3020 			     struct dp_tx_desc_s *comp_head)
3021 {
3022 	struct dp_tx_desc_s *desc;
3023 	struct dp_tx_desc_s *next;
3024 	struct hal_tx_completion_status ts = {0};
3025 	struct dp_peer *peer;
3026 
3027 	DP_HIST_INIT();
3028 	desc = comp_head;
3029 
3030 	while (desc) {
3031 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3032 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3033 		dp_tx_comp_process_tx_status(desc, &ts, peer);
3034 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3035 
3036 		if (peer)
3037 			dp_peer_unref_del_find_by_id(peer);
3038 
3039 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
3040 
3041 		next = desc->next;
3042 
3043 		dp_tx_desc_release(desc, desc->pool_id);
3044 		desc = next;
3045 	}
3046 
3047 	DP_TX_HIST_STATS_PER_PDEV();
3048 }
3049 
3050 /**
3051  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3052  * @tx_desc: software descriptor head pointer
3053  * @status : Tx completion status from HTT descriptor
3054  *
3055  * This function will process HTT Tx indication messages from Target
3056  *
3057  * Return: none
3058  */
3059 static
3060 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
3061 {
3062 	uint8_t tx_status;
3063 	struct dp_pdev *pdev;
3064 	struct dp_vdev *vdev;
3065 	struct dp_soc *soc;
3066 	struct hal_tx_completion_status ts = {0};
3067 	uint32_t *htt_desc = (uint32_t *)status;
3068 	struct dp_peer *peer;
3069 
3070 	qdf_assert(tx_desc->pdev);
3071 
3072 	pdev = tx_desc->pdev;
3073 	vdev = tx_desc->vdev;
3074 	soc = pdev->soc;
3075 
3076 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3077 
3078 	switch (tx_status) {
3079 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3080 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3081 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3082 	{
3083 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3084 			ts.peer_id =
3085 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3086 						htt_desc[2]);
3087 			ts.tid =
3088 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3089 						htt_desc[2]);
3090 		} else {
3091 			ts.peer_id = HTT_INVALID_PEER;
3092 			ts.tid = HTT_INVALID_TID;
3093 		}
3094 		ts.ppdu_id =
3095 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3096 					htt_desc[1]);
3097 		ts.ack_frame_rssi =
3098 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3099 					htt_desc[1]);
3100 
3101 		ts.first_msdu = 1;
3102 		ts.last_msdu = 1;
3103 
3104 		if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
3105 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
3106 
3107 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3108 
3109 		dp_tx_comp_process_tx_status(tx_desc, &ts, peer);
3110 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3111 
		/*
		 * Release the peer reference only after the completion
		 * status and descriptor have been processed with the peer
		 * still valid.
		 */
3112 		if (qdf_likely(peer))
3113 			dp_peer_unref_del_find_by_id(peer);
3114 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3115 
3116 		break;
3117 	}
3118 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3119 	{
3120 		dp_tx_reinject_handler(tx_desc, status);
3121 		break;
3122 	}
3123 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3124 	{
3125 		dp_tx_inspect_handler(tx_desc, status);
3126 		break;
3127 	}
3128 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3129 	{
3130 		dp_tx_mec_handler(vdev, status);
3131 		break;
3132 	}
3133 	default:
3134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3135 			  "%s Invalid HTT tx_status %d\n",
3136 			  __func__, tx_status);
3137 		break;
3138 	}
3139 }
3140 
3141 /**
3142  * dp_tx_comp_handler() - Tx completion handler
3143  * @soc: core txrx main context
3144  * @hal_srng: opaque handle to the Tx completion ring
3145  * @quota: No. of packets/descriptors that can be serviced in one loop
3146  *
3147  * This function will collect hardware release ring element contents and
3148  * handle descriptor contents. Based on contents, free packet or handle error
3149  * conditions
3150  *
3151  * Return: number of completions processed, used for quota accounting
3152  */
3153 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3154 {
3155 	void *tx_comp_hal_desc;
3156 	uint8_t buffer_src;
3157 	uint8_t pool_id;
3158 	uint32_t tx_desc_id;
3159 	struct dp_tx_desc_s *tx_desc = NULL;
3160 	struct dp_tx_desc_s *head_desc = NULL;
3161 	struct dp_tx_desc_s *tail_desc = NULL;
3162 	uint32_t num_processed;
3163 	uint32_t count;
3164 
3165 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3166 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3167 				"%s %d : HAL RING Access Failed -- %pK",
3168 				__func__, __LINE__, hal_srng);
3169 		return 0;
3170 	}
3171 
3172 	num_processed = 0;
3173 	count = 0;
3174 
3175 	/* Find head descriptor from completion ring */
3176 	while (qdf_likely(tx_comp_hal_desc =
3177 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3178 
3179 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3180 
3181 		/* If this buffer was not released by TQM or FW, then it is not
3182 		 * a Tx completion indication; assert */
3183 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3184 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3185 
3186 			QDF_TRACE(QDF_MODULE_ID_DP,
3187 					QDF_TRACE_LEVEL_FATAL,
3188 					"Tx comp release_src != TQM | FW");
3189 
3190 			qdf_assert_always(0);
3191 		}
3192 
3193 		/* Get descriptor id */
3194 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3195 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3196 			DP_TX_DESC_ID_POOL_OS;
3197 
3198 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3199 			continue;
3200 
3201 		/* Find Tx descriptor */
3202 		tx_desc = dp_tx_desc_find(soc, pool_id,
3203 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3204 				DP_TX_DESC_ID_PAGE_OS,
3205 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3206 				DP_TX_DESC_ID_OFFSET_OS);
3207 
3208 		/*
3209 		 * If the descriptor is already freed in vdev_detach,
3210 		 * continue to next descriptor
3211 		 */
3212 		if (!tx_desc->vdev) {
3213 			QDF_TRACE(QDF_MODULE_ID_DP,
3214 				  QDF_TRACE_LEVEL_INFO,
3215 				  "Descriptor freed in vdev_detach %d",
3216 				  tx_desc_id);
3217 
3218 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3219 			count++;
3220 			continue;
3221 		}
3222 
3223 		/*
3224 		 * If the release source is FW, process the HTT status
3225 		 */
3226 		if (qdf_unlikely(buffer_src ==
3227 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3228 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3229 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3230 					htt_tx_status);
3231 			dp_tx_process_htt_completion(tx_desc,
3232 					htt_tx_status);
3233 		} else {
3234 			/* Pool id is not matching. Error */
3235 			if (tx_desc->pool_id != pool_id) {
3236 				QDF_TRACE(QDF_MODULE_ID_DP,
3237 					QDF_TRACE_LEVEL_FATAL,
3238 					"Tx Comp pool id %d not matched %d",
3239 					pool_id, tx_desc->pool_id);
3240 
3241 				qdf_assert_always(0);
3242 			}
3243 
3244 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3245 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3246 				QDF_TRACE(QDF_MODULE_ID_DP,
3247 					QDF_TRACE_LEVEL_FATAL,
3248 					"Txdesc invalid, flgs = %x,id = %d",
3249 					tx_desc->flags,	tx_desc_id);
3250 				qdf_assert_always(0);
3251 			}
3252 
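			/*
			 * Chain TQM-released descriptors into a local list;
			 * they are processed as one batch once ring access
			 * is closed.
			 */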
3253 			/* First ring descriptor on the cycle */
3254 			if (!head_desc) {
3255 				head_desc = tx_desc;
3256 				tail_desc = tx_desc;
3257 			}
3258 
3259 			tail_desc->next = tx_desc;
3260 			tx_desc->next = NULL;
3261 			tail_desc = tx_desc;
3262 
3263 			/* Collect hw completion contents */
3264 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3265 					&tx_desc->comp, 1);
3266 
3267 		}
3268 
3269 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3270 
3271 		/*
3272 		 * If the processed packet count exceeds the given quota,
3273 		 * stop processing.
3274 		 */
3275 		if ((num_processed >= quota))
3276 			break;
3277 
3278 		count++;
3279 	}
3280 
3281 	hal_srng_access_end(soc->hal_soc, hal_srng);
3282 
3283 	/* Process the reaped descriptors */
3284 	if (head_desc)
3285 		dp_tx_comp_process_desc_list(soc, head_desc);
3286 
3287 	return num_processed;
3288 }
3289 
3290 #ifdef CONVERGED_TDLS_ENABLE
3291 /**
3292  * dp_tx_non_std() - Allow the control-path SW to send data frames
3293  *
3294  * @vdev_handle: which vdev should transmit the tx data frames
3295  * @tx_spec: what non-standard handling to apply to the tx data frames
3296  * @msdu_list: NULL-terminated list of tx MSDUs
3297  *
3298  * Return: NULL on success,
3299  *         nbuf when it fails to send
3300  */
3301 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3302 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3303 {
3304 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3305 
3306 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3307 		vdev->is_tdls_frame = true;
3308 	return dp_tx_send(vdev_handle, msdu_list);
3309 }
3310 #endif
3311 
3312 /**
3313  * dp_tx_vdev_attach() - attach vdev to dp tx
3314  * @vdev: virtual device instance
3315  *
3316  * Return: QDF_STATUS_SUCCESS: success
3317  *         QDF_STATUS_E_RESOURCES: Error return
3318  */
3319 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3320 {
3321 	/*
3322 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3323 	 */
3324 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3325 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3326 
3327 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3328 			vdev->vdev_id);
3329 
3330 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3331 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3332 
3333 	/*
3334 	 * Set HTT Extension Valid bit to 0 by default
3335 	 */
3336 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3337 
3338 	dp_tx_vdev_update_search_flags(vdev);
3339 
3340 	return QDF_STATUS_SUCCESS;
3341 }
3342 
3343 #ifdef FEATURE_WDS
3344 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3345 {
3346 	struct dp_soc *soc = vdev->pdev->soc;
3347 
3348 	/*
3349 	 * If AST index override support is available (HKv2 etc),
3350 	 * the DA search flag should always be enabled.
3351 	 *
3352 	 * If AST index override support is not available (HKv1),
3353 	 * DA search flag should be used for all modes except QWRAP
3354 	 */
3355 	if (soc->ast_override_support || !vdev->proxysta_vdev)
3356 		return true;
3357 
3358 	return false;
3359 }
3360 #else
3361 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3362 {
3363 	return false;
3364 }
3365 #endif
3366 
3367 /**
3368  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3369  * @vdev: virtual device instance
3370  *
3371  * Return: void
3372  *
3373  */
3374 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3375 {
3376 	struct dp_soc *soc = vdev->pdev->soc;
3377 
3378 	/*
3379 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3380 	 * for TDLS link
3381 	 *
3382 	 * Enable AddrY (SA based search) only for non-WDS STA and
3383 	 * ProxySTA VAP (in HKv1) modes.
3384 	 *
3385 	 * In all other VAP modes, only DA based search should be
3386 	 * enabled
3387 	 */
3388 	if (vdev->opmode == wlan_op_mode_sta &&
3389 	    vdev->tdls_link_connected)
3390 		vdev->hal_desc_addr_search_flags =
3391 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3392 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3393 		 !dp_tx_da_search_override(vdev))
3394 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3395 	else
3396 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3397 
3398 	/* Set search type only when peer map v2 messaging is enabled
3399 	 * as we will have the search index (AST hash) only when v2 is
3400 	 * enabled
3401 	 */
3402 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3403 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3404 	else
3405 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3406 }
3407 
3408 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3409 /**
3410  * dp_tx_desc_flush() - release resources associated with tx_desc
3411  * @vdev: virtual device instance
3412  *
3413  * This function frees all outstanding Tx buffers, including ME buffers
3414  * for which either the free during completion did not happen or the
3415  * completion was never received.
3417  */
3418 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3419 {
3420 	uint8_t i;
3421 	uint32_t j;
3422 	uint32_t num_desc, page_id, offset;
3423 	uint16_t num_desc_per_page;
3424 	struct dp_soc *soc = vdev->pdev->soc;
3425 	struct dp_tx_desc_s *tx_desc = NULL;
3426 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3427 
3428 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3429 		tx_desc_pool = &soc->tx_desc[i];
3430 		if (!(tx_desc_pool->pool_size) ||
3431 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3432 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3433 			continue;
3434 
3435 		num_desc = tx_desc_pool->pool_size;
3436 		num_desc_per_page =
3437 			tx_desc_pool->desc_pages.num_element_per_page;
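		/*
		 * Walk every descriptor in this pool by page and offset and
		 * free any allocated descriptor still owned by this vdev.
		 */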
3438 		for (j = 0; j < num_desc; j++) {
3439 			page_id = j / num_desc_per_page;
3440 			offset = j % num_desc_per_page;
3441 
3442 			if (qdf_unlikely(!(tx_desc_pool->
3443 					 desc_pages.cacheable_pages)))
3444 				break;
3445 
3446 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3447 			if (tx_desc && (tx_desc->vdev == vdev) &&
3448 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3449 				dp_tx_comp_free_buf(soc, tx_desc);
3450 				dp_tx_desc_release(tx_desc, i);
3451 			}
3452 		}
3453 	}
3454 }
3455 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3456 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3457 {
3458 	uint8_t i, num_pool;
3459 	uint32_t j;
3460 	uint32_t num_desc, page_id, offset;
3461 	uint16_t num_desc_per_page;
3462 	struct dp_soc *soc = vdev->pdev->soc;
3463 	struct dp_tx_desc_s *tx_desc = NULL;
3464 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3465 
3466 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3467 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3468 
3469 	for (i = 0; i < num_pool; i++) {
3470 		tx_desc_pool = &soc->tx_desc[i];
3471 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3472 			continue;
3473 
3474 		num_desc_per_page =
3475 			tx_desc_pool->desc_pages.num_element_per_page;
3476 		for (j = 0; j < num_desc; j++) {
3477 			page_id = j / num_desc_per_page;
3478 			offset = j % num_desc_per_page;
3479 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3480 
3481 			if (tx_desc && (tx_desc->vdev == vdev) &&
3482 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3483 				dp_tx_comp_free_buf(soc, tx_desc);
3484 				dp_tx_desc_release(tx_desc, i);
3485 			}
3486 		}
3487 	}
3488 }
3489 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3490 
3491 /**
3492  * dp_tx_vdev_detach() - detach vdev from dp tx
3493  * @vdev: virtual device instance
3494  *
3495  * Return: QDF_STATUS_SUCCESS: success
3496  *         QDF_STATUS_E_RESOURCES: Error return
3497  */
3498 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3499 {
3500 	dp_tx_desc_flush(vdev);
3501 	return QDF_STATUS_SUCCESS;
3502 }
3503 
3504 /**
3505  * dp_tx_pdev_attach() - attach pdev to dp tx
3506  * @pdev: physical device instance
3507  *
3508  * Return: QDF_STATUS_SUCCESS: success
3509  *         QDF_STATUS_E_RESOURCES: Error return
3510  */
3511 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3512 {
3513 	struct dp_soc *soc = pdev->soc;
3514 
3515 	/* Initialize Flow control counters */
3516 	qdf_atomic_init(&pdev->num_tx_exception);
3517 	qdf_atomic_init(&pdev->num_tx_outstanding);
3518 
3519 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3520 		/* Initialize descriptors in TCL Ring */
3521 		hal_tx_init_data_ring(soc->hal_soc,
3522 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3523 	}
3524 
3525 	return QDF_STATUS_SUCCESS;
3526 }
3527 
3528 /**
3529  * dp_tx_pdev_detach() - detach pdev from dp tx
3530  * @pdev: physical device instance
3531  *
3532  * Return: QDF_STATUS_SUCCESS: success
3533  *         QDF_STATUS_E_RESOURCES: Error return
3534  */
3535 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3536 {
3537 	dp_tx_me_exit(pdev);
3538 	return QDF_STATUS_SUCCESS;
3539 }
3540 
3541 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3542 /* Pools will be allocated dynamically */
3543 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3544 					int num_desc)
3545 {
3546 	uint8_t i;
3547 
3548 	for (i = 0; i < num_pool; i++) {
3549 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3550 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3551 	}
3552 
3553 	return 0;
3554 }
3555 
3556 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3557 {
3558 	uint8_t i;
3559 
3560 	for (i = 0; i < num_pool; i++)
3561 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3562 }
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3564 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3565 					int num_desc)
3566 {
3567 	uint8_t i;
3568 
3569 	/* Allocate software Tx descriptor pools */
3570 	for (i = 0; i < num_pool; i++) {
3571 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3572 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3573 					"%s Tx Desc Pool alloc %d failed %pK",
3574 					__func__, i, soc);
3575 			return ENOMEM;
3576 		}
3577 	}
3578 	return 0;
3579 }
3580 
3581 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3582 {
3583 	uint8_t i;
3584 
3585 	for (i = 0; i < num_pool; i++) {
3586 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3587 		if (dp_tx_desc_pool_free(soc, i)) {
3588 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3589 				"%s Tx Desc Pool Free failed", __func__);
3590 		}
3591 	}
3592 }
3593 
3594 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3595 
3596 #ifndef QCA_MEM_ATTACH_ON_WIFI3
3597 /**
3598  * dp_tso_attach_wifi3() - TSO attach handler
3599  * @txrx_soc: Opaque Dp handle
3600  *
3601  * Reserve TSO descriptor buffers
3602  *
3603  * Return: QDF_STATUS_E_FAILURE on failure or
3604  * QDF_STATUS_SUCCESS on success
3605  */
3606 static
3607 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3608 {
3609 	return dp_tso_soc_attach(txrx_soc);
3610 }
3611 
3612 /**
3613  * dp_tso_detach_wifi3() - TSO Detach handler
3614  * @txrx_soc: Opaque Dp handle
3615  *
3616  * Deallocate TSO descriptor buffers
3617  *
3618  * Return: QDF_STATUS_E_FAILURE on failure or
3619  * QDF_STATUS_SUCCESS on success
3620  */
3621 static
3622 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3623 {
3624 	return dp_tso_soc_detach(txrx_soc);
3625 }
3626 #else
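/*
 * With QCA_MEM_ATTACH_ON_WIFI3, TSO descriptor memory is presumably reserved
 * elsewhere in the attach path, so these handlers are no-ops here.
 */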
3627 static
3628 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3629 {
3630 	return QDF_STATUS_SUCCESS;
3631 }
3632 
3633 static
3634 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3635 {
3636 	return QDF_STATUS_SUCCESS;
3637 }
3638 #endif
3639 
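/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate the TSO descriptor pool and the TSO num-seg descriptor pool
 * for every Tx descriptor pool.
 *
 * Return: QDF_STATUS_SUCCESS
 */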
3640 QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
3641 {
3642 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3643 	uint8_t i;
3644 	uint8_t num_pool;
3645 	uint32_t num_desc;
3646 
3647 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3648 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3649 
3650 	for (i = 0; i < num_pool; i++)
3651 		dp_tx_tso_desc_pool_free(soc, i);
3652 
3653 	dp_info("%s TSO Desc Pool %d Free descs = %d",
3654 		__func__, num_pool, num_desc);
3655 
3656 	for (i = 0; i < num_pool; i++)
3657 		dp_tx_tso_num_seg_pool_free(soc, i);
3658 
3659 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
3660 		__func__, num_pool, num_desc);
3661 
3662 	return QDF_STATUS_SUCCESS;
3663 }
3664 
3665 /**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers: allocate a TSO descriptor pool and a
 * TSO num-seg descriptor pool for each Tx descriptor pool.
3670  *
3671  * Return: QDF_STATUS_E_FAILURE on failure or
3672  * QDF_STATUS_SUCCESS on success
3673  */
3674 QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
3675 {
3676 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3677 	uint8_t i;
3678 	uint8_t num_pool;
3679 	uint32_t num_desc;
3680 
3681 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3682 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3683 
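	/* Allocate one TSO descriptor pool per Tx descriptor pool */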
3684 	for (i = 0; i < num_pool; i++) {
3685 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3686 			dp_err("TSO Desc Pool alloc %d failed %pK",
3687 			       i, soc);
3688 
3689 			return QDF_STATUS_E_FAILURE;
3690 		}
3691 	}
3692 
3693 	dp_info("%s TSO Desc Alloc %d, descs = %d",
3694 		__func__, num_pool, num_desc);
3695 
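	/* Allocate one TSO num-seg descriptor pool per Tx descriptor pool */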
3696 	for (i = 0; i < num_pool; i++) {
3697 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3698 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
3699 			       i, soc);
3700 
3701 			return QDF_STATUS_E_FAILURE;
3702 		}
3703 	}
3704 	return QDF_STATUS_SUCCESS;
3705 }
3706 
3707 /**
3708  * dp_tx_soc_detach() - detach soc from dp tx
3709  * @soc: core txrx main context
3710  *
 * This function detaches dp tx from the main device context and frees the
 * dp tx resources (descriptor pools, extension pools and TSO pools).
3713  *
3714  * Return: QDF_STATUS_SUCCESS: success
3715  *         QDF_STATUS_E_RESOURCES: Error return
3716  */
3717 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3718 {
3719 	uint8_t num_pool;
3720 	uint16_t num_desc;
3721 	uint16_t num_ext_desc;
3722 	uint8_t i;
3723 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3724 
3725 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3726 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3727 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3728 
3729 	dp_tx_flow_control_deinit(soc);
3730 	dp_tx_delete_static_pools(soc, num_pool);
3731 
3732 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3733 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3734 			__func__, num_pool, num_desc);
3735 
3736 	for (i = 0; i < num_pool; i++) {
3737 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3738 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3739 					"%s Tx Ext Desc Pool Free failed",
3740 					__func__);
3741 			return QDF_STATUS_E_RESOURCES;
3742 		}
3743 	}
3744 
3745 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3746 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3747 			__func__, num_pool, num_ext_desc);
3748 
3749 	status = dp_tso_detach_wifi3(soc);
3750 	if (status != QDF_STATUS_SUCCESS)
3751 		return status;
3752 
3753 	return QDF_STATUS_SUCCESS;
3754 }
3755 
3756 /**
3757  * dp_tx_soc_attach() - attach soc to dp tx
3758  * @soc: core txrx main context
3759  *
 * This function attaches dp tx to the main device context, allocates the
 * dp tx resources (descriptor pools, extension pools and TSO pools) and
 * initializes the TCL data rings.
3762  *
3763  * Return: QDF_STATUS_SUCCESS: success
3764  *         QDF_STATUS_E_RESOURCES: Error return
3765  */
3766 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3767 {
3768 	uint8_t i;
3769 	uint8_t num_pool;
3770 	uint32_t num_desc;
3771 	uint32_t num_ext_desc;
3772 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3773 
3774 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3775 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3776 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3777 
3778 	if (num_pool > MAX_TXDESC_POOLS)
3779 		goto fail;
3780 
3781 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3782 		goto fail;
3783 
3784 	dp_tx_flow_control_init(soc);
3785 
3786 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3787 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3788 			__func__, num_pool, num_desc);
3789 
3790 	/* Allocate extension tx descriptor pools */
3791 	for (i = 0; i < num_pool; i++) {
3792 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3793 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3794 				"MSDU Ext Desc Pool alloc %d failed %pK",
3795 				i, soc);
3796 
3797 			goto fail;
3798 		}
3799 	}
3800 
3801 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3802 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3803 			__func__, num_pool, num_ext_desc);
3804 
3805 	status = dp_tso_attach_wifi3((void *)soc);
3806 	if (status != QDF_STATUS_SUCCESS)
3807 		goto fail;
3808 
3810 	/* Initialize descriptors in TCL Rings */
3811 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3812 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3813 			hal_tx_init_data_ring(soc->hal_soc,
3814 					soc->tcl_data_ring[i].hal_srng);
3815 		}
3816 	}
3817 
	/*
	 * TODO: Add a runtime config option to enable this.
	 *
	 * Due to multiple issues on NPR EMU, Tx status processing is enabled
	 * selectively for NPR EMU only; this should be removed once NPR
	 * platforms are stable.
	 */
3826 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3827 
3828 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3829 			"%s HAL Tx init Success", __func__);
3830 
3831 	return QDF_STATUS_SUCCESS;
3832 
3833 fail:
3834 	/* Detach will take care of freeing only allocated resources */
3835 	dp_tx_soc_detach(soc);
3836 	return QDF_STATUS_E_RESOURCES;
3837 }
3838 
/**
 * dp_tx_me_mem_free() - free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment info list
 *
 * Unmap and free the per-segment ME buffers, nbufs and segment descriptors.
 *
 * Return: none
 */
3846 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3847 		struct dp_tx_seg_info_s *seg_info_head)
3848 {
3849 	struct dp_tx_me_buf_t *mc_uc_buf;
3850 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3851 	qdf_nbuf_t nbuf = NULL;
3852 	uint64_t phy_addr;
3853 
3854 	while (seg_info_head) {
3855 		nbuf = seg_info_head->nbuf;
3856 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3857 			seg_info_head->frags[0].vaddr;
3858 		phy_addr = seg_info_head->frags[0].paddr_hi;
3859 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3864 		qdf_nbuf_free(nbuf);
3865 		seg_info_new = seg_info_head;
3866 		seg_info_head = seg_info_head->next;
3867 		qdf_mem_free(seg_info_new);
3868 	}
3869 }
3870 
3871 /**
 * dp_tx_me_send_convert_ucast() - convert a multicast frame to unicast frames
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the client MAC addresses to which the frame has to be sent
 * @new_mac_cnt: Number of clients
 *
 * Return: number of converted (unicast) packets
3879  */
3880 uint16_t
3881 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3882 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3883 {
3884 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3885 	struct dp_pdev *pdev = vdev->pdev;
3886 	struct ether_header *eh;
3887 	uint8_t *data;
3888 	uint16_t len;
3889 
3890 	/* reference to frame dst addr */
3891 	uint8_t *dstmac;
3892 	/* copy of original frame src addr */
3893 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3894 
3895 	/* local index into newmac */
3896 	uint8_t new_mac_idx = 0;
3897 	struct dp_tx_me_buf_t *mc_uc_buf;
3898 	qdf_nbuf_t  nbuf_clone;
3899 	struct dp_tx_msdu_info_s msdu_info;
3900 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3901 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3902 	struct dp_tx_seg_info_s *seg_info_new;
3903 	struct dp_tx_frag_info_s data_frag;
3904 	qdf_dma_addr_t paddr_data;
3905 	qdf_dma_addr_t paddr_mcbuf = 0;
3906 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3907 	QDF_STATUS status;
3908 
3909 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3910 
3911 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3912 
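	/* Capture the original source MAC so no copy is sent back to it */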
	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3914 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3915 
3916 	len = qdf_nbuf_len(nbuf);
3917 
3918 	data = qdf_nbuf_data(nbuf);
3919 
3920 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3921 			QDF_DMA_TO_DEVICE);
3922 
3923 	if (status) {
3924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3925 				"Mapping failure Error:%d", status);
3926 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3927 		qdf_nbuf_free(nbuf);
3928 		return 1;
3929 	}
3930 
3931 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3932 
3933 	/*preparing data fragment*/
	/* Prepare the shared data fragment: the frame minus the 6-byte DA */
	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
	data_frag.paddr_hi = (((uint64_t)paddr_data) >> 32);
	data_frag.len = len - DP_MAC_ADDR_LEN;
3939 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
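	/*
	 * For each client MAC: allocate a ME buffer holding the replacement
	 * DA, clone (or reuse) the nbuf, and queue a two-fragment SG segment
	 * (frag[0] = new DA, frag[1] = shared payload) for transmission.
	 */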
3940 		dstmac = newmac[new_mac_idx];
3941 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3942 				"added mac addr (%pM)", dstmac);
3943 
3944 		/* Check for NULL Mac Address */
3945 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3946 			continue;
3947 
3948 		/* frame to self mac. skip */
3949 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3950 			continue;
3951 
3952 		/*
3953 		 * TODO: optimize to avoid malloc in per-packet path
3954 		 * For eg. seg_pool can be made part of vdev structure
3955 		 */
3956 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3957 
3958 		if (!seg_info_new) {
3959 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3960 					"alloc failed");
3961 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3962 			goto fail_seg_alloc;
3963 		}
3964 
3965 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3966 		if (mc_uc_buf == NULL)
3967 			goto fail_buf_alloc;
3968 
3969 		/*
3970 		 * TODO: Check if we need to clone the nbuf
3971 		 * Or can we just use the reference for all cases
3972 		 */
3973 		if (new_mac_idx < (new_mac_cnt - 1)) {
3974 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3975 			if (nbuf_clone == NULL) {
3976 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3977 				goto fail_clone;
3978 			}
3979 		} else {
3980 			/*
3981 			 * Update the ref
3982 			 * to account for frame sent without cloning
3983 			 */
3984 			qdf_nbuf_ref(nbuf);
3985 			nbuf_clone = nbuf;
3986 		}
3987 
3988 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3989 
3990 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3991 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3992 				&paddr_mcbuf);
3993 
3994 		if (status) {
3995 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3996 					"Mapping failure Error:%d", status);
3997 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3998 			goto fail_map;
3999 		}
4000 
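		/*
		 * frag[0] carries the 6-byte replacement DA from the ME
		 * buffer; frag[1] carries the shared payload fragment.
		 */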
4001 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
4002 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
4003 		seg_info_new->frags[0].paddr_hi =
4004 			((uint64_t) paddr_mcbuf >> 32);
4005 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
4006 
4007 		seg_info_new->frags[1] = data_frag;
4008 		seg_info_new->nbuf = nbuf_clone;
4009 		seg_info_new->frag_cnt = 2;
4010 		seg_info_new->total_len = len;
4011 
4012 		seg_info_new->next = NULL;
4013 
4014 		if (seg_info_head == NULL)
4015 			seg_info_head = seg_info_new;
4016 		else
4017 			seg_info_tail->next = seg_info_new;
4018 
4019 		seg_info_tail = seg_info_new;
4020 	}
4021 
	if (!seg_info_head)
		goto free_return;
4025 
4026 	msdu_info.u.sg_info.curr_seg = seg_info_head;
4027 	msdu_info.num_seg = new_mac_cnt;
4028 	msdu_info.frm_type = dp_tx_frm_me;
4029 
4030 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
4031 	    qdf_unlikely(pdev->hmmc_tid_override_en))
4032 		msdu_info.tid = pdev->hmmc_tid;
4033 
4034 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
4035 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4036 
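	/* Free the locally allocated segment descriptor list */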
4037 	while (seg_info_head->next) {
4038 		seg_info_new = seg_info_head;
4039 		seg_info_head = seg_info_head->next;
4040 		qdf_mem_free(seg_info_new);
4041 	}
4042 	qdf_mem_free(seg_info_head);
4043 
4044 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4045 	qdf_nbuf_free(nbuf);
4046 	return new_mac_cnt;
4047 
4048 fail_map:
4049 	qdf_nbuf_free(nbuf_clone);
4050 
4051 fail_clone:
4052 	dp_tx_me_free_buf(pdev, mc_uc_buf);
4053 
4054 fail_buf_alloc:
4055 	qdf_mem_free(seg_info_new);
4056 
4057 fail_seg_alloc:
4058 	dp_tx_me_mem_free(pdev, seg_info_head);
4059 
4060 free_return:
4061 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4062 	qdf_nbuf_free(nbuf);
4063 	return 1;
4064 }
4065 
4066