xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 1f55ed1a9f5050d8da228aa8dd3fff7c0242aa71)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
41 
42 /* invalid peer id for reinject*/
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /*mapping between hal encrypt type and cdp_sec_type*/
46 #define MAX_CDP_SEC_TYPE 12
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * TX packet queue has 2 instances, software descriptors id and dma ring id
68  * Based on tx feature and hardware configuration queue id combination could be
69  * different.
70  * For example -
71  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
72  * With no XPS,lock based resource protection, Descriptor pool ids are different
73  * for each vdev, dma ring id will be same as single pdev id
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc - core txrx main context
112  * @seg_desc - tso segment descriptor
113  * @num_seg_desc - tso number segment descriptor
114  */
115 static void dp_tx_tso_unmap_segment(
116 		struct dp_soc *soc,
117 		struct qdf_tso_seg_elem_t *seg_desc,
118 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
119 {
120 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
121 	if (qdf_unlikely(!seg_desc)) {
122 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
123 			 __func__, __LINE__);
124 		qdf_assert(0);
125 	} else if (qdf_unlikely(!num_seg_desc)) {
126 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
127 			 __func__, __LINE__);
128 		qdf_assert(0);
129 	} else {
130 		bool is_last_seg;
131 		/* no tso segment left to do dma unmap */
132 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
133 			return;
134 
135 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
136 					true : false;
137 		qdf_nbuf_unmap_tso_segment(soc->osdev,
138 					   seg_desc, is_last_seg);
139 		num_seg_desc->num_seg.tso_cmn_num_seg--;
140 	}
141 }
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  *
147  * @soc - soc device handle
148  * @tx_desc - Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list*/
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(
183 		struct dp_soc *soc,
184 		struct qdf_tso_seg_elem_t *seg_desc,
185 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
186 
187 {
188 }
189 
190 static void dp_tx_tso_desc_release(struct dp_soc *soc,
191 				   struct dp_tx_desc_s *tx_desc)
192 {
193 }
194 #endif
195 /**
196  * dp_tx_desc_release() - Release Tx Descriptor
197  * @tx_desc : Tx Descriptor
198  * @desc_pool_id: Descriptor Pool ID
199  *
200  * Deallocate all resources attached to Tx descriptor and free the Tx
201  * descriptor.
202  *
203  * Return:
204  */
205 static void
206 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
207 {
208 	struct dp_pdev *pdev = tx_desc->pdev;
209 	struct dp_soc *soc;
210 	uint8_t comp_status = 0;
211 
212 	qdf_assert(pdev);
213 
214 	soc = pdev->soc;
215 
216 	if (tx_desc->frm_type == dp_tx_frm_tso)
217 		dp_tx_tso_desc_release(soc, tx_desc);
218 
219 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
220 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
221 
222 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
223 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
224 
225 	qdf_atomic_dec(&pdev->num_tx_outstanding);
226 
227 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
228 		qdf_atomic_dec(&pdev->num_tx_exception);
229 
230 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
231 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
232 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
233 							     soc->hal_soc);
234 	else
235 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
236 
237 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
238 		"Tx Completion Release desc %d status %d outstanding %d",
239 		tx_desc->id, comp_status,
240 		qdf_atomic_read(&pdev->num_tx_outstanding));
241 
242 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
243 	return;
244 }
245 
246 /**
247  * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
248  * @vdev: DP vdev Handle
249  * @nbuf: skb
250  *
251  * Prepares and fills HTT metadata in the frame pre-header for special frames
252  * that should be transmitted using varying transmit parameters.
253  * There are 2 VDEV modes that currently needs this special metadata -
254  *  1) Mesh Mode
255  *  2) DSRC Mode
256  *
257  * Return: HTT metadata size
258  *
259  */
260 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
261 		uint32_t *meta_data)
262 {
263 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
264 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
265 
266 	uint8_t htt_desc_size;
267 
268 	/* Size rounded of multiple of 8 bytes */
269 	uint8_t htt_desc_size_aligned;
270 
271 	uint8_t *hdr = NULL;
272 
273 	/*
274 	 * Metadata - HTT MSDU Extension header
275 	 */
276 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
277 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
278 
279 	if (vdev->mesh_vdev) {
280 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
281 					htt_desc_size_aligned)) {
282 			DP_STATS_INC(vdev,
283 				     tx_i.dropped.headroom_insufficient, 1);
284 			return 0;
285 		}
286 		/* Fill and add HTT metaheader */
287 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
288 		if (hdr == NULL) {
289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
290 					"Error in filling HTT metadata");
291 
292 			return 0;
293 		}
294 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
295 
296 	} else if (vdev->opmode == wlan_op_mode_ocb) {
297 		/* Todo - Add support for DSRC */
298 	}
299 
300 	return htt_desc_size_aligned;
301 }
302 
303 /**
304  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
305  * @tso_seg: TSO segment to process
306  * @ext_desc: Pointer to MSDU extension descriptor
307  *
308  * Return: void
309  */
310 #if defined(FEATURE_TSO)
311 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
312 		void *ext_desc)
313 {
314 	uint8_t num_frag;
315 	uint32_t tso_flags;
316 
317 	/*
318 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
319 	 * tcp_flag_mask
320 	 *
321 	 * Checksum enable flags are set in TCL descriptor and not in Extension
322 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
323 	 */
324 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
325 
326 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
327 
328 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
329 		tso_seg->tso_flags.ip_len);
330 
331 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
332 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
333 
334 
335 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
336 		uint32_t lo = 0;
337 		uint32_t hi = 0;
338 
339 		qdf_dmaaddr_to_32s(
340 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
341 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
342 			tso_seg->tso_frags[num_frag].length);
343 	}
344 
345 	return;
346 }
347 #else
348 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
349 		void *ext_desc)
350 {
351 	return;
352 }
353 #endif
354 
355 #if defined(FEATURE_TSO)
356 /**
357  * dp_tx_free_tso_seg_list() - Loop through the tso segments
358  *                             allocated and free them
359  *
360  * @soc: soc handle
361  * @free_seg: list of tso segments
362  * @msdu_info: msdu descriptor
363  *
364  * Return - void
365  */
366 static void dp_tx_free_tso_seg_list(
367 		struct dp_soc *soc,
368 		struct qdf_tso_seg_elem_t *free_seg,
369 		struct dp_tx_msdu_info_s *msdu_info)
370 {
371 	struct qdf_tso_seg_elem_t *next_seg;
372 
373 	while (free_seg) {
374 		next_seg = free_seg->next;
375 		dp_tx_tso_desc_free(soc,
376 				    msdu_info->tx_queue.desc_pool_id,
377 				    free_seg);
378 		free_seg = next_seg;
379 	}
380 }
381 
382 /**
383  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
384  *                                 allocated and free them
385  *
386  * @soc:  soc handle
387  * @free_num_seg: list of tso number segments
388  * @msdu_info: msdu descriptor
389  * Return - void
390  */
391 static void dp_tx_free_tso_num_seg_list(
392 		struct dp_soc *soc,
393 		struct qdf_tso_num_seg_elem_t *free_num_seg,
394 		struct dp_tx_msdu_info_s *msdu_info)
395 {
396 	struct qdf_tso_num_seg_elem_t *next_num_seg;
397 
398 	while (free_num_seg) {
399 		next_num_seg = free_num_seg->next;
400 		dp_tso_num_seg_free(soc,
401 				    msdu_info->tx_queue.desc_pool_id,
402 				    free_num_seg);
403 		free_num_seg = next_num_seg;
404 	}
405 }
406 
407 /**
408  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
409  *                              do dma unmap for each segment
410  *
411  * @soc: soc handle
412  * @free_seg: list of tso segments
413  * @num_seg_desc: tso number segment descriptor
414  *
415  * Return - void
416  */
417 static void dp_tx_unmap_tso_seg_list(
418 		struct dp_soc *soc,
419 		struct qdf_tso_seg_elem_t *free_seg,
420 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
421 {
422 	struct qdf_tso_seg_elem_t *next_seg;
423 
424 	if (qdf_unlikely(!num_seg_desc)) {
425 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
426 		return;
427 	}
428 
429 	while (free_seg) {
430 		next_seg = free_seg->next;
431 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
432 		free_seg = next_seg;
433 	}
434 }
435 
436 /**
437  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
438  *				     free the tso segments descriptor and
439  *				     tso num segments descriptor
440  *
441  * @soc:  soc handle
442  * @msdu_info: msdu descriptor
443  * @tso_seg_unmap: flag to show if dma unmap is necessary
444  *
445  * Return - void
446  */
447 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
448 					  struct dp_tx_msdu_info_s *msdu_info,
449 					  bool tso_seg_unmap)
450 {
451 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
452 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
453 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
454 					tso_info->tso_num_seg_list;
455 
456 	/* do dma unmap for each segment */
457 	if (tso_seg_unmap)
458 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
459 
460 	/* free all tso number segment descriptor though looks only have 1 */
461 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
462 
463 	/* free all tso segment descriptor */
464 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
465 }
466 
467 /**
468  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
469  * @vdev: virtual device handle
470  * @msdu: network buffer
471  * @msdu_info: meta data associated with the msdu
472  *
473  * Return: QDF_STATUS_SUCCESS success
474  */
475 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
476 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
477 {
478 	struct qdf_tso_seg_elem_t *tso_seg;
479 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
480 	struct dp_soc *soc = vdev->pdev->soc;
481 	struct qdf_tso_info_t *tso_info;
482 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
483 
484 	tso_info = &msdu_info->u.tso_info;
485 	tso_info->curr_seg = NULL;
486 	tso_info->tso_seg_list = NULL;
487 	tso_info->num_segs = num_seg;
488 	msdu_info->frm_type = dp_tx_frm_tso;
489 	tso_info->tso_num_seg_list = NULL;
490 
491 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
492 
493 	while (num_seg) {
494 		tso_seg = dp_tx_tso_desc_alloc(
495 				soc, msdu_info->tx_queue.desc_pool_id);
496 		if (tso_seg) {
497 			tso_seg->next = tso_info->tso_seg_list;
498 			tso_info->tso_seg_list = tso_seg;
499 			num_seg--;
500 		} else {
501 			DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
502 				 __func__);
503 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
504 
505 			return QDF_STATUS_E_NOMEM;
506 		}
507 	}
508 
509 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
510 
511 	tso_num_seg = dp_tso_num_seg_alloc(soc,
512 			msdu_info->tx_queue.desc_pool_id);
513 
514 	if (tso_num_seg) {
515 		tso_num_seg->next = tso_info->tso_num_seg_list;
516 		tso_info->tso_num_seg_list = tso_num_seg;
517 	} else {
518 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
519 			 __func__);
520 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
521 
522 		return QDF_STATUS_E_NOMEM;
523 	}
524 
525 	msdu_info->num_seg =
526 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
527 
528 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
529 			msdu_info->num_seg);
530 
531 	if (!(msdu_info->num_seg)) {
532 		/*
533 		 * Free allocated TSO seg desc and number seg desc,
534 		 * do unmap for segments if dma map has done.
535 		 */
536 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
537 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
538 
539 		return QDF_STATUS_E_INVAL;
540 	}
541 
542 	tso_info->curr_seg = tso_info->tso_seg_list;
543 
544 	return QDF_STATUS_SUCCESS;
545 }
546 #else
547 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
548 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
549 {
550 	return QDF_STATUS_E_NOMEM;
551 }
552 #endif
553 
554 /**
555  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
556  * @vdev: DP Vdev handle
557  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
558  * @desc_pool_id: Descriptor Pool ID
559  *
560  * Return:
561  */
562 static
563 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
564 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
565 {
566 	uint8_t i;
567 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
568 	struct dp_tx_seg_info_s *seg_info;
569 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
570 	struct dp_soc *soc = vdev->pdev->soc;
571 
572 	/* Allocate an extension descriptor */
573 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
574 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
575 
576 	if (!msdu_ext_desc) {
577 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
578 		return NULL;
579 	}
580 
581 	if (msdu_info->exception_fw &&
582 			qdf_unlikely(vdev->mesh_vdev)) {
583 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
584 				&msdu_info->meta_data[0],
585 				sizeof(struct htt_tx_msdu_desc_ext2_t));
586 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
587 	}
588 
589 	switch (msdu_info->frm_type) {
590 	case dp_tx_frm_sg:
591 	case dp_tx_frm_me:
592 	case dp_tx_frm_raw:
593 		seg_info = msdu_info->u.sg_info.curr_seg;
594 		/* Update the buffer pointers in MSDU Extension Descriptor */
595 		for (i = 0; i < seg_info->frag_cnt; i++) {
596 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
597 				seg_info->frags[i].paddr_lo,
598 				seg_info->frags[i].paddr_hi,
599 				seg_info->frags[i].len);
600 		}
601 
602 		break;
603 
604 	case dp_tx_frm_tso:
605 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
606 				&cached_ext_desc[0]);
607 		break;
608 
609 
610 	default:
611 		break;
612 	}
613 
614 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
615 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
616 
617 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
618 			msdu_ext_desc->vaddr);
619 
620 	return msdu_ext_desc;
621 }
622 
623 /**
624  * dp_tx_trace_pkt() - Trace TX packet at DP layer
625  *
626  * @skb: skb to be traced
627  * @msdu_id: msdu_id of the packet
628  * @vdev_id: vdev_id of the packet
629  *
630  * Return: None
631  */
632 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
633 			    uint8_t vdev_id)
634 {
635 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
636 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
637 	DPTRACE(qdf_dp_trace_ptr(skb,
638 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
639 				 QDF_TRACE_DEFAULT_PDEV_ID,
640 				 qdf_nbuf_data_addr(skb),
641 				 sizeof(qdf_nbuf_data(skb)),
642 				 msdu_id, vdev_id));
643 
644 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
645 
646 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
647 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
648 				      msdu_id, QDF_TX));
649 }
650 
651 /**
652  * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
653  * @vdev: DP vdev handle
654  * @nbuf: skb
655  * @desc_pool_id: Descriptor pool ID
656  * @meta_data: Metadata to the fw
657  * @tx_exc_metadata: Handle that holds exception path metadata
658  * Allocate and prepare Tx descriptor with msdu information.
659  *
660  * Return: Pointer to Tx Descriptor on success,
661  *         NULL on failure
662  */
663 static
664 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
665 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
666 		struct dp_tx_msdu_info_s *msdu_info,
667 		struct cdp_tx_exception_metadata *tx_exc_metadata)
668 {
669 	uint8_t align_pad;
670 	uint8_t is_exception = 0;
671 	uint8_t htt_hdr_size;
672 	struct ether_header *eh;
673 	struct dp_tx_desc_s *tx_desc;
674 	struct dp_pdev *pdev = vdev->pdev;
675 	struct dp_soc *soc = pdev->soc;
676 
677 	/* Allocate software Tx descriptor */
678 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
679 	if (qdf_unlikely(!tx_desc)) {
680 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
681 		return NULL;
682 	}
683 
684 	/* Flow control/Congestion Control counters */
685 	qdf_atomic_inc(&pdev->num_tx_outstanding);
686 
687 	/* Initialize the SW tx descriptor */
688 	tx_desc->nbuf = nbuf;
689 	tx_desc->frm_type = dp_tx_frm_std;
690 	tx_desc->tx_encap_type = (tx_exc_metadata ?
691 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
692 	tx_desc->vdev = vdev;
693 	tx_desc->pdev = pdev;
694 	tx_desc->msdu_ext_desc = NULL;
695 	tx_desc->pkt_offset = 0;
696 
697 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
698 
699 	/* Reset the control block */
700 	qdf_nbuf_reset_ctxt(nbuf);
701 
702 	/*
703 	 * For special modes (vdev_type == ocb or mesh), data frames should be
704 	 * transmitted using varying transmit parameters (tx spec) which include
705 	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
706 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
707 	 * These frames are sent as exception packets to firmware.
708 	 *
709 	 * HW requirement is that metadata should always point to a
710 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
711 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
712 	 *  to get 8-byte aligned start address along with align_pad added
713 	 *
714 	 *  |-----------------------------|
715 	 *  |                             |
716 	 *  |-----------------------------| <-----Buffer Pointer Address given
717 	 *  |                             |  ^    in HW descriptor (aligned)
718 	 *  |       HTT Metadata          |  |
719 	 *  |                             |  |
720 	 *  |                             |  | Packet Offset given in descriptor
721 	 *  |                             |  |
722 	 *  |-----------------------------|  |
723 	 *  |       Alignment Pad         |  v
724 	 *  |-----------------------------| <----- Actual buffer start address
725 	 *  |        SKB Data             |           (Unaligned)
726 	 *  |                             |
727 	 *  |                             |
728 	 *  |                             |
729 	 *  |                             |
730 	 *  |                             |
731 	 *  |-----------------------------|
732 	 */
733 	if (qdf_unlikely((msdu_info->exception_fw)) ||
734 				(vdev->opmode == wlan_op_mode_ocb)) {
735 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
736 
737 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
738 			DP_STATS_INC(vdev,
739 				     tx_i.dropped.headroom_insufficient, 1);
740 			goto failure;
741 		}
742 
743 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
744 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
745 					"qdf_nbuf_push_head failed");
746 			goto failure;
747 		}
748 
749 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
750 				msdu_info->meta_data);
751 		if (htt_hdr_size == 0)
752 			goto failure;
753 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
754 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
755 		is_exception = 1;
756 	}
757 
758 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
759 				qdf_nbuf_map(soc->osdev, nbuf,
760 					QDF_DMA_TO_DEVICE))) {
761 		/* Handle failure */
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
763 				"qdf_nbuf_map failed");
764 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
765 		goto failure;
766 	}
767 
768 	if (qdf_unlikely(vdev->nawds_enabled)) {
769 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
770 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
771 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
772 			is_exception = 1;
773 		}
774 	}
775 
776 #if !TQM_BYPASS_WAR
777 	if (is_exception || tx_exc_metadata)
778 #endif
779 	{
780 		/* Temporary WAR due to TQM VP issues */
781 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
782 		qdf_atomic_inc(&pdev->num_tx_exception);
783 	}
784 
785 	return tx_desc;
786 
787 failure:
788 	dp_tx_desc_release(tx_desc, desc_pool_id);
789 	return NULL;
790 }
791 
792 /**
793  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
794  * @vdev: DP vdev handle
795  * @nbuf: skb
796  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
797  * @desc_pool_id : Descriptor Pool ID
798  *
799  * Allocate and prepare Tx descriptor with msdu and fragment descritor
800  * information. For frames wth fragments, allocate and prepare
801  * an MSDU extension descriptor
802  *
803  * Return: Pointer to Tx Descriptor on success,
804  *         NULL on failure
805  */
806 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
807 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
808 		uint8_t desc_pool_id)
809 {
810 	struct dp_tx_desc_s *tx_desc;
811 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
812 	struct dp_pdev *pdev = vdev->pdev;
813 	struct dp_soc *soc = pdev->soc;
814 
815 	/* Allocate software Tx descriptor */
816 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
817 	if (!tx_desc) {
818 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
819 		return NULL;
820 	}
821 
822 	/* Flow control/Congestion Control counters */
823 	qdf_atomic_inc(&pdev->num_tx_outstanding);
824 
825 	/* Initialize the SW tx descriptor */
826 	tx_desc->nbuf = nbuf;
827 	tx_desc->frm_type = msdu_info->frm_type;
828 	tx_desc->tx_encap_type = vdev->tx_encap_type;
829 	tx_desc->vdev = vdev;
830 	tx_desc->pdev = pdev;
831 	tx_desc->pkt_offset = 0;
832 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
833 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
834 
835 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
836 
837 	/* Reset the control block */
838 	qdf_nbuf_reset_ctxt(nbuf);
839 
840 	/* Handle scattered frames - TSO/SG/ME */
841 	/* Allocate and prepare an extension descriptor for scattered frames */
842 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
843 	if (!msdu_ext_desc) {
844 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
845 				"%s Tx Extension Descriptor Alloc Fail",
846 				__func__);
847 		goto failure;
848 	}
849 
850 #if TQM_BYPASS_WAR
851 	/* Temporary WAR due to TQM VP issues */
852 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
853 	qdf_atomic_inc(&pdev->num_tx_exception);
854 #endif
855 	if (qdf_unlikely(msdu_info->exception_fw))
856 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
857 
858 	tx_desc->msdu_ext_desc = msdu_ext_desc;
859 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
860 
861 	return tx_desc;
862 failure:
863 	dp_tx_desc_release(tx_desc, desc_pool_id);
864 	return NULL;
865 }
866 
867 /**
868  * dp_tx_prepare_raw() - Prepare RAW packet TX
869  * @vdev: DP vdev handle
870  * @nbuf: buffer pointer
871  * @seg_info: Pointer to Segment info Descriptor to be prepared
872  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
873  *     descriptor
874  *
875  * Return:
876  */
877 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
878 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
879 {
880 	qdf_nbuf_t curr_nbuf = NULL;
881 	uint16_t total_len = 0;
882 	qdf_dma_addr_t paddr;
883 	int32_t i;
884 	int32_t mapped_buf_num = 0;
885 
886 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
887 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
888 
889 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
890 
891 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
892 	if (vdev->raw_mode_war &&
893 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
894 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
895 
896 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
897 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
898 
899 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
900 					QDF_DMA_TO_DEVICE)) {
901 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
902 				"%s dma map error ", __func__);
903 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
904 			mapped_buf_num = i;
905 			goto error;
906 		}
907 
908 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
909 		seg_info->frags[i].paddr_lo = paddr;
910 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
911 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
912 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
913 		total_len += qdf_nbuf_len(curr_nbuf);
914 	}
915 
916 	seg_info->frag_cnt = i;
917 	seg_info->total_len = total_len;
918 	seg_info->next = NULL;
919 
920 	sg_info->curr_seg = seg_info;
921 
922 	msdu_info->frm_type = dp_tx_frm_raw;
923 	msdu_info->num_seg = 1;
924 
925 	return nbuf;
926 
927 error:
928 	i = 0;
929 	while (nbuf) {
930 		curr_nbuf = nbuf;
931 		if (i < mapped_buf_num) {
932 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
933 			i++;
934 		}
935 		nbuf = qdf_nbuf_next(nbuf);
936 		qdf_nbuf_free(curr_nbuf);
937 	}
938 	return NULL;
939 
940 }
941 
942 /**
943  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
944  * @soc: DP Soc Handle
945  * @vdev: DP vdev handle
946  * @tx_desc: Tx Descriptor Handle
947  * @tid: TID from HLOS for overriding default DSCP-TID mapping
948  * @fw_metadata: Metadata to send to Target Firmware along with frame
949  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
950  * @tx_exc_metadata: Handle that holds exception path meta data
951  *
952  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
953  *  from software Tx descriptor
954  *
955  * Return:
956  */
957 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
958 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
959 				   uint16_t fw_metadata, uint8_t ring_id,
960 				   struct cdp_tx_exception_metadata
961 					*tx_exc_metadata)
962 {
963 	uint8_t type;
964 	uint16_t length;
965 	void *hal_tx_desc, *hal_tx_desc_cached;
966 	qdf_dma_addr_t dma_addr;
967 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
968 
969 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
970 			tx_exc_metadata->sec_type : vdev->sec_type);
971 
972 	/* Return Buffer Manager ID */
973 	uint8_t bm_id = ring_id;
974 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
975 
976 	hal_tx_desc_cached = (void *) cached_desc;
977 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
978 
979 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
980 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
981 		type = HAL_TX_BUF_TYPE_EXT_DESC;
982 		dma_addr = tx_desc->msdu_ext_desc->paddr;
983 	} else {
984 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
985 		type = HAL_TX_BUF_TYPE_BUFFER;
986 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
987 	}
988 
989 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
990 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
991 					dma_addr, bm_id, tx_desc->id,
992 					type, soc->hal_soc);
993 
994 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
995 		return QDF_STATUS_E_RESOURCES;
996 
997 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
998 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
999 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1000 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1001 				vdev->pdev->lmac_id);
1002 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1003 				    vdev->search_type);
1004 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1005 				     vdev->bss_ast_hash);
1006 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1007 					  vdev->dscp_tid_map_id);
1008 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1009 			sec_type_map[sec_type]);
1010 
1011 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1012 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1013 			__func__, length, type, (uint64_t)dma_addr,
1014 			tx_desc->pkt_offset, tx_desc->id);
1015 
1016 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1017 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1018 
1019 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1020 			vdev->hal_desc_addr_search_flags);
1021 
1022 	/* verify checksum offload configuration*/
1023 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
1024 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1025 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1026 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1027 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1028 	}
1029 
1030 	if (tid != HTT_TX_EXT_TID_INVALID)
1031 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1032 
1033 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1034 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
1035 
1036 
1037 	/* Sync cached descriptor with HW */
1038 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1039 
1040 	if (!hal_tx_desc) {
1041 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1042 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
1043 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1044 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1045 		return QDF_STATUS_E_RESOURCES;
1046 	}
1047 
1048 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1049 
1050 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1051 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
1052 
1053 	return QDF_STATUS_SUCCESS;
1054 }
1055 
1056 
1057 /**
1058  * dp_cce_classify() - Classify the frame based on CCE rules
1059  * @vdev: DP vdev handle
1060  * @nbuf: skb
1061  *
1062  * Classify frames based on CCE rules
1063  * Return: bool( true if classified,
1064  *               else false)
1065  */
1066 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1067 {
1068 	struct ether_header *eh = NULL;
1069 	uint16_t   ether_type;
1070 	qdf_llc_t *llcHdr;
1071 	qdf_nbuf_t nbuf_clone = NULL;
1072 	qdf_dot3_qosframe_t *qos_wh = NULL;
1073 
1074 	/* for mesh packets don't do any classification */
1075 	if (qdf_unlikely(vdev->mesh_vdev))
1076 		return false;
1077 
1078 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1079 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1080 		ether_type = eh->ether_type;
1081 		llcHdr = (qdf_llc_t *)(nbuf->data +
1082 					sizeof(struct ether_header));
1083 	} else {
1084 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1085 		/* For encrypted packets don't do any classification */
1086 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1087 			return false;
1088 
1089 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1090 			if (qdf_unlikely(
1091 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1092 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1093 
1094 				ether_type = *(uint16_t *)(nbuf->data
1095 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1096 						+ sizeof(qdf_llc_t)
1097 						- sizeof(ether_type));
1098 				llcHdr = (qdf_llc_t *)(nbuf->data +
1099 						QDF_IEEE80211_4ADDR_HDR_LEN);
1100 			} else {
1101 				ether_type = *(uint16_t *)(nbuf->data
1102 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1103 						+ sizeof(qdf_llc_t)
1104 						- sizeof(ether_type));
1105 				llcHdr = (qdf_llc_t *)(nbuf->data +
1106 					QDF_IEEE80211_3ADDR_HDR_LEN);
1107 			}
1108 
1109 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1110 				&& (ether_type ==
1111 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1112 
1113 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1114 				return true;
1115 			}
1116 		}
1117 
1118 		return false;
1119 	}
1120 
1121 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1122 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1123 				sizeof(*llcHdr));
1124 		nbuf_clone = qdf_nbuf_clone(nbuf);
1125 		if (qdf_unlikely(nbuf_clone)) {
1126 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1127 
1128 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1129 				qdf_nbuf_pull_head(nbuf_clone,
1130 						sizeof(qdf_net_vlanhdr_t));
1131 			}
1132 		}
1133 	} else {
1134 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1135 			nbuf_clone = qdf_nbuf_clone(nbuf);
1136 			if (qdf_unlikely(nbuf_clone)) {
1137 				qdf_nbuf_pull_head(nbuf_clone,
1138 					sizeof(qdf_net_vlanhdr_t));
1139 			}
1140 		}
1141 	}
1142 
1143 	if (qdf_unlikely(nbuf_clone))
1144 		nbuf = nbuf_clone;
1145 
1146 
1147 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1148 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1149 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1150 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1151 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1152 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1153 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1154 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1155 		if (qdf_unlikely(nbuf_clone != NULL))
1156 			qdf_nbuf_free(nbuf_clone);
1157 		return true;
1158 	}
1159 
1160 	if (qdf_unlikely(nbuf_clone != NULL))
1161 		qdf_nbuf_free(nbuf_clone);
1162 
1163 	return false;
1164 }
1165 
1166 /**
1167  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1168  * @vdev: DP vdev handle
1169  * @nbuf: skb
1170  *
1171  * Extract the DSCP or PCP information from frame and map into TID value.
1172  * Software based TID classification is required when more than 2 DSCP-TID
1173  * mapping tables are needed.
1174  * Hardware supports 2 DSCP-TID mapping tables
1175  *
1176  * Return: void
1177  */
1178 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1179 		struct dp_tx_msdu_info_s *msdu_info)
1180 {
1181 	uint8_t tos = 0, dscp_tid_override = 0;
1182 	uint8_t *hdr_ptr, *L3datap;
1183 	uint8_t is_mcast = 0;
1184 	struct ether_header *eh = NULL;
1185 	qdf_ethervlan_header_t *evh = NULL;
1186 	uint16_t   ether_type;
1187 	qdf_llc_t *llcHdr;
1188 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1189 
1190 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1191 
1192 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1193 		return;
1194 
1195 	/* for mesh packets don't do any classification */
1196 	if (qdf_unlikely(vdev->mesh_vdev))
1197 		return;
1198 
1199 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1200 		eh = (struct ether_header *) nbuf->data;
1201 		hdr_ptr = eh->ether_dhost;
1202 		L3datap = hdr_ptr + sizeof(struct ether_header);
1203 	} else {
1204 		qdf_dot3_qosframe_t *qos_wh =
1205 			(qdf_dot3_qosframe_t *) nbuf->data;
1206 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1207 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1208 		return;
1209 	}
1210 
1211 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1212 	ether_type = eh->ether_type;
1213 
1214 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1215 	/*
1216 	 * Check if packet is dot3 or eth2 type.
1217 	 */
1218 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1219 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1220 				sizeof(*llcHdr));
1221 
1222 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1223 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1224 				sizeof(*llcHdr);
1225 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1226 					+ sizeof(*llcHdr) +
1227 					sizeof(qdf_net_vlanhdr_t));
1228 		} else {
1229 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1230 				sizeof(*llcHdr);
1231 		}
1232 	} else {
1233 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1234 			evh = (qdf_ethervlan_header_t *) eh;
1235 			ether_type = evh->ether_type;
1236 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1237 		}
1238 	}
1239 
1240 	/*
1241 	 * Find priority from IP TOS DSCP field
1242 	 */
1243 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1244 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1245 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1246 			/* Only for unicast frames */
1247 			if (!is_mcast) {
1248 				/* send it on VO queue */
1249 				msdu_info->tid = DP_VO_TID;
1250 			}
1251 		} else {
1252 			/*
1253 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1254 			 * from TOS byte.
1255 			 */
1256 			tos = ip->ip_tos;
1257 			dscp_tid_override = 1;
1258 
1259 		}
1260 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1261 		/* TODO
1262 		 * use flowlabel
1263 		 *igmpmld cases to be handled in phase 2
1264 		 */
1265 		unsigned long ver_pri_flowlabel;
1266 		unsigned long pri;
1267 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1268 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1269 			DP_IPV6_PRIORITY_SHIFT;
1270 		tos = pri;
1271 		dscp_tid_override = 1;
1272 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1273 		msdu_info->tid = DP_VO_TID;
1274 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1275 		/* Only for unicast frames */
1276 		if (!is_mcast) {
1277 			/* send ucast arp on VO queue */
1278 			msdu_info->tid = DP_VO_TID;
1279 		}
1280 	}
1281 
1282 	/*
1283 	 * Assign all MCAST packets to BE
1284 	 */
1285 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1286 		if (is_mcast) {
1287 			tos = 0;
1288 			dscp_tid_override = 1;
1289 		}
1290 	}
1291 
1292 	if (dscp_tid_override == 1) {
1293 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1294 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1295 	}
1296 	return;
1297 }
1298 
1299 #ifdef CONVERGED_TDLS_ENABLE
1300 /**
1301  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1302  * @tx_desc: TX descriptor
1303  *
1304  * Return: None
1305  */
1306 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1307 {
1308 	if (tx_desc->vdev) {
1309 		if (tx_desc->vdev->is_tdls_frame) {
1310 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1311 			tx_desc->vdev->is_tdls_frame = false;
1312 		}
1313 	}
1314 }
1315 
1316 /**
1317  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1318  * @tx_desc: TX descriptor
1319  * @vdev: datapath vdev handle
1320  *
1321  * Return: None
1322  */
1323 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1324 				  struct dp_vdev *vdev)
1325 {
1326 	struct hal_tx_completion_status ts = {0};
1327 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1328 
1329 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1330 	if (vdev->tx_non_std_data_callback.func) {
1331 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1332 		vdev->tx_non_std_data_callback.func(
1333 				vdev->tx_non_std_data_callback.ctxt,
1334 				nbuf, ts.status);
1335 		return;
1336 	}
1337 }
1338 #endif
1339 
1340 /**
1341  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1342  * @vdev: DP vdev handle
1343  * @nbuf: skb
1344  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1345  * @meta_data: Metadata to the fw
1346  * @tx_q: Tx queue to be used for this Tx frame
1347  * @peer_id: peer_id of the peer in case of NAWDS frames
1348  * @tx_exc_metadata: Handle that holds exception path metadata
1349  *
1350  * Return: NULL on success,
1351  *         nbuf when it fails to send
1352  */
1353 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1354 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1355 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1356 {
1357 	struct dp_pdev *pdev = vdev->pdev;
1358 	struct dp_soc *soc = pdev->soc;
1359 	struct dp_tx_desc_s *tx_desc;
1360 	QDF_STATUS status;
1361 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1362 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1363 	uint16_t htt_tcl_metadata = 0;
1364 	uint8_t tid = msdu_info->tid;
1365 
1366 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1367 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1368 			msdu_info, tx_exc_metadata);
1369 	if (!tx_desc) {
1370 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1371 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1372 			  __func__, vdev, tx_q->desc_pool_id);
1373 		return nbuf;
1374 	}
1375 
1376 	if (qdf_unlikely(soc->cce_disable)) {
1377 		if (dp_cce_classify(vdev, nbuf) == true) {
1378 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1379 			tid = DP_VO_TID;
1380 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1381 		}
1382 	}
1383 
1384 	dp_tx_update_tdls_flags(tx_desc);
1385 
1386 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1387 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1388 				"%s %d : HAL RING Access Failed -- %pK",
1389 				__func__, __LINE__, hal_srng);
1390 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1391 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1392 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1393 		goto fail_return;
1394 	}
1395 
1396 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1397 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1398 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1399 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1400 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1401 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1402 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1403 				peer_id);
1404 	} else
1405 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1406 
1407 
1408 	if (msdu_info->exception_fw) {
1409 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1410 	}
1411 
1412 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1413 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1414 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1415 
1416 	if (status != QDF_STATUS_SUCCESS) {
1417 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1418 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1419 			  __func__, tx_desc, tx_q->ring_id);
1420 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1421 		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
1422 		goto fail_return;
1423 	}
1424 
1425 	nbuf = NULL;
1426 
1427 fail_return:
1428 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1429 		hal_srng_access_end(soc->hal_soc, hal_srng);
1430 		hif_pm_runtime_put(soc->hif_handle);
1431 	} else {
1432 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1433 	}
1434 
1435 	return nbuf;
1436 }
1437 
1438 /**
1439  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1440  * @vdev: DP vdev handle
1441  * @nbuf: skb
1442  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1443  *
1444  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1445  *
1446  * Return: NULL on success,
1447  *         nbuf when it fails to send
1448  */
1449 #if QDF_LOCK_STATS
1450 static noinline
1451 #else
1452 static
1453 #endif
1454 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1455 				    struct dp_tx_msdu_info_s *msdu_info)
1456 {
1457 	uint8_t i;
1458 	struct dp_pdev *pdev = vdev->pdev;
1459 	struct dp_soc *soc = pdev->soc;
1460 	struct dp_tx_desc_s *tx_desc;
1461 	bool is_cce_classified = false;
1462 	QDF_STATUS status;
1463 	uint16_t htt_tcl_metadata = 0;
1464 
1465 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1466 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1467 
1468 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1469 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1470 				"%s %d : HAL RING Access Failed -- %pK",
1471 				__func__, __LINE__, hal_srng);
1472 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1473 		return nbuf;
1474 	}
1475 
1476 	if (qdf_unlikely(soc->cce_disable)) {
1477 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1478 		if (is_cce_classified) {
1479 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1480 			msdu_info->tid = DP_VO_TID;
1481 		}
1482 	}
1483 
1484 	if (msdu_info->frm_type == dp_tx_frm_me)
1485 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1486 
1487 	i = 0;
1488 	/* Print statement to track i and num_seg */
1489 	/*
1490 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1491 	 * descriptors using information in msdu_info
1492 	 */
1493 	while (i < msdu_info->num_seg) {
1494 		/*
1495 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1496 		 * descriptor
1497 		 */
1498 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1499 				tx_q->desc_pool_id);
1500 
1501 		if (!tx_desc) {
1502 			if (msdu_info->frm_type == dp_tx_frm_me) {
1503 				dp_tx_me_free_buf(pdev,
1504 					(void *)(msdu_info->u.sg_info
1505 						.curr_seg->frags[0].vaddr));
1506 			}
1507 			goto done;
1508 		}
1509 
1510 		if (msdu_info->frm_type == dp_tx_frm_me) {
1511 			tx_desc->me_buffer =
1512 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1513 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1514 		}
1515 
1516 		if (is_cce_classified)
1517 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1518 
1519 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1520 		if (msdu_info->exception_fw) {
1521 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1522 		}
1523 
1524 		/*
1525 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1526 		 */
1527 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1528 			htt_tcl_metadata, tx_q->ring_id, NULL);
1529 
1530 		if (status != QDF_STATUS_SUCCESS) {
1531 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1532 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1533 				  __func__, tx_desc, tx_q->ring_id);
1534 
1535 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1536 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1537 
1538 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1539 			goto done;
1540 		}
1541 
1542 		/*
1543 		 * TODO
1544 		 * if tso_info structure can be modified to have curr_seg
1545 		 * as first element, following 2 blocks of code (for TSO and SG)
1546 		 * can be combined into 1
1547 		 */
1548 
1549 		/*
1550 		 * For frames with multiple segments (TSO, ME), jump to next
1551 		 * segment.
1552 		 */
1553 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1554 			if (msdu_info->u.tso_info.curr_seg->next) {
1555 				msdu_info->u.tso_info.curr_seg =
1556 					msdu_info->u.tso_info.curr_seg->next;
1557 
1558 				/*
1559 				 * If this is a jumbo nbuf, then increment the number of
1560 				 * nbuf users for each additional segment of the msdu.
1561 				 * This will ensure that the skb is freed only after
1562 				 * receiving tx completion for all segments of an nbuf
1563 				 */
1564 				qdf_nbuf_inc_users(nbuf);
1565 
1566 				/* Check with MCL if this is needed */
1567 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1568 			}
1569 		}
1570 
1571 		/*
1572 		 * For Multicast-Unicast converted packets,
1573 		 * each converted frame (for a client) is represented as
1574 		 * 1 segment
1575 		 */
1576 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1577 				(msdu_info->frm_type == dp_tx_frm_me)) {
1578 			if (msdu_info->u.sg_info.curr_seg->next) {
1579 				msdu_info->u.sg_info.curr_seg =
1580 					msdu_info->u.sg_info.curr_seg->next;
1581 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1582 			}
1583 		}
1584 		i++;
1585 	}
1586 
1587 	nbuf = NULL;
1588 
1589 done:
1590 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1591 		hal_srng_access_end(soc->hal_soc, hal_srng);
1592 		hif_pm_runtime_put(soc->hif_handle);
1593 	} else {
1594 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1595 	}
1596 
1597 	return nbuf;
1598 }
1599 
1600 /**
1601  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1602  *                     for SG frames
1603  * @vdev: DP vdev handle
1604  * @nbuf: skb
1605  * @seg_info: Pointer to Segment info Descriptor to be prepared
1606  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1607  *
1608  * Return: NULL on success,
1609  *         nbuf when it fails to send
1610  */
1611 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1612 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1613 {
1614 	uint32_t cur_frag, nr_frags;
1615 	qdf_dma_addr_t paddr;
1616 	struct dp_tx_sg_info_s *sg_info;
1617 
1618 	sg_info = &msdu_info->u.sg_info;
1619 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1620 
1621 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1622 				QDF_DMA_TO_DEVICE)) {
1623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1624 				"dma map error");
1625 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1626 
1627 		qdf_nbuf_free(nbuf);
1628 		return NULL;
1629 	}
1630 
1631 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1632 	seg_info->frags[0].paddr_lo = paddr;
1633 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1634 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1635 	seg_info->frags[0].vaddr = (void *) nbuf;
1636 
1637 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1638 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1639 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1640 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1641 					"frag dma map error");
1642 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1643 			qdf_nbuf_free(nbuf);
1644 			return NULL;
1645 		}
1646 
1647 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1648 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1649 		seg_info->frags[cur_frag + 1].paddr_hi =
1650 			((uint64_t) paddr) >> 32;
1651 		seg_info->frags[cur_frag + 1].len =
1652 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1653 	}
1654 
1655 	seg_info->frag_cnt = (cur_frag + 1);
1656 	seg_info->total_len = qdf_nbuf_len(nbuf);
1657 	seg_info->next = NULL;
1658 
1659 	sg_info->curr_seg = seg_info;
1660 
1661 	msdu_info->frm_type = dp_tx_frm_sg;
1662 	msdu_info->num_seg = 1;
1663 
1664 	return nbuf;
1665 }
1666 
1667 #ifdef MESH_MODE_SUPPORT
1668 
1669 /**
1670  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
1671 				and prepare msdu_info for mesh frames.
1672  * @vdev: DP vdev handle
1673  * @nbuf: skb
1674  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1675  *
1676  * Return: NULL on failure,
1677  *         nbuf when extracted successfully
1678  */
1679 static
1680 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1681 				struct dp_tx_msdu_info_s *msdu_info)
1682 {
1683 	struct meta_hdr_s *mhdr;
1684 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1685 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1686 
1687 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1688 
1689 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1690 		msdu_info->exception_fw = 0;
1691 		goto remove_meta_hdr;
1692 	}
1693 
1694 	msdu_info->exception_fw = 1;
1695 
1696 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1697 
1698 	meta_data->host_tx_desc_pool = 1;
1699 	meta_data->update_peer_cache = 1;
1700 	meta_data->learning_frame = 1;
1701 
1702 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1703 		meta_data->power = mhdr->power;
1704 
1705 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1706 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1707 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1708 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1709 
1710 		meta_data->dyn_bw = 1;
1711 
1712 		meta_data->valid_pwr = 1;
1713 		meta_data->valid_mcs_mask = 1;
1714 		meta_data->valid_nss_mask = 1;
1715 		meta_data->valid_preamble_type  = 1;
1716 		meta_data->valid_retries = 1;
1717 		meta_data->valid_bw_info = 1;
1718 	}
1719 
1720 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1721 		meta_data->encrypt_type = 0;
1722 		meta_data->valid_encrypt_type = 1;
1723 		meta_data->learning_frame = 0;
1724 	}
1725 
1726 	meta_data->valid_key_flags = 1;
1727 	meta_data->key_flags = (mhdr->keyix & 0x3);
1728 
1729 remove_meta_hdr:
1730 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1731 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1732 				"qdf_nbuf_pull_head failed");
1733 		qdf_nbuf_free(nbuf);
1734 		return NULL;
1735 	}
1736 
1737 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1738 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1739 	else
1740 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1741 
1742 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1743 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1744 			" tid %d to_fw %d",
1745 			__func__, msdu_info->meta_data[0],
1746 			msdu_info->meta_data[1],
1747 			msdu_info->meta_data[2],
1748 			msdu_info->meta_data[3],
1749 			msdu_info->meta_data[4],
1750 			msdu_info->meta_data[5],
1751 			msdu_info->tid, msdu_info->exception_fw);
1752 
1753 	return nbuf;
1754 }
1755 #else
1756 static
1757 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1758 				struct dp_tx_msdu_info_s *msdu_info)
1759 {
1760 	return nbuf;
1761 }
1762 
1763 #endif
1764 
1765 #ifdef DP_FEATURE_NAWDS_TX
1766 /**
1767  * dp_tx_prepare_nawds(): Tramit NAWDS frames
1768  * @vdev: dp_vdev handle
1769  * @nbuf: skb
1770  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1771  * @tx_q: Tx queue to be used for this Tx frame
1772  * @meta_data: Meta date for mesh
1773  * @peer_id: peer_id of the peer in case of NAWDS frames
1774  *
1775  * return: NULL on success nbuf on failure
1776  */
1777 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1778 		struct dp_tx_msdu_info_s *msdu_info)
1779 {
1780 	struct dp_peer *peer = NULL;
1781 	struct dp_soc *soc = vdev->pdev->soc;
1782 	struct dp_ast_entry *ast_entry = NULL;
1783 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1784 	uint16_t peer_id = HTT_INVALID_PEER;
1785 
1786 	struct dp_peer *sa_peer = NULL;
1787 	qdf_nbuf_t nbuf_copy;
1788 
1789 	qdf_spin_lock_bh(&(soc->ast_lock));
1790 	ast_entry = dp_peer_ast_hash_find_by_pdevid
1791 				(soc,
1792 				 (uint8_t *)(eh->ether_shost),
1793 				 vdev->pdev->pdev_id);
1794 
1795 	if (ast_entry)
1796 		sa_peer = ast_entry->peer;
1797 
1798 	qdf_spin_unlock_bh(&(soc->ast_lock));
1799 
1800 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1801 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1802 				(peer->nawds_enabled)) {
1803 			if (sa_peer == peer) {
1804 				QDF_TRACE(QDF_MODULE_ID_DP,
1805 						QDF_TRACE_LEVEL_DEBUG,
1806 						" %s: broadcast multicast packet",
1807 						 __func__);
1808 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1809 				continue;
1810 			}
1811 
1812 			nbuf_copy = qdf_nbuf_copy(nbuf);
1813 			if (!nbuf_copy) {
1814 				QDF_TRACE(QDF_MODULE_ID_DP,
1815 						QDF_TRACE_LEVEL_ERROR,
1816 						"nbuf copy failed");
1817 			}
1818 
1819 			peer_id = peer->peer_ids[0];
1820 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1821 					msdu_info, peer_id, NULL);
1822 			if (nbuf_copy != NULL) {
1823 				qdf_nbuf_free(nbuf_copy);
1824 				continue;
1825 			}
1826 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1827 						1, qdf_nbuf_len(nbuf));
1828 		}
1829 	}
1830 	if (peer_id == HTT_INVALID_PEER)
1831 		return nbuf;
1832 
1833 	return NULL;
1834 }
1835 #endif
1836 
1837 /**
1838  * dp_check_exc_metadata() - Checks if parameters are valid
1839  * @tx_exc - holds all exception path parameters
1840  *
1841  * Returns true when all the parameters are valid else false
1842  *
1843  */
1844 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1845 {
1846 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1847 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1848 	    tx_exc->sec_type > cdp_num_sec_types) {
1849 		return false;
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 /**
1856  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1857  * @vap_dev: DP vdev handle
1858  * @nbuf: skb
1859  * @tx_exc_metadata: Handle that holds exception path meta data
1860  *
1861  * Entry point for Core Tx layer (DP_TX) invoked from
1862  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1863  *
1864  * Return: NULL on success,
1865  *         nbuf when it fails to send
1866  */
1867 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1868 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1869 {
1870 	struct ether_header *eh = NULL;
1871 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1872 	struct dp_tx_msdu_info_s msdu_info;
1873 
1874 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1875 
1876 	msdu_info.tid = tx_exc_metadata->tid;
1877 
1878 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1879 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1880 			"%s , skb %pM",
1881 			__func__, nbuf->data);
1882 
1883 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1884 
1885 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1886 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1887 			"Invalid parameters in exception path");
1888 		goto fail;
1889 	}
1890 
1891 	/* Basic sanity checks for unsupported packets */
1892 
1893 	/* MESH mode */
1894 	if (qdf_unlikely(vdev->mesh_vdev)) {
1895 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1896 			"Mesh mode is not supported in exception path");
1897 		goto fail;
1898 	}
1899 
1900 	/* TSO or SG */
1901 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1902 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1903 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1904 			  "TSO and SG are not supported in exception path");
1905 
1906 		goto fail;
1907 	}
1908 
1909 	/* RAW */
1910 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1911 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1912 			  "Raw frame is not supported in exception path");
1913 		goto fail;
1914 	}
1915 
1916 
1917 	/* Mcast enhancement*/
1918 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1919 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
1920 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
1921 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1922 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1923 		}
1924 	}
1925 
1926 	/*
1927 	 * Get HW Queue to use for this frame.
1928 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
1929 	 * dedicated for data and 1 for command.
1930 	 * "queue_id" maps to one hardware ring.
1931 	 *  With each ring, we also associate a unique Tx descriptor pool
1932 	 *  to minimize lock contention for these resources.
1933 	 */
1934 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1935 
1936 	/*  Single linear frame */
1937 	/*
1938 	 * If nbuf is a simple linear frame, use send_single function to
1939 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1940 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1941 	 */
1942 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1943 			tx_exc_metadata->peer_id, tx_exc_metadata);
1944 
1945 	return nbuf;
1946 
1947 fail:
1948 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1949 			"pkt send failed");
1950 	return nbuf;
1951 }
1952 
1953 /**
1954  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1955  * @vap_dev: DP vdev handle
1956  * @nbuf: skb
1957  *
1958  * Entry point for Core Tx layer (DP_TX) invoked from
1959  * hard_start_xmit in OSIF/HDD
1960  *
1961  * Return: NULL on success,
1962  *         nbuf when it fails to send
1963  */
1964 #ifdef MESH_MODE_SUPPORT
1965 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1966 {
1967 	struct meta_hdr_s *mhdr;
1968 	qdf_nbuf_t nbuf_mesh = NULL;
1969 	qdf_nbuf_t nbuf_clone = NULL;
1970 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1971 	uint8_t no_enc_frame = 0;
1972 
1973 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1974 	if (nbuf_mesh == NULL) {
1975 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1976 				"qdf_nbuf_unshare failed");
1977 		return nbuf;
1978 	}
1979 	nbuf = nbuf_mesh;
1980 
1981 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1982 
1983 	if ((vdev->sec_type != cdp_sec_type_none) &&
1984 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1985 		no_enc_frame = 1;
1986 
1987 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1988 		       !no_enc_frame) {
1989 		nbuf_clone = qdf_nbuf_clone(nbuf);
1990 		if (nbuf_clone == NULL) {
1991 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1992 				"qdf_nbuf_clone failed");
1993 			return nbuf;
1994 		}
1995 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1996 	}
1997 
1998 	if (nbuf_clone) {
1999 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
2000 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2001 		} else {
2002 			qdf_nbuf_free(nbuf_clone);
2003 		}
2004 	}
2005 
2006 	if (no_enc_frame)
2007 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2008 	else
2009 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2010 
2011 	nbuf = dp_tx_send(vap_dev, nbuf);
2012 	if ((nbuf == NULL) && no_enc_frame) {
2013 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2014 	}
2015 
2016 	return nbuf;
2017 }
2018 
2019 #else
2020 
2021 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
2022 {
2023 	return dp_tx_send(vap_dev, nbuf);
2024 }
2025 
2026 #endif
2027 
2028 /**
2029  * dp_tx_send() - Transmit a frame on a given VAP
2030  * @vap_dev: DP vdev handle
2031  * @nbuf: skb
2032  *
2033  * Entry point for Core Tx layer (DP_TX) invoked from
2034  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2035  * cases
2036  *
2037  * Return: NULL on success,
2038  *         nbuf when it fails to send
2039  */
2040 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
2041 {
2042 	struct ether_header *eh = NULL;
2043 	struct dp_tx_msdu_info_s msdu_info;
2044 	struct dp_tx_seg_info_s seg_info;
2045 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
2046 	uint16_t peer_id = HTT_INVALID_PEER;
2047 	qdf_nbuf_t nbuf_mesh = NULL;
2048 
2049 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2050 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
2051 
2052 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2053 
2054 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2055 			"%s , skb %pM",
2056 			__func__, nbuf->data);
2057 
2058 	/*
2059 	 * Set Default Host TID value to invalid TID
2060 	 * (TID override disabled)
2061 	 */
2062 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2063 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2064 
2065 	if (qdf_unlikely(vdev->mesh_vdev)) {
2066 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2067 								&msdu_info);
2068 		if (nbuf_mesh == NULL) {
2069 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2070 					"Extracting mesh metadata failed");
2071 			return nbuf;
2072 		}
2073 		nbuf = nbuf_mesh;
2074 	}
2075 
2076 	/*
2077 	 * Get HW Queue to use for this frame.
2078 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
2079 	 * dedicated for data and 1 for command.
2080 	 * "queue_id" maps to one hardware ring.
2081 	 *  With each ring, we also associate a unique Tx descriptor pool
2082 	 *  to minimize lock contention for these resources.
2083 	 */
2084 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2085 
2086 	/*
2087 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2088 	 *  Table 1 - Default DSCP-TID mapping table
2089 	 *  Table 2 - 1 DSCP-TID override table
2090 	 *
2091 	 * If we need a different DSCP-TID mapping for this vap,
2092 	 * call tid_classify to extract DSCP/ToS from frame and
2093 	 * map to a TID and store in msdu_info. This is later used
2094 	 * to fill in TCL Input descriptor (per-packet TID override).
2095 	 */
2096 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2097 
2098 	/*
2099 	 * Classify the frame and call corresponding
2100 	 * "prepare" function which extracts the segment (TSO)
2101 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2102 	 * into MSDU_INFO structure which is later used to fill
2103 	 * SW and HW descriptors.
2104 	 */
2105 	if (qdf_nbuf_is_tso(nbuf)) {
2106 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2107 			  "%s TSO frame %pK", __func__, vdev);
2108 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2109 				qdf_nbuf_len(nbuf));
2110 
2111 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2112 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2113 					 qdf_nbuf_len(nbuf));
2114 			return nbuf;
2115 		}
2116 
2117 		goto send_multiple;
2118 	}
2119 
2120 	/* SG */
2121 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2122 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2123 
2124 		if (!nbuf)
2125 			return NULL;
2126 
2127 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2128 			 "%s non-TSO SG frame %pK", __func__, vdev);
2129 
2130 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2131 				qdf_nbuf_len(nbuf));
2132 
2133 		goto send_multiple;
2134 	}
2135 
2136 #ifdef ATH_SUPPORT_IQUE
2137 	/* Mcast to Ucast Conversion*/
2138 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2139 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2140 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2141 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2142 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2143 				  "%s Mcast frm for ME %pK", __func__, vdev);
2144 
2145 			DP_STATS_INC_PKT(vdev,
2146 					tx_i.mcast_en.mcast_pkt, 1,
2147 					qdf_nbuf_len(nbuf));
2148 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2149 					QDF_STATUS_SUCCESS) {
2150 				return NULL;
2151 			}
2152 		}
2153 	}
2154 #endif
2155 
2156 	/* RAW */
2157 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2158 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2159 		if (nbuf == NULL)
2160 			return NULL;
2161 
2162 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2163 			  "%s Raw frame %pK", __func__, vdev);
2164 
2165 		goto send_multiple;
2166 
2167 	}
2168 
2169 	/*  Single linear frame */
2170 	/*
2171 	 * If nbuf is a simple linear frame, use send_single function to
2172 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2173 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2174 	 */
2175 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2176 
2177 	return nbuf;
2178 
2179 send_multiple:
2180 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2181 
2182 	return nbuf;
2183 }
2184 
2185 /**
2186  * dp_tx_reinject_handler() - Tx Reinject Handler
2187  * @tx_desc: software descriptor head pointer
2188  * @status : Tx completion status from HTT descriptor
2189  *
2190  * This function reinjects frames back to Target.
2191  * Todo - Host queue needs to be added
2192  *
2193  * Return: none
2194  */
2195 static
2196 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2197 {
2198 	struct dp_vdev *vdev;
2199 	struct dp_peer *peer = NULL;
2200 	uint32_t peer_id = HTT_INVALID_PEER;
2201 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2202 	qdf_nbuf_t nbuf_copy = NULL;
2203 	struct dp_tx_msdu_info_s msdu_info;
2204 	struct dp_peer *sa_peer = NULL;
2205 	struct dp_ast_entry *ast_entry = NULL;
2206 	struct dp_soc *soc = NULL;
2207 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2208 #ifdef WDS_VENDOR_EXTENSION
2209 	int is_mcast = 0, is_ucast = 0;
2210 	int num_peers_3addr = 0;
2211 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2212 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2213 #endif
2214 
2215 	vdev = tx_desc->vdev;
2216 	soc = vdev->pdev->soc;
2217 
2218 	qdf_assert(vdev);
2219 
2220 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2221 
2222 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2223 
2224 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2225 			"%s Tx reinject path", __func__);
2226 
2227 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2228 			qdf_nbuf_len(tx_desc->nbuf));
2229 
2230 	qdf_spin_lock_bh(&(soc->ast_lock));
2231 
2232 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2233 				(soc,
2234 				 (uint8_t *)(eh->ether_shost),
2235 				 vdev->pdev->pdev_id);
2236 
2237 	if (ast_entry)
2238 		sa_peer = ast_entry->peer;
2239 
2240 	qdf_spin_unlock_bh(&(soc->ast_lock));
2241 
2242 #ifdef WDS_VENDOR_EXTENSION
2243 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2244 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2245 	} else {
2246 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2247 	}
2248 	is_ucast = !is_mcast;
2249 
2250 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2251 		if (peer->bss_peer)
2252 			continue;
2253 
2254 		/* Detect wds peers that use 3-addr framing for mcast.
2255 		 * if there are any, the bss_peer is used to send the
2256 		 * the mcast frame using 3-addr format. all wds enabled
2257 		 * peers that use 4-addr framing for mcast frames will
2258 		 * be duplicated and sent as 4-addr frames below.
2259 		 */
2260 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2261 			num_peers_3addr = 1;
2262 			break;
2263 		}
2264 	}
2265 #endif
2266 
2267 	if (qdf_unlikely(vdev->mesh_vdev)) {
2268 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2269 	} else {
2270 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2271 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2272 #ifdef WDS_VENDOR_EXTENSION
2273 			/*
2274 			 * . if 3-addr STA, then send on BSS Peer
2275 			 * . if Peer WDS enabled and accept 4-addr mcast,
2276 			 * send mcast on that peer only
2277 			 * . if Peer WDS enabled and accept 4-addr ucast,
2278 			 * send ucast on that peer only
2279 			 */
2280 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2281 			 (peer->wds_enabled &&
2282 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2283 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2284 #else
2285 			((peer->bss_peer &&
2286 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2287 				 peer->nawds_enabled)) {
2288 #endif
2289 				peer_id = DP_INVALID_PEER;
2290 
2291 				if (peer->nawds_enabled) {
2292 					peer_id = peer->peer_ids[0];
2293 					if (sa_peer == peer) {
2294 						QDF_TRACE(
2295 							QDF_MODULE_ID_DP,
2296 							QDF_TRACE_LEVEL_DEBUG,
2297 							" %s: multicast packet",
2298 							__func__);
2299 						DP_STATS_INC(peer,
2300 							tx.nawds_mcast_drop, 1);
2301 						continue;
2302 					}
2303 				}
2304 
2305 				nbuf_copy = qdf_nbuf_copy(nbuf);
2306 
2307 				if (!nbuf_copy) {
2308 					QDF_TRACE(QDF_MODULE_ID_DP,
2309 						QDF_TRACE_LEVEL_DEBUG,
2310 						FL("nbuf copy failed"));
2311 					break;
2312 				}
2313 
2314 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2315 						nbuf_copy,
2316 						&msdu_info,
2317 						peer_id,
2318 						NULL);
2319 
2320 				if (nbuf_copy) {
2321 					QDF_TRACE(QDF_MODULE_ID_DP,
2322 						QDF_TRACE_LEVEL_DEBUG,
2323 						FL("pkt send failed"));
2324 					qdf_nbuf_free(nbuf_copy);
2325 				} else {
2326 					if (peer_id != DP_INVALID_PEER)
2327 						DP_STATS_INC_PKT(peer,
2328 							tx.nawds_mcast,
2329 							1, qdf_nbuf_len(nbuf));
2330 				}
2331 			}
2332 		}
2333 	}
2334 
2335 	if (vdev->nawds_enabled) {
2336 		peer_id = DP_INVALID_PEER;
2337 
2338 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2339 					1, qdf_nbuf_len(nbuf));
2340 
2341 		nbuf = dp_tx_send_msdu_single(vdev,
2342 				nbuf,
2343 				&msdu_info,
2344 				peer_id, NULL);
2345 
2346 		if (nbuf) {
2347 			QDF_TRACE(QDF_MODULE_ID_DP,
2348 				QDF_TRACE_LEVEL_DEBUG,
2349 				FL("pkt send failed"));
2350 			qdf_nbuf_free(nbuf);
2351 		}
2352 	} else
2353 		qdf_nbuf_free(nbuf);
2354 
2355 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2356 }
2357 
2358 /**
2359  * dp_tx_inspect_handler() - Tx Inspect Handler
2360  * @tx_desc: software descriptor head pointer
2361  * @status : Tx completion status from HTT descriptor
2362  *
2363  * Handles Tx frames sent back to Host for inspection
2364  * (ProxyARP)
2365  *
2366  * Return: none
2367  */
2368 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2369 {
2370 
2371 	struct dp_soc *soc;
2372 	struct dp_pdev *pdev = tx_desc->pdev;
2373 
2374 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2375 			"%s Tx inspect path",
2376 			__func__);
2377 
2378 	qdf_assert(pdev);
2379 
2380 	soc = pdev->soc;
2381 
2382 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2383 			qdf_nbuf_len(tx_desc->nbuf));
2384 
2385 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2386 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2387 }
2388 
2389 #ifdef FEATURE_PERPKT_INFO
2390 /**
2391  * dp_get_completion_indication_for_stack() - send completion to stack
2392  * @soc : dp_soc handle
2393  * @pdev: dp_pdev handle
2394  * @peer: dp peer handle
2395  * @ts: transmit completion status structure
2396  * @netbuf: Buffer pointer for free
2397  *
2398  * This function is used for indication whether buffer needs to be
2399  * sent to stack for freeing or not
2400 */
2401 QDF_STATUS
2402 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2403 				       struct dp_pdev *pdev,
2404 				       struct dp_peer *peer,
2405 				       struct hal_tx_completion_status *ts,
2406 				       qdf_nbuf_t netbuf)
2407 {
2408 	struct tx_capture_hdr *ppdu_hdr;
2409 	uint16_t peer_id = ts->peer_id;
2410 	uint32_t ppdu_id = ts->ppdu_id;
2411 	uint8_t first_msdu = ts->first_msdu;
2412 	uint8_t last_msdu = ts->last_msdu;
2413 
2414 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2415 		return QDF_STATUS_E_NOSUPPORT;
2416 
2417 	if (!peer) {
2418 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2419 				FL("Peer Invalid"));
2420 		return QDF_STATUS_E_INVAL;
2421 	}
2422 
2423 	if (pdev->mcopy_mode) {
2424 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2425 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2426 			return QDF_STATUS_E_INVAL;
2427 		}
2428 
2429 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2430 		pdev->m_copy_id.tx_peer_id = peer_id;
2431 	}
2432 
2433 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2434 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2435 				FL("No headroom"));
2436 		return QDF_STATUS_E_NOMEM;
2437 	}
2438 
2439 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2440 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2441 		     IEEE80211_ADDR_LEN);
2442 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2443 		     IEEE80211_ADDR_LEN);
2444 	ppdu_hdr->ppdu_id = ppdu_id;
2445 	ppdu_hdr->peer_id = peer_id;
2446 	ppdu_hdr->first_msdu = first_msdu;
2447 	ppdu_hdr->last_msdu = last_msdu;
2448 
2449 	return QDF_STATUS_SUCCESS;
2450 }
2451 
2452 
2453 /**
2454  * dp_send_completion_to_stack() - send completion to stack
2455  * @soc :  dp_soc handle
2456  * @pdev:  dp_pdev handle
2457  * @peer_id: peer_id of the peer for which completion came
2458  * @ppdu_id: ppdu_id
2459  * @netbuf: Buffer pointer for free
2460  *
2461  * This function is used to send completion to stack
2462  * to free buffer
2463 */
2464 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2465 					uint16_t peer_id, uint32_t ppdu_id,
2466 					qdf_nbuf_t netbuf)
2467 {
2468 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2469 				netbuf, peer_id,
2470 				WDI_NO_VAL, pdev->pdev_id);
2471 }
2472 #else
2473 static QDF_STATUS
2474 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2475 				       struct dp_pdev *pdev,
2476 				       struct dp_peer *peer,
2477 				       struct hal_tx_completion_status *ts,
2478 				       qdf_nbuf_t netbuf)
2479 {
2480 	return QDF_STATUS_E_NOSUPPORT;
2481 }
2482 
2483 static void
2484 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2485 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2486 {
2487 }
2488 #endif
2489 
2490 /**
2491  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2492  * @soc: Soc handle
2493  * @desc: software Tx descriptor to be processed
2494  *
2495  * Return: none
2496  */
2497 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2498 		struct dp_tx_desc_s *desc)
2499 {
2500 	struct dp_vdev *vdev = desc->vdev;
2501 	qdf_nbuf_t nbuf = desc->nbuf;
2502 
2503 	/* If it is TDLS mgmt, don't unmap or free the frame */
2504 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2505 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2506 
2507 	/* 0 : MSDU buffer, 1 : MLE */
2508 	if (desc->msdu_ext_desc) {
2509 		/* TSO free */
2510 		if (hal_tx_ext_desc_get_tso_enable(
2511 					desc->msdu_ext_desc->vaddr)) {
2512 			/* unmap eash TSO seg before free the nbuf */
2513 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2514 						desc->tso_num_desc);
2515 			qdf_nbuf_free(nbuf);
2516 			return;
2517 		}
2518 	}
2519 
2520 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2521 
2522 	if (qdf_likely(!vdev->mesh_vdev))
2523 		qdf_nbuf_free(nbuf);
2524 	else {
2525 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2526 			qdf_nbuf_free(nbuf);
2527 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2528 		} else
2529 			vdev->osif_tx_free_ext((nbuf));
2530 	}
2531 }
2532 
2533 /**
2534  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2535  * @vdev: pointer to dp dev handler
2536  * @status : Tx completion status from HTT descriptor
2537  *
2538  * Handles MEC notify event sent from fw to Host
2539  *
2540  * Return: none
2541  */
2542 #ifdef FEATURE_WDS
2543 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2544 {
2545 
2546 	struct dp_soc *soc;
2547 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2548 	struct dp_peer *peer;
2549 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2550 
2551 	if (!vdev->mec_enabled)
2552 		return;
2553 
2554 	/* MEC required only in STA mode */
2555 	if (vdev->opmode != wlan_op_mode_sta)
2556 		return;
2557 
2558 	soc = vdev->pdev->soc;
2559 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2560 	peer = TAILQ_FIRST(&vdev->peer_list);
2561 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2562 
2563 	if (!peer) {
2564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2565 				FL("peer is NULL"));
2566 		return;
2567 	}
2568 
2569 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2570 			"%s Tx MEC Handler",
2571 			__func__);
2572 
2573 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2574 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2575 					status[(DP_MAC_ADDR_LEN - 2) + i];
2576 
2577 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2578 		dp_peer_add_ast(soc,
2579 				peer,
2580 				mac_addr,
2581 				CDP_TXRX_AST_TYPE_MEC,
2582 				flags);
2583 }
2584 #endif
2585 
2586 #ifdef MESH_MODE_SUPPORT
2587 /**
2588  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2589  *                                         in mesh meta header
2590  * @tx_desc: software descriptor head pointer
2591  * @ts: pointer to tx completion stats
2592  * Return: none
2593  */
2594 static
2595 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2596 		struct hal_tx_completion_status *ts)
2597 {
2598 	struct meta_hdr_s *mhdr;
2599 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2600 
2601 	if (!tx_desc->msdu_ext_desc) {
2602 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2603 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2604 				"netbuf %pK offset %d",
2605 				netbuf, tx_desc->pkt_offset);
2606 			return;
2607 		}
2608 	}
2609 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2610 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2611 			"netbuf %pK offset %d", netbuf,
2612 			sizeof(struct meta_hdr_s));
2613 		return;
2614 	}
2615 
2616 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2617 	mhdr->rssi = ts->ack_frame_rssi;
2618 	mhdr->channel = tx_desc->pdev->operating_channel;
2619 }
2620 
2621 #else
2622 static
2623 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2624 		struct hal_tx_completion_status *ts)
2625 {
2626 }
2627 
2628 #endif
2629 
2630 /**
2631  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2632  * @peer: Handle to DP peer
2633  * @ts: pointer to HAL Tx completion stats
2634  *
2635  * Return: None
2636  */
2637 static inline void
2638 dp_tx_update_peer_stats(struct dp_peer *peer,
2639 			struct hal_tx_completion_status *ts, uint32_t length)
2640 {
2641 	struct dp_pdev *pdev = peer->vdev->pdev;
2642 	struct dp_soc *soc = NULL;
2643 	uint8_t mcs, pkt_type;
2644 
2645 	if (!pdev)
2646 		return;
2647 
2648 	soc = pdev->soc;
2649 
2650 	mcs = ts->mcs;
2651 	pkt_type = ts->pkt_type;
2652 
2653 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2654 		dp_err("Release source is not from TQM");
2655 		return;
2656 	}
2657 
2658 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2659 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2660 
2661 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
2662 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2663 
2664 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2665 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2666 
2667 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2668 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2669 
2670 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2671 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2672 
2673 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2674 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2675 
2676 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2677 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2678 
2679 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2680 		return;
2681 	}
2682 
2683 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2684 
2685 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2686 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2687 
2688 	/*
2689 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2690 	 * Return from here if HTT PPDU events are enabled.
2691 	 */
2692 	if (!(soc->process_tx_status))
2693 		return;
2694 
2695 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2696 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2697 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2698 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2700 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2701 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2702 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2703 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2704 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2705 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2706 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2707 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2708 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2709 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2710 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2711 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2712 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2713 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2714 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2715 
2716 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2717 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2718 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2719 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2720 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2721 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2722 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2723 
2724 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
2725 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2726 			     &peer->stats, ts->peer_id,
2727 			     UPDATE_PEER_STATS, pdev->pdev_id);
2728 #endif
2729 }
2730 
2731 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2732 /**
2733  * dp_tx_flow_pool_lock() - take flow pool lock
2734  * @soc: core txrx main context
2735  * @tx_desc: tx desc
2736  *
2737  * Return: None
2738  */
2739 static inline
2740 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2741 			  struct dp_tx_desc_s *tx_desc)
2742 {
2743 	struct dp_tx_desc_pool_s *pool;
2744 	uint8_t desc_pool_id;
2745 
2746 	desc_pool_id = tx_desc->pool_id;
2747 	pool = &soc->tx_desc[desc_pool_id];
2748 
2749 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2750 }
2751 
2752 /**
2753  * dp_tx_flow_pool_unlock() - release flow pool lock
2754  * @soc: core txrx main context
2755  * @tx_desc: tx desc
2756  *
2757  * Return: None
2758  */
2759 static inline
2760 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2761 			    struct dp_tx_desc_s *tx_desc)
2762 {
2763 	struct dp_tx_desc_pool_s *pool;
2764 	uint8_t desc_pool_id;
2765 
2766 	desc_pool_id = tx_desc->pool_id;
2767 	pool = &soc->tx_desc[desc_pool_id];
2768 
2769 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2770 }
2771 #else
2772 static inline
2773 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2774 {
2775 }
2776 
2777 static inline
2778 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2779 {
2780 }
2781 #endif
2782 
2783 /**
2784  * dp_tx_notify_completion() - Notify tx completion for this desc
2785  * @soc: core txrx main context
2786  * @tx_desc: tx desc
2787  * @netbuf:  buffer
2788  *
2789  * Return: none
2790  */
2791 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2792 					   struct dp_tx_desc_s *tx_desc,
2793 					   qdf_nbuf_t netbuf)
2794 {
2795 	void *osif_dev;
2796 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2797 
2798 	qdf_assert(tx_desc);
2799 
2800 	dp_tx_flow_pool_lock(soc, tx_desc);
2801 
2802 	if (!tx_desc->vdev ||
2803 	    !tx_desc->vdev->osif_vdev) {
2804 		dp_tx_flow_pool_unlock(soc, tx_desc);
2805 		return;
2806 	}
2807 
2808 	osif_dev = tx_desc->vdev->osif_vdev;
2809 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2810 	dp_tx_flow_pool_unlock(soc, tx_desc);
2811 
2812 	if (tx_compl_cbk)
2813 		tx_compl_cbk(netbuf, osif_dev);
2814 }
2815 
2816 /** dp_tx_sojourn_stats_process() - Collect sojourn stats
2817  * @pdev: pdev handle
2818  * @tid: tid value
2819  * @txdesc_ts: timestamp from txdesc
2820  * @ppdu_id: ppdu id
2821  *
2822  * Return: none
2823  */
2824 #ifdef FEATURE_PERPKT_INFO
2825 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2826 					       uint8_t tid,
2827 					       uint64_t txdesc_ts,
2828 					       uint32_t ppdu_id)
2829 {
2830 	uint64_t delta_ms;
2831 	struct cdp_tx_sojourn_stats *sojourn_stats;
2832 
2833 	if (pdev->enhanced_stats_en == 0)
2834 		return;
2835 
2836 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2837 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2838 
2839 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2840 		if (!pdev->sojourn_buf)
2841 			return;
2842 
2843 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2844 					qdf_nbuf_data(pdev->sojourn_buf);
2845 
2846 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2847 			     sizeof(struct cdp_tx_sojourn_stats));
2848 
2849 		qdf_mem_zero(&pdev->sojourn_stats,
2850 			     sizeof(struct cdp_tx_sojourn_stats));
2851 
2852 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2853 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2854 				     WDI_NO_VAL, pdev->pdev_id);
2855 
2856 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2857 	}
2858 
2859 	if (tid == HTT_INVALID_TID)
2860 		return;
2861 
2862 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2863 				txdesc_ts;
2864 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2865 			    delta_ms);
2866 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2867 	pdev->sojourn_stats.num_msdus[tid]++;
2868 }
2869 #else
2870 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2871 					       uint8_t tid,
2872 					       uint64_t txdesc_ts,
2873 					       uint32_t ppdu_id)
2874 {
2875 }
2876 #endif
2877 
2878 /**
2879  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
2880  * @soc: DP Soc handle
2881  * @tx_desc: software Tx descriptor
2882  * @ts : Tx completion status from HAL/HTT descriptor
2883  *
2884  * Return: none
2885  */
2886 static inline void
2887 dp_tx_comp_process_desc(struct dp_soc *soc,
2888 			struct dp_tx_desc_s *desc,
2889 			struct hal_tx_completion_status *ts,
2890 			struct dp_peer *peer)
2891 {
2892 	/*
2893 	 * m_copy/tx_capture modes are not supported for
2894 	 * scatter gather packets
2895 	 */
2896 	if (!(desc->msdu_ext_desc) &&
2897 	    (dp_get_completion_indication_for_stack(soc, desc->pdev,
2898 						    peer, ts, desc->nbuf)
2899 			== QDF_STATUS_SUCCESS)) {
2900 		qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2901 			       QDF_DMA_TO_DEVICE);
2902 
2903 		dp_send_completion_to_stack(soc, desc->pdev, ts->peer_id,
2904 					    ts->ppdu_id, desc->nbuf);
2905 	} else {
2906 		dp_tx_comp_free_buf(soc, desc);
2907 	}
2908 }
2909 
2910 /**
2911  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2912  * @tx_desc: software descriptor head pointer
2913  * @ts: Tx completion status
2914  * @peer: peer handle
2915  *
2916  * Return: none
2917  */
2918 static inline
2919 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2920 				  struct hal_tx_completion_status *ts,
2921 				  struct dp_peer *peer)
2922 {
2923 	uint32_t length;
2924 	struct dp_soc *soc = NULL;
2925 	struct dp_vdev *vdev = tx_desc->vdev;
2926 	struct ether_header *eh =
2927 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2928 
2929 	if (!vdev) {
2930 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2931 				"invalid vdev");
2932 		goto out;
2933 	}
2934 
2935 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2936 				"-------------------- \n"
2937 				"Tx Completion Stats: \n"
2938 				"-------------------- \n"
2939 				"ack_frame_rssi = %d \n"
2940 				"first_msdu = %d \n"
2941 				"last_msdu = %d \n"
2942 				"msdu_part_of_amsdu = %d \n"
2943 				"rate_stats valid = %d \n"
2944 				"bw = %d \n"
2945 				"pkt_type = %d \n"
2946 				"stbc = %d \n"
2947 				"ldpc = %d \n"
2948 				"sgi = %d \n"
2949 				"mcs = %d \n"
2950 				"ofdma = %d \n"
2951 				"tones_in_ru = %d \n"
2952 				"tsf = %d \n"
2953 				"ppdu_id = %d \n"
2954 				"transmit_cnt = %d \n"
2955 				"tid = %d \n"
2956 				"peer_id = %d\n",
2957 				ts->ack_frame_rssi, ts->first_msdu,
2958 				ts->last_msdu, ts->msdu_part_of_amsdu,
2959 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
2960 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
2961 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
2962 				ts->transmit_cnt, ts->tid, ts->peer_id);
2963 
2964 	soc = vdev->pdev->soc;
2965 
2966 	/* Update SoC level stats */
2967 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2968 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2969 
2970 	/* Update per-packet stats for mesh mode */
2971 	if (qdf_unlikely(vdev->mesh_vdev) &&
2972 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2973 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
2974 
2975 	length = qdf_nbuf_len(tx_desc->nbuf);
2976 	/* Update peer level stats */
2977 	if (!peer) {
2978 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2979 				   "peer is null or deletion in progress");
2980 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2981 		goto out;
2982 	}
2983 
2984 	if (qdf_likely(!peer->bss_peer)) {
2985 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2986 
2987 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
2988 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2989 	} else {
2990 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
2991 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2992 
2993 			if ((peer->vdev->tx_encap_type ==
2994 				htt_cmn_pkt_type_ethernet) &&
2995 				IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
2996 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2997 			}
2998 		}
2999 	}
3000 
3001 	dp_tx_update_peer_stats(peer, ts, length);
3002 
3003 out:
3004 	return;
3005 }
3006 /**
3007  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3008  * @soc: core txrx main context
3009  * @comp_head: software descriptor head pointer
3010  *
3011  * This function will process batch of descriptors reaped by dp_tx_comp_handler
3012  * and release the software descriptors after processing is complete
3013  *
3014  * Return: none
3015  */
3016 static void
3017 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3018 			     struct dp_tx_desc_s *comp_head)
3019 {
3020 	struct dp_tx_desc_s *desc;
3021 	struct dp_tx_desc_s *next;
3022 	struct hal_tx_completion_status ts = {0};
3023 	struct dp_peer *peer;
3024 
3025 	DP_HIST_INIT();
3026 	desc = comp_head;
3027 
3028 	while (desc) {
3029 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3030 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3031 		dp_tx_comp_process_tx_status(desc, &ts, peer);
3032 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3033 
3034 		if (peer)
3035 			dp_peer_unref_del_find_by_id(peer);
3036 
3037 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
3038 
3039 		next = desc->next;
3040 
3041 		dp_tx_desc_release(desc, desc->pool_id);
3042 		desc = next;
3043 	}
3044 
3045 	DP_TX_HIST_STATS_PER_PDEV();
3046 }
3047 
3048 /**
3049  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3050  * @tx_desc: software descriptor head pointer
3051  * @status : Tx completion status from HTT descriptor
3052  *
3053  * This function will process HTT Tx indication messages from Target
3054  *
3055  * Return: none
3056  */
3057 static
3058 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
3059 {
3060 	uint8_t tx_status;
3061 	struct dp_pdev *pdev;
3062 	struct dp_vdev *vdev;
3063 	struct dp_soc *soc;
3064 	struct hal_tx_completion_status ts = {0};
3065 	uint32_t *htt_desc = (uint32_t *)status;
3066 	struct dp_peer *peer;
3067 
3068 	qdf_assert(tx_desc->pdev);
3069 
3070 	pdev = tx_desc->pdev;
3071 	vdev = tx_desc->vdev;
3072 	soc = pdev->soc;
3073 
3074 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3075 
3076 	switch (tx_status) {
3077 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3078 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3079 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3080 	{
3081 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3082 			ts.peer_id =
3083 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3084 						htt_desc[2]);
3085 			ts.tid =
3086 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3087 						htt_desc[2]);
3088 		} else {
3089 			ts.peer_id = HTT_INVALID_PEER;
3090 			ts.tid = HTT_INVALID_TID;
3091 		}
3092 		ts.ppdu_id =
3093 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3094 					htt_desc[1]);
3095 		ts.ack_frame_rssi =
3096 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3097 					htt_desc[1]);
3098 
3099 		ts.first_msdu = 1;
3100 		ts.last_msdu = 1;
3101 
3102 		if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
3103 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
3104 
3105 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3106 
3107 		if (qdf_likely(peer))
3108 			dp_peer_unref_del_find_by_id(peer);
3109 
3110 		dp_tx_comp_process_tx_status(tx_desc, &ts, peer);
3111 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3112 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3113 
3114 		break;
3115 	}
3116 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3117 	{
3118 		dp_tx_reinject_handler(tx_desc, status);
3119 		break;
3120 	}
3121 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3122 	{
3123 		dp_tx_inspect_handler(tx_desc, status);
3124 		break;
3125 	}
3126 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3127 	{
3128 		dp_tx_mec_handler(vdev, status);
3129 		break;
3130 	}
3131 	default:
3132 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3133 			  "%s Invalid HTT tx_status %d\n",
3134 			  __func__, tx_status);
3135 		break;
3136 	}
3137 }
3138 
3139 /**
3140  * dp_tx_comp_handler() - Tx completion handler
3141  * @soc: core txrx main context
3142  * @ring_id: completion ring id
3143  * @quota: No. of packets/descriptors that can be serviced in one loop
3144  *
3145  * This function will collect hardware release ring element contents and
3146  * handle descriptor contents. Based on contents, free packet or handle error
3147  * conditions
3148  *
3149  * Return: none
3150  */
3151 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3152 {
3153 	void *tx_comp_hal_desc;
3154 	uint8_t buffer_src;
3155 	uint8_t pool_id;
3156 	uint32_t tx_desc_id;
3157 	struct dp_tx_desc_s *tx_desc = NULL;
3158 	struct dp_tx_desc_s *head_desc = NULL;
3159 	struct dp_tx_desc_s *tail_desc = NULL;
3160 	uint32_t num_processed;
3161 	uint32_t count;
3162 
3163 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3164 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3165 				"%s %d : HAL RING Access Failed -- %pK",
3166 				__func__, __LINE__, hal_srng);
3167 		return 0;
3168 	}
3169 
3170 	num_processed = 0;
3171 	count = 0;
3172 
3173 	/* Find head descriptor from completion ring */
3174 	while (qdf_likely(tx_comp_hal_desc =
3175 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3176 
3177 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3178 
3179 		/* If this buffer was not released by TQM or FW, then it is not
3180 		 * Tx completion indication, assert */
3181 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3182 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3183 
3184 			QDF_TRACE(QDF_MODULE_ID_DP,
3185 					QDF_TRACE_LEVEL_FATAL,
3186 					"Tx comp release_src != TQM | FW");
3187 
3188 			qdf_assert_always(0);
3189 		}
3190 
3191 		/* Get descriptor id */
3192 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3193 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3194 			DP_TX_DESC_ID_POOL_OS;
3195 
3196 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3197 			continue;
3198 
3199 		/* Find Tx descriptor */
3200 		tx_desc = dp_tx_desc_find(soc, pool_id,
3201 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3202 				DP_TX_DESC_ID_PAGE_OS,
3203 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3204 				DP_TX_DESC_ID_OFFSET_OS);
3205 
3206 		/*
3207 		 * If the descriptor is already freed in vdev_detach,
3208 		 * continue to next descriptor
3209 		 */
3210 		if (!tx_desc->vdev) {
3211 			QDF_TRACE(QDF_MODULE_ID_DP,
3212 				  QDF_TRACE_LEVEL_INFO,
3213 				  "Descriptor freed in vdev_detach %d",
3214 				  tx_desc_id);
3215 
3216 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3217 			count++;
3218 			continue;
3219 		}
3220 
3221 		/*
3222 		 * If the release source is FW, process the HTT status
3223 		 */
3224 		if (qdf_unlikely(buffer_src ==
3225 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3226 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3227 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3228 					htt_tx_status);
3229 			dp_tx_process_htt_completion(tx_desc,
3230 					htt_tx_status);
3231 		} else {
3232 			/* Pool id is not matching. Error */
3233 			if (tx_desc->pool_id != pool_id) {
3234 				QDF_TRACE(QDF_MODULE_ID_DP,
3235 					QDF_TRACE_LEVEL_FATAL,
3236 					"Tx Comp pool id %d not matched %d",
3237 					pool_id, tx_desc->pool_id);
3238 
3239 				qdf_assert_always(0);
3240 			}
3241 
3242 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3243 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3244 				QDF_TRACE(QDF_MODULE_ID_DP,
3245 					QDF_TRACE_LEVEL_FATAL,
3246 					"Txdesc invalid, flgs = %x,id = %d",
3247 					tx_desc->flags,	tx_desc_id);
3248 				qdf_assert_always(0);
3249 			}
3250 
3251 			/* First ring descriptor on the cycle */
3252 			if (!head_desc) {
3253 				head_desc = tx_desc;
3254 				tail_desc = tx_desc;
3255 			}
3256 
3257 			tail_desc->next = tx_desc;
3258 			tx_desc->next = NULL;
3259 			tail_desc = tx_desc;
3260 
3261 			/* Collect hw completion contents */
3262 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3263 					&tx_desc->comp, 1);
3264 
3265 		}
3266 
3267 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3268 
3269 		/*
3270 		 * Processed packet count is more than given quota
3271 		 * stop to processing
3272 		 */
3273 		if ((num_processed >= quota))
3274 			break;
3275 
3276 		count++;
3277 	}
3278 
3279 	hal_srng_access_end(soc->hal_soc, hal_srng);
3280 
3281 	/* Process the reaped descriptors */
3282 	if (head_desc)
3283 		dp_tx_comp_process_desc_list(soc, head_desc);
3284 
3285 	return num_processed;
3286 }
3287 
3288 #ifdef CONVERGED_TDLS_ENABLE
3289 /**
3290  * dp_tx_non_std() - Allow the control-path SW to send data frames
3291  *
3292  * @data_vdev - which vdev should transmit the tx data frames
3293  * @tx_spec - what non-standard handling to apply to the tx data frames
3294  * @msdu_list - NULL-terminated list of tx MSDUs
3295  *
3296  * Return: NULL on success,
3297  *         nbuf when it fails to send
3298  */
3299 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3300 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3301 {
3302 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3303 
3304 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3305 		vdev->is_tdls_frame = true;
3306 	return dp_tx_send(vdev_handle, msdu_list);
3307 }
3308 #endif
3309 
3310 /**
3311  * dp_tx_vdev_attach() - attach vdev to dp tx
3312  * @vdev: virtual device instance
3313  *
3314  * Return: QDF_STATUS_SUCCESS: success
3315  *         QDF_STATUS_E_RESOURCES: Error return
3316  */
3317 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3318 {
3319 	/*
3320 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3321 	 */
3322 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3323 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3324 
3325 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3326 			vdev->vdev_id);
3327 
3328 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3329 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3330 
3331 	/*
3332 	 * Set HTT Extension Valid bit to 0 by default
3333 	 */
3334 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3335 
3336 	dp_tx_vdev_update_search_flags(vdev);
3337 
3338 	return QDF_STATUS_SUCCESS;
3339 }
3340 
3341 #ifdef FEATURE_WDS
3342 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3343 {
3344 	struct dp_soc *soc = vdev->pdev->soc;
3345 
3346 	/*
3347 	 * If AST index override support is available (HKv2 etc),
3348 	 * DA search flag be enabled always
3349 	 *
3350 	 * If AST index override support is not available (HKv1),
3351 	 * DA search flag should be used for all modes except QWRAP
3352 	 */
3353 	if (soc->ast_override_support || !vdev->proxysta_vdev)
3354 		return true;
3355 
3356 	return false;
3357 }
3358 #else
3359 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
3360 {
3361 	return false;
3362 }
3363 #endif
3364 
3365 /**
3366  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3367  * @vdev: virtual device instance
3368  *
3369  * Return: void
3370  *
3371  */
3372 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3373 {
3374 	struct dp_soc *soc = vdev->pdev->soc;
3375 
3376 	/*
3377 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3378 	 * for TDLS link
3379 	 *
3380 	 * Enable AddrY (SA based search) only for non-WDS STA and
3381 	 * ProxySTA VAP (in HKv1) modes.
3382 	 *
3383 	 * In all other VAP modes, only DA based search should be
3384 	 * enabled
3385 	 */
3386 	if (vdev->opmode == wlan_op_mode_sta &&
3387 	    vdev->tdls_link_connected)
3388 		vdev->hal_desc_addr_search_flags =
3389 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3390 	else if ((vdev->opmode == wlan_op_mode_sta) &&
3391 		 !dp_tx_da_search_override(vdev))
3392 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3393 	else
3394 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3395 
3396 	/* Set search type only when peer map v2 messaging is enabled
3397 	 * as we will have the search index (AST hash) only when v2 is
3398 	 * enabled
3399 	 */
3400 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3401 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3402 	else
3403 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3404 }
3405 
3406 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3407 /* dp_tx_desc_flush() - release resources associated
3408  *                      to tx_desc
3409  * @vdev: virtual device instance
3410  *
3411  * This function will free all outstanding Tx buffers,
3412  * including ME buffer for which either free during
3413  * completion didn't happened or completion is not
3414  * received.
3415  */
3416 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3417 {
3418 	uint8_t i;
3419 	uint32_t j;
3420 	uint32_t num_desc, page_id, offset;
3421 	uint16_t num_desc_per_page;
3422 	struct dp_soc *soc = vdev->pdev->soc;
3423 	struct dp_tx_desc_s *tx_desc = NULL;
3424 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3425 
3426 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
3427 		tx_desc_pool = &soc->tx_desc[i];
3428 		if (!(tx_desc_pool->pool_size) ||
3429 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
3430 		    !(tx_desc_pool->desc_pages.cacheable_pages))
3431 			continue;
3432 
3433 		num_desc = tx_desc_pool->pool_size;
3434 		num_desc_per_page =
3435 			tx_desc_pool->desc_pages.num_element_per_page;
3436 		for (j = 0; j < num_desc; j++) {
3437 			page_id = j / num_desc_per_page;
3438 			offset = j % num_desc_per_page;
3439 
3440 			if (qdf_unlikely(!(tx_desc_pool->
3441 					 desc_pages.cacheable_pages)))
3442 				break;
3443 
3444 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3445 			if (tx_desc && (tx_desc->vdev == vdev) &&
3446 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3447 				dp_tx_comp_free_buf(soc, tx_desc);
3448 				dp_tx_desc_release(tx_desc, i);
3449 			}
3450 		}
3451 	}
3452 }
3453 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3454 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3455 {
3456 	uint8_t i, num_pool;
3457 	uint32_t j;
3458 	uint32_t num_desc, page_id, offset;
3459 	uint16_t num_desc_per_page;
3460 	struct dp_soc *soc = vdev->pdev->soc;
3461 	struct dp_tx_desc_s *tx_desc = NULL;
3462 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3463 
3464 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3465 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3466 
3467 	for (i = 0; i < num_pool; i++) {
3468 		tx_desc_pool = &soc->tx_desc[i];
3469 		if (!tx_desc_pool->desc_pages.cacheable_pages)
3470 			continue;
3471 
3472 		num_desc_per_page =
3473 			tx_desc_pool->desc_pages.num_element_per_page;
3474 		for (j = 0; j < num_desc; j++) {
3475 			page_id = j / num_desc_per_page;
3476 			offset = j % num_desc_per_page;
3477 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
3478 
3479 			if (tx_desc && (tx_desc->vdev == vdev) &&
3480 			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3481 				dp_tx_comp_free_buf(soc, tx_desc);
3482 				dp_tx_desc_release(tx_desc, i);
3483 			}
3484 		}
3485 	}
3486 }
3487 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3488 
3489 /**
3490  * dp_tx_vdev_detach() - detach vdev from dp tx
3491  * @vdev: virtual device instance
3492  *
3493  * Return: QDF_STATUS_SUCCESS: success
3494  *         QDF_STATUS_E_RESOURCES: Error return
3495  */
3496 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3497 {
3498 	dp_tx_desc_flush(vdev);
3499 	return QDF_STATUS_SUCCESS;
3500 }
3501 
3502 /**
3503  * dp_tx_pdev_attach() - attach pdev to dp tx
3504  * @pdev: physical device instance
3505  *
3506  * Return: QDF_STATUS_SUCCESS: success
3507  *         QDF_STATUS_E_RESOURCES: Error return
3508  */
3509 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3510 {
3511 	struct dp_soc *soc = pdev->soc;
3512 
3513 	/* Initialize Flow control counters */
3514 	qdf_atomic_init(&pdev->num_tx_exception);
3515 	qdf_atomic_init(&pdev->num_tx_outstanding);
3516 
3517 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3518 		/* Initialize descriptors in TCL Ring */
3519 		hal_tx_init_data_ring(soc->hal_soc,
3520 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3521 	}
3522 
3523 	return QDF_STATUS_SUCCESS;
3524 }
3525 
3526 /**
3527  * dp_tx_pdev_detach() - detach pdev from dp tx
3528  * @pdev: physical device instance
3529  *
3530  * Return: QDF_STATUS_SUCCESS: success
3531  *         QDF_STATUS_E_RESOURCES: Error return
3532  */
3533 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3534 {
3535 	dp_tx_me_exit(pdev);
3536 	return QDF_STATUS_SUCCESS;
3537 }
3538 
3539 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3540 /* Pools will be allocated dynamically */
3541 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3542 					int num_desc)
3543 {
3544 	uint8_t i;
3545 
3546 	for (i = 0; i < num_pool; i++) {
3547 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3548 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3549 	}
3550 
3551 	return 0;
3552 }
3553 
3554 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3555 {
3556 	uint8_t i;
3557 
3558 	for (i = 0; i < num_pool; i++)
3559 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3560 }
3561 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3562 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3563 					int num_desc)
3564 {
3565 	uint8_t i;
3566 
3567 	/* Allocate software Tx descriptor pools */
3568 	for (i = 0; i < num_pool; i++) {
3569 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3570 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3571 					"%s Tx Desc Pool alloc %d failed %pK",
3572 					__func__, i, soc);
3573 			return ENOMEM;
3574 		}
3575 	}
3576 	return 0;
3577 }
3578 
3579 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3580 {
3581 	uint8_t i;
3582 
3583 	for (i = 0; i < num_pool; i++) {
3584 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3585 		if (dp_tx_desc_pool_free(soc, i)) {
3586 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3587 				"%s Tx Desc Pool Free failed", __func__);
3588 		}
3589 	}
3590 }
3591 
3592 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3593 
3594 #ifndef QCA_MEM_ATTACH_ON_WIFI3
3595 /**
3596  * dp_tso_attach_wifi3() - TSO attach handler
3597  * @txrx_soc: Opaque Dp handle
3598  *
3599  * Reserve TSO descriptor buffers
3600  *
3601  * Return: QDF_STATUS_E_FAILURE on failure or
3602  * QDF_STATUS_SUCCESS on success
3603  */
3604 static
3605 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3606 {
3607 	return dp_tso_soc_attach(txrx_soc);
3608 }
3609 
3610 /**
3611  * dp_tso_detach_wifi3() - TSO Detach handler
3612  * @txrx_soc: Opaque Dp handle
3613  *
3614  * Deallocate TSO descriptor buffers
3615  *
3616  * Return: QDF_STATUS_E_FAILURE on failure or
3617  * QDF_STATUS_SUCCESS on success
3618  */
3619 static
3620 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3621 {
3622 	return dp_tso_soc_detach(txrx_soc);
3623 }
3624 #else
3625 static
3626 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
3627 {
3628 	return QDF_STATUS_SUCCESS;
3629 }
3630 
3631 static
3632 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
3633 {
3634 	return QDF_STATUS_SUCCESS;
3635 }
3636 #endif
3637 
3638 QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
3639 {
3640 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3641 	uint8_t i;
3642 	uint8_t num_pool;
3643 	uint32_t num_desc;
3644 
3645 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3646 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3647 
3648 	for (i = 0; i < num_pool; i++)
3649 		dp_tx_tso_desc_pool_free(soc, i);
3650 
3651 	dp_info("%s TSO Desc Pool %d Free descs = %d",
3652 		__func__, num_pool, num_desc);
3653 
3654 	for (i = 0; i < num_pool; i++)
3655 		dp_tx_tso_num_seg_pool_free(soc, i);
3656 
3657 	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
3658 		__func__, num_pool, num_desc);
3659 
3660 	return QDF_STATUS_SUCCESS;
3661 }
3662 
3663 /**
3664  * dp_tso_attach() - TSO attach handler
3665  * @txrx_soc: Opaque Dp handle
3666  *
3667  * Reserve TSO descriptor buffers
3668  *
3669  * Return: QDF_STATUS_E_FAILURE on failure or
3670  * QDF_STATUS_SUCCESS on success
3671  */
3672 QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
3673 {
3674 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3675 	uint8_t i;
3676 	uint8_t num_pool;
3677 	uint32_t num_desc;
3678 
3679 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3680 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3681 
3682 	for (i = 0; i < num_pool; i++) {
3683 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3684 			dp_err("TSO Desc Pool alloc %d failed %pK",
3685 			       i, soc);
3686 
3687 			return QDF_STATUS_E_FAILURE;
3688 		}
3689 	}
3690 
3691 	dp_info("%s TSO Desc Alloc %d, descs = %d",
3692 		__func__, num_pool, num_desc);
3693 
3694 	for (i = 0; i < num_pool; i++) {
3695 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3696 			dp_err("TSO Num of seg Pool alloc %d failed %pK",
3697 			       i, soc);
3698 
3699 			return QDF_STATUS_E_FAILURE;
3700 		}
3701 	}
3702 	return QDF_STATUS_SUCCESS;
3703 }
3704 
3705 /**
3706  * dp_tx_soc_detach() - detach soc from dp tx
3707  * @soc: core txrx main context
3708  *
3709  * This function will detach dp tx into main device context
3710  * will free dp tx resource and initialize resources
3711  *
3712  * Return: QDF_STATUS_SUCCESS: success
3713  *         QDF_STATUS_E_RESOURCES: Error return
3714  */
3715 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3716 {
3717 	uint8_t num_pool;
3718 	uint16_t num_desc;
3719 	uint16_t num_ext_desc;
3720 	uint8_t i;
3721 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3722 
3723 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3724 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3725 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3726 
3727 	dp_tx_flow_control_deinit(soc);
3728 	dp_tx_delete_static_pools(soc, num_pool);
3729 
3730 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3731 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3732 			__func__, num_pool, num_desc);
3733 
3734 	for (i = 0; i < num_pool; i++) {
3735 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3736 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3737 					"%s Tx Ext Desc Pool Free failed",
3738 					__func__);
3739 			return QDF_STATUS_E_RESOURCES;
3740 		}
3741 	}
3742 
3743 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3744 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3745 			__func__, num_pool, num_ext_desc);
3746 
3747 	status = dp_tso_detach_wifi3(soc);
3748 	if (status != QDF_STATUS_SUCCESS)
3749 		return status;
3750 
3751 	return QDF_STATUS_SUCCESS;
3752 }
3753 
3754 /**
3755  * dp_tx_soc_attach() - attach soc to dp tx
3756  * @soc: core txrx main context
3757  *
3758  * This function will attach dp tx into main device context
3759  * will allocate dp tx resource and initialize resources
3760  *
3761  * Return: QDF_STATUS_SUCCESS: success
3762  *         QDF_STATUS_E_RESOURCES: Error return
3763  */
3764 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3765 {
3766 	uint8_t i;
3767 	uint8_t num_pool;
3768 	uint32_t num_desc;
3769 	uint32_t num_ext_desc;
3770 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3771 
3772 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3773 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3774 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3775 
3776 	if (num_pool > MAX_TXDESC_POOLS)
3777 		goto fail;
3778 
3779 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3780 		goto fail;
3781 
3782 	dp_tx_flow_control_init(soc);
3783 
3784 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3785 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3786 			__func__, num_pool, num_desc);
3787 
3788 	/* Allocate extension tx descriptor pools */
3789 	for (i = 0; i < num_pool; i++) {
3790 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3791 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3792 				"MSDU Ext Desc Pool alloc %d failed %pK",
3793 				i, soc);
3794 
3795 			goto fail;
3796 		}
3797 	}
3798 
3799 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3800 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3801 			__func__, num_pool, num_ext_desc);
3802 
3803 	status = dp_tso_attach_wifi3((void *)soc);
3804 	if (status != QDF_STATUS_SUCCESS)
3805 		goto fail;
3806 
3807 
3808 	/* Initialize descriptors in TCL Rings */
3809 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3810 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3811 			hal_tx_init_data_ring(soc->hal_soc,
3812 					soc->tcl_data_ring[i].hal_srng);
3813 		}
3814 	}
3815 
3816 	/*
3817 	 * todo - Add a runtime config option to enable this.
3818 	 */
3819 	/*
3820 	 * Due to multiple issues on NPR EMU, enable it selectively
3821 	 * only for NPR EMU, should be removed, once NPR platforms
3822 	 * are stable.
3823 	 */
3824 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3825 
3826 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3827 			"%s HAL Tx init Success", __func__);
3828 
3829 	return QDF_STATUS_SUCCESS;
3830 
3831 fail:
3832 	/* Detach will take care of freeing only allocated resources */
3833 	dp_tx_soc_detach(soc);
3834 	return QDF_STATUS_E_RESOURCES;
3835 }
3836 
3837 /*
3838  * dp_tx_me_mem_free(): Function to free allocated memory in mcast enahncement
3839  * pdev: pointer to DP PDEV structure
3840  * seg_info_head: Pointer to the head of list
3841  *
3842  * return: void
3843  */
3844 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3845 		struct dp_tx_seg_info_s *seg_info_head)
3846 {
3847 	struct dp_tx_me_buf_t *mc_uc_buf;
3848 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3849 	qdf_nbuf_t nbuf = NULL;
3850 	uint64_t phy_addr;
3851 
3852 	while (seg_info_head) {
3853 		nbuf = seg_info_head->nbuf;
3854 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3855 			seg_info_head->frags[0].vaddr;
3856 		phy_addr = seg_info_head->frags[0].paddr_hi;
3857 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3858 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3859 				phy_addr,
3860 				QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN);
3861 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3862 		qdf_nbuf_free(nbuf);
3863 		seg_info_new = seg_info_head;
3864 		seg_info_head = seg_info_head->next;
3865 		qdf_mem_free(seg_info_new);
3866 	}
3867 }
3868 
3869 /**
3870  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3871  * @vdev: DP VDEV handle
3872  * @nbuf: Multicast nbuf
3873  * @newmac: Table of the clients to which packets have to be sent
3874  * @new_mac_cnt: No of clients
3875  *
3876  * return: no of converted packets
3877  */
3878 uint16_t
3879 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3880 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3881 {
3882 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3883 	struct dp_pdev *pdev = vdev->pdev;
3884 	struct ether_header *eh;
3885 	uint8_t *data;
3886 	uint16_t len;
3887 
3888 	/* reference to frame dst addr */
3889 	uint8_t *dstmac;
3890 	/* copy of original frame src addr */
3891 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3892 
3893 	/* local index into newmac */
3894 	uint8_t new_mac_idx = 0;
3895 	struct dp_tx_me_buf_t *mc_uc_buf;
3896 	qdf_nbuf_t  nbuf_clone;
3897 	struct dp_tx_msdu_info_s msdu_info;
3898 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3899 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3900 	struct dp_tx_seg_info_s *seg_info_new;
3901 	struct dp_tx_frag_info_s data_frag;
3902 	qdf_dma_addr_t paddr_data;
3903 	qdf_dma_addr_t paddr_mcbuf = 0;
3904 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3905 	QDF_STATUS status;
3906 
3907 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3908 
3909 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3910 
3911 	eh = (struct ether_header *) nbuf;
3912 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3913 
3914 	len = qdf_nbuf_len(nbuf);
3915 
3916 	data = qdf_nbuf_data(nbuf);
3917 
3918 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3919 			QDF_DMA_TO_DEVICE);
3920 
3921 	if (status) {
3922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3923 				"Mapping failure Error:%d", status);
3924 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3925 		qdf_nbuf_free(nbuf);
3926 		return 1;
3927 	}
3928 
3929 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3930 
3931 	/*preparing data fragment*/
3932 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3933 	data_frag.paddr_lo = (uint32_t)paddr_data;
3934 	data_frag.paddr_hi = (((uint64_t) paddr_data)  >> 32);
3935 	data_frag.len = len - DP_MAC_ADDR_LEN;
3936 
3937 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3938 		dstmac = newmac[new_mac_idx];
3939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3940 				"added mac addr (%pM)", dstmac);
3941 
3942 		/* Check for NULL Mac Address */
3943 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3944 			continue;
3945 
3946 		/* frame to self mac. skip */
3947 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3948 			continue;
3949 
3950 		/*
3951 		 * TODO: optimize to avoid malloc in per-packet path
3952 		 * For eg. seg_pool can be made part of vdev structure
3953 		 */
3954 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3955 
3956 		if (!seg_info_new) {
3957 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3958 					"alloc failed");
3959 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3960 			goto fail_seg_alloc;
3961 		}
3962 
3963 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3964 		if (mc_uc_buf == NULL)
3965 			goto fail_buf_alloc;
3966 
3967 		/*
3968 		 * TODO: Check if we need to clone the nbuf
3969 		 * Or can we just use the reference for all cases
3970 		 */
3971 		if (new_mac_idx < (new_mac_cnt - 1)) {
3972 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3973 			if (nbuf_clone == NULL) {
3974 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3975 				goto fail_clone;
3976 			}
3977 		} else {
3978 			/*
3979 			 * Update the ref
3980 			 * to account for frame sent without cloning
3981 			 */
3982 			qdf_nbuf_ref(nbuf);
3983 			nbuf_clone = nbuf;
3984 		}
3985 
3986 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3987 
3988 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3989 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3990 				&paddr_mcbuf);
3991 
3992 		if (status) {
3993 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3994 					"Mapping failure Error:%d", status);
3995 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3996 			goto fail_map;
3997 		}
3998 
3999 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
4000 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
4001 		seg_info_new->frags[0].paddr_hi =
4002 			((uint64_t) paddr_mcbuf >> 32);
4003 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
4004 
4005 		seg_info_new->frags[1] = data_frag;
4006 		seg_info_new->nbuf = nbuf_clone;
4007 		seg_info_new->frag_cnt = 2;
4008 		seg_info_new->total_len = len;
4009 
4010 		seg_info_new->next = NULL;
4011 
4012 		if (seg_info_head == NULL)
4013 			seg_info_head = seg_info_new;
4014 		else
4015 			seg_info_tail->next = seg_info_new;
4016 
4017 		seg_info_tail = seg_info_new;
4018 	}
4019 
4020 	if (!seg_info_head) {
4021 		goto free_return;
4022 	}
4023 
4024 	msdu_info.u.sg_info.curr_seg = seg_info_head;
4025 	msdu_info.num_seg = new_mac_cnt;
4026 	msdu_info.frm_type = dp_tx_frm_me;
4027 
4028 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
4029 	    qdf_unlikely(pdev->hmmc_tid_override_en))
4030 		msdu_info.tid = pdev->hmmc_tid;
4031 
4032 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
4033 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4034 
4035 	while (seg_info_head->next) {
4036 		seg_info_new = seg_info_head;
4037 		seg_info_head = seg_info_head->next;
4038 		qdf_mem_free(seg_info_new);
4039 	}
4040 	qdf_mem_free(seg_info_head);
4041 
4042 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4043 	qdf_nbuf_free(nbuf);
4044 	return new_mac_cnt;
4045 
4046 fail_map:
4047 	qdf_nbuf_free(nbuf_clone);
4048 
4049 fail_clone:
4050 	dp_tx_me_free_buf(pdev, mc_uc_buf);
4051 
4052 fail_buf_alloc:
4053 	qdf_mem_free(seg_info_new);
4054 
4055 fail_seg_alloc:
4056 	dp_tx_me_mem_free(pdev, seg_info_head);
4057 
4058 free_return:
4059 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
4060 	qdf_nbuf_free(nbuf);
4061 	return 1;
4062 }
4063 
4064