xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
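/*
 * Mask applied to the skb queue mapping to fold it onto the available
 * Tx descriptor pools / TCL data rings in the multi-queue (XPS) path
 * (see dp_tx_get_queue() below).
 */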
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
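/*
 * When TQM_BYPASS_WAR is set to 1, every frame is flagged
 * DP_TX_DESC_FLAG_TO_FW and routed through the FW exception path as a
 * workaround for TQM virtual-port issues (see dp_tx_prepare_desc_single()
 * and dp_tx_prepare_desc()).
 */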
41 
42 /* Invalid peer id for reinject */
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /* Mapping between hal encrypt type and cdp_sec_type */
46 #define MAX_CDP_SEC_TYPE 12
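/*
 * Note: the order of the entries below must follow the cdp_sec_type enum,
 * since the table is indexed directly with the sec_type value when the TCL
 * descriptor encrypt type is programmed in dp_tx_hw_enqueue().
 */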
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * The Tx packet queue has two components: the software descriptor pool id
68  * and the DMA ring id. Based on the Tx feature set and hardware
69  * configuration, the queue id combination can differ.
70  * For example -
71  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per CPU id.
72  * With no XPS (lock based resource protection), descriptor pool ids differ
73  * per vdev, and the DMA ring id is the same as the single pdev id.
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc: core txrx main context
112  * @tx_desc: Tx software descriptor
113  */
114 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
115 				    struct dp_tx_desc_s *tx_desc)
116 {
117 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
118 	if (qdf_unlikely(!tx_desc->tso_desc)) {
119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
120 			  "%s %d TSO desc is NULL!",
121 			  __func__, __LINE__);
122 		qdf_assert(0);
123 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
124 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
125 			  "%s %d TSO num desc is NULL!",
126 			  __func__, __LINE__);
127 		qdf_assert(0);
128 	} else {
129 		bool is_last_seg;
130 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
131 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
132 
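		/*
		 * tso_cmn_num_seg counts the segments of this jumbo nbuf that
		 * are still outstanding; the shared DMA mappings are torn
		 * down only when the last segment is unmapped.
		 */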
133 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
134 			is_last_seg = false;
135 		else
136 			is_last_seg = true;
137 		tso_num_desc->num_seg.tso_cmn_num_seg--;
138 		qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					   tx_desc->tso_desc, is_last_seg);
140 	}
141 }
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  * @soc: soc device handle
147  * @tx_desc: Tx software descriptor
148  * @tx_desc - Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list*/
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
183 				    struct dp_tx_desc_s *tx_desc)
184 
185 {
186 }
187 
188 static void dp_tx_tso_desc_release(struct dp_soc *soc,
189 				   struct dp_tx_desc_s *tx_desc)
190 {
191 }
192 #endif
193 /**
194  * dp_tx_desc_release() - Release Tx Descriptor
195  * @tx_desc: Tx Descriptor
196  * @desc_pool_id: Descriptor Pool ID
197  *
198  * Deallocate all resources attached to Tx descriptor and free the Tx
199  * descriptor.
200  *
201  * Return: None
202  */
203 static void
204 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
205 {
206 	struct dp_pdev *pdev = tx_desc->pdev;
207 	struct dp_soc *soc;
208 	uint8_t comp_status = 0;
209 
210 	qdf_assert(pdev);
211 
212 	soc = pdev->soc;
213 
214 	if (tx_desc->frm_type == dp_tx_frm_tso)
215 		dp_tx_tso_desc_release(soc, tx_desc);
216 
217 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
218 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
219 
220 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
221 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
222 
223 	qdf_atomic_dec(&pdev->num_tx_outstanding);
224 
225 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
226 		qdf_atomic_dec(&pdev->num_tx_exception);
227 
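	/*
	 * Completions that were routed back by the firmware carry no TQM
	 * release reason, so report HAL_TX_COMP_RELEASE_REASON_FW for them.
	 */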
228 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
229 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
230 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
231 	else
232 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
233 
234 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
235 		"Tx Completion Release desc %d status %d outstanding %d",
236 		tx_desc->id, comp_status,
237 		qdf_atomic_read(&pdev->num_tx_outstanding));
238 
239 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
240 	return;
241 }
242 
243 /**
244  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
245  * @vdev: DP vdev Handle
246  * @nbuf: skb
247  *
248  * Prepares and fills HTT metadata in the frame pre-header for special frames
249  * that should be transmitted using varying transmit parameters.
250  * There are 2 VDEV modes that currently need this special metadata -
251  *  1) Mesh Mode
252  *  2) DSRC Mode
253  *
254  * Return: HTT metadata size
255  *
256  */
257 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
258 		uint32_t *meta_data)
259 {
260 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
261 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
262 
263 	uint8_t htt_desc_size;
264 
265 	/* Size rounded up to a multiple of 8 bytes */
266 	uint8_t htt_desc_size_aligned;
267 
268 	uint8_t *hdr = NULL;
269 
270 	/*
271 	 * Metadata - HTT MSDU Extension header
272 	 */
273 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
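	/*
	 * (size + 7) & ~0x7 rounds the metadata length up to the next
	 * multiple of 8, matching the 8-byte alignment the hardware expects
	 * for the buffer start address.
	 */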
274 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
275 
276 	if (vdev->mesh_vdev) {
277 
278 		/* Fill and add HTT metaheader */
279 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
280 		if (hdr == NULL) {
281 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
282 					"Error in filling HTT metadata");
283 
284 			return 0;
285 		}
286 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
287 
288 	} else if (vdev->opmode == wlan_op_mode_ocb) {
289 		/* Todo - Add support for DSRC */
290 	}
291 
292 	return htt_desc_size_aligned;
293 }
294 
295 /**
296  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
297  * @tso_seg: TSO segment to process
298  * @ext_desc: Pointer to MSDU extension descriptor
299  *
300  * Return: void
301  */
302 #if defined(FEATURE_TSO)
303 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
304 		void *ext_desc)
305 {
306 	uint8_t num_frag;
307 	uint32_t tso_flags;
308 
309 	/*
310 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
311 	 * tcp_flag_mask
312 	 *
313 	 * Checksum enable flags are set in TCL descriptor and not in Extension
314 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
315 	 */
316 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
317 
318 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
319 
320 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
321 		tso_seg->tso_flags.ip_len);
322 
323 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
324 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
325 
326 
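	/*
	 * Fill one buffer entry per TSO fragment; qdf_dmaaddr_to_32s() splits
	 * each fragment's DMA address into the low/high words expected by the
	 * MSDU extension descriptor.
	 */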
327 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
328 		uint32_t lo = 0;
329 		uint32_t hi = 0;
330 
331 		qdf_dmaaddr_to_32s(
332 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
333 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
334 			tso_seg->tso_frags[num_frag].length);
335 	}
336 
337 	return;
338 }
339 #else
340 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
341 		void *ext_desc)
342 {
343 	return;
344 }
345 #endif
346 
347 #if defined(FEATURE_TSO)
348 /**
349  * dp_tx_free_tso_seg() - Loop through the tso segments
350  *                        allocated and free them
351  *
352  * @soc: soc handle
353  * @free_seg: list of tso segments
354  * @msdu_info: msdu descriptor
355  *
356  * Return: void
357  */
358 static void dp_tx_free_tso_seg(struct dp_soc *soc,
359 	struct qdf_tso_seg_elem_t *free_seg,
360 	struct dp_tx_msdu_info_s *msdu_info)
361 {
362 	struct qdf_tso_seg_elem_t *next_seg;
363 
364 	while (free_seg) {
365 		next_seg = free_seg->next;
366 		dp_tx_tso_desc_free(soc,
367 			msdu_info->tx_queue.desc_pool_id,
368 			free_seg);
369 		free_seg = next_seg;
370 	}
371 }
372 
373 /**
374  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
375  *                            allocated and free them
376  *
377  * @soc:  soc handle
378  * @free_seg: list of tso num segments
379  * @msdu_info: msdu descriptor
380  * Return: void
381  */
382 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
383 	struct qdf_tso_num_seg_elem_t *free_seg,
384 	struct dp_tx_msdu_info_s *msdu_info)
385 {
386 	struct qdf_tso_num_seg_elem_t *next_seg;
387 
388 	while (free_seg) {
389 		next_seg = free_seg->next;
390 		dp_tso_num_seg_free(soc,
391 			msdu_info->tx_queue.desc_pool_id,
392 			free_seg);
393 		free_seg = next_seg;
394 	}
395 }
396 
397 /**
398  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
399  * @vdev: virtual device handle
400  * @msdu: network buffer
401  * @msdu_info: meta data associated with the msdu
402  *
403  * Return: QDF_STATUS_SUCCESS success
404  */
405 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
406 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
407 {
408 	struct qdf_tso_seg_elem_t *tso_seg;
409 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
410 	struct dp_soc *soc = vdev->pdev->soc;
411 	struct qdf_tso_info_t *tso_info;
412 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
413 
414 	tso_info = &msdu_info->u.tso_info;
415 	tso_info->curr_seg = NULL;
416 	tso_info->tso_seg_list = NULL;
417 	tso_info->num_segs = num_seg;
418 	msdu_info->frm_type = dp_tx_frm_tso;
419 	tso_info->tso_num_seg_list = NULL;
420 
421 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
422 
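	/*
	 * Reserve one TSO segment element per segment up front; if any
	 * allocation fails, everything reserved so far is returned to the
	 * descriptor pool and QDF_STATUS_E_NOMEM is reported.
	 */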
423 	while (num_seg) {
424 		tso_seg = dp_tx_tso_desc_alloc(
425 				soc, msdu_info->tx_queue.desc_pool_id);
426 		if (tso_seg) {
427 			tso_seg->next = tso_info->tso_seg_list;
428 			tso_info->tso_seg_list = tso_seg;
429 			num_seg--;
430 		} else {
431 			struct qdf_tso_seg_elem_t *free_seg =
432 				tso_info->tso_seg_list;
433 
434 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
435 
436 			return QDF_STATUS_E_NOMEM;
437 		}
438 	}
439 
440 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
441 
442 	tso_num_seg = dp_tso_num_seg_alloc(soc,
443 			msdu_info->tx_queue.desc_pool_id);
444 
445 	if (tso_num_seg) {
446 		tso_num_seg->next = tso_info->tso_num_seg_list;
447 		tso_info->tso_num_seg_list = tso_num_seg;
448 	} else {
449 		/* Bug: free tso_num_seg and tso_seg */
450 		/* Free the already allocated num of segments */
451 		struct qdf_tso_seg_elem_t *free_seg =
452 					tso_info->tso_seg_list;
453 
454 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
455 			__func__);
456 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
457 
458 		return QDF_STATUS_E_NOMEM;
459 	}
460 
461 	msdu_info->num_seg =
462 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
463 
464 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
465 			msdu_info->num_seg);
466 
467 	if (!(msdu_info->num_seg)) {
468 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
469 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
470 					msdu_info);
471 		return QDF_STATUS_E_INVAL;
472 	}
473 
474 	tso_info->curr_seg = tso_info->tso_seg_list;
475 
476 	return QDF_STATUS_SUCCESS;
477 }
478 #else
479 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
480 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
481 {
482 	return QDF_STATUS_E_NOMEM;
483 }
484 #endif
485 
486 /**
487  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
488  * @vdev: DP Vdev handle
489  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
490  * @desc_pool_id: Descriptor Pool ID
491  *
492  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
493  */
494 static
495 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
496 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
497 {
498 	uint8_t i;
499 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
500 	struct dp_tx_seg_info_s *seg_info;
501 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
502 	struct dp_soc *soc = vdev->pdev->soc;
503 
504 	/* Allocate an extension descriptor */
505 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
506 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
507 
508 	if (!msdu_ext_desc) {
509 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
510 		return NULL;
511 	}
512 
513 	if (msdu_info->exception_fw &&
514 			qdf_unlikely(vdev->mesh_vdev)) {
515 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
516 				&msdu_info->meta_data[0],
517 				sizeof(struct htt_tx_msdu_desc_ext2_t));
518 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
519 	}
520 
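	/*
	 * The extension descriptor is staged in the cached buffer on the
	 * stack and copied into the descriptor memory by
	 * hal_tx_ext_desc_sync() below.
	 */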
521 	switch (msdu_info->frm_type) {
522 	case dp_tx_frm_sg:
523 	case dp_tx_frm_me:
524 	case dp_tx_frm_raw:
525 		seg_info = msdu_info->u.sg_info.curr_seg;
526 		/* Update the buffer pointers in MSDU Extension Descriptor */
527 		for (i = 0; i < seg_info->frag_cnt; i++) {
528 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
529 				seg_info->frags[i].paddr_lo,
530 				seg_info->frags[i].paddr_hi,
531 				seg_info->frags[i].len);
532 		}
533 
534 		break;
535 
536 	case dp_tx_frm_tso:
537 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
538 				&cached_ext_desc[0]);
539 		break;
540 
541 
542 	default:
543 		break;
544 	}
545 
546 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
547 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
548 
549 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
550 			msdu_ext_desc->vaddr);
551 
552 	return msdu_ext_desc;
553 }
554 
555 /**
556  * dp_tx_trace_pkt() - Trace TX packet at DP layer
557  *
558  * @skb: skb to be traced
559  * @msdu_id: msdu_id of the packet
560  * @vdev_id: vdev_id of the packet
561  *
562  * Return: None
563  */
564 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
565 			    uint8_t vdev_id)
566 {
567 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
568 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
569 	DPTRACE(qdf_dp_trace_ptr(skb,
570 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
571 				 QDF_TRACE_DEFAULT_PDEV_ID,
572 				 qdf_nbuf_data_addr(skb),
573 				 sizeof(qdf_nbuf_data(skb)),
574 				 msdu_id, vdev_id));
575 
576 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
577 
578 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
579 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
580 				      msdu_id, QDF_TX));
581 }
582 
583 /**
584  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
585  * @vdev: DP vdev handle
586  * @nbuf: skb
587  * @desc_pool_id: Descriptor pool ID
588  * @msdu_info: MSDU info carrying metadata to be sent to the fw
589  * @tx_exc_metadata: Handle that holds exception path metadata
590  * Allocate and prepare Tx descriptor with msdu information.
591  *
592  * Return: Pointer to Tx Descriptor on success,
593  *         NULL on failure
594  */
595 static
596 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
597 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
598 		struct dp_tx_msdu_info_s *msdu_info,
599 		struct cdp_tx_exception_metadata *tx_exc_metadata)
600 {
601 	uint8_t align_pad;
602 	uint8_t is_exception = 0;
603 	uint8_t htt_hdr_size;
604 	struct ether_header *eh;
605 	struct dp_tx_desc_s *tx_desc;
606 	struct dp_pdev *pdev = vdev->pdev;
607 	struct dp_soc *soc = pdev->soc;
608 
609 	/* Allocate software Tx descriptor */
610 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
611 	if (qdf_unlikely(!tx_desc)) {
612 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
613 		return NULL;
614 	}
615 
616 	/* Flow control/Congestion Control counters */
617 	qdf_atomic_inc(&pdev->num_tx_outstanding);
618 
619 	/* Initialize the SW tx descriptor */
620 	tx_desc->nbuf = nbuf;
621 	tx_desc->frm_type = dp_tx_frm_std;
622 	tx_desc->tx_encap_type = (tx_exc_metadata ?
623 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
624 	tx_desc->vdev = vdev;
625 	tx_desc->pdev = pdev;
626 	tx_desc->msdu_ext_desc = NULL;
627 	tx_desc->pkt_offset = 0;
628 
629 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
630 
631 	/* Reset the control block */
632 	qdf_nbuf_reset_ctxt(nbuf);
633 
634 	/*
635 	 * For special modes (vdev_type == ocb or mesh), data frames should be
636 	 * transmitted using varying transmit parameters (tx spec) which include
637 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
638 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
639 	 * These frames are sent as exception packets to firmware.
640 	 *
641 	 * HW requirement is that metadata should always point to a
642 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
643 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
644 	 *  to get 8-byte aligned start address along with align_pad added
645 	 *
646 	 *  |-----------------------------|
647 	 *  |                             |
648 	 *  |-----------------------------| <-----Buffer Pointer Address given
649 	 *  |                             |  ^    in HW descriptor (aligned)
650 	 *  |       HTT Metadata          |  |
651 	 *  |                             |  |
652 	 *  |                             |  | Packet Offset given in descriptor
653 	 *  |                             |  |
654 	 *  |-----------------------------|  |
655 	 *  |       Alignment Pad         |  v
656 	 *  |-----------------------------| <----- Actual buffer start address
657 	 *  |        SKB Data             |           (Unaligned)
658 	 *  |                             |
659 	 *  |                             |
660 	 *  |                             |
661 	 *  |                             |
662 	 *  |                             |
663 	 *  |-----------------------------|
664 	 */
665 	if (qdf_unlikely((msdu_info->exception_fw)) ||
666 				(vdev->opmode == wlan_op_mode_ocb)) {
667 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
668 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
669 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
670 					"qdf_nbuf_push_head failed");
671 			goto failure;
672 		}
673 
674 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
675 				msdu_info->meta_data);
676 		if (htt_hdr_size == 0)
677 			goto failure;
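		/*
		 * pkt_offset tells the hardware how many bytes of alignment
		 * pad and HTT metadata to skip before the actual payload
		 * (see the buffer layout above).
		 */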
678 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
679 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
680 		is_exception = 1;
681 	}
682 
683 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
684 				qdf_nbuf_map(soc->osdev, nbuf,
685 					QDF_DMA_TO_DEVICE))) {
686 		/* Handle failure */
687 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
688 				"qdf_nbuf_map failed");
689 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
690 		goto failure;
691 	}
692 
693 	if (qdf_unlikely(vdev->nawds_enabled)) {
694 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
695 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
696 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
697 			is_exception = 1;
698 		}
699 	}
700 
701 #if !TQM_BYPASS_WAR
702 	if (is_exception || tx_exc_metadata)
703 #endif
704 	{
705 		/* Temporary WAR due to TQM VP issues */
706 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
707 		qdf_atomic_inc(&pdev->num_tx_exception);
708 	}
709 
710 	return tx_desc;
711 
712 failure:
713 	dp_tx_desc_release(tx_desc, desc_pool_id);
714 	return NULL;
715 }
716 
717 /**
718  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
719  * @vdev: DP vdev handle
720  * @nbuf: skb
721  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
722  * @desc_pool_id : Descriptor Pool ID
723  *
724  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
725  * information. For frames with fragments, allocate and prepare
726  * an MSDU extension descriptor
727  *
728  * Return: Pointer to Tx Descriptor on success,
729  *         NULL on failure
730  */
731 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
732 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
733 		uint8_t desc_pool_id)
734 {
735 	struct dp_tx_desc_s *tx_desc;
736 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
737 	struct dp_pdev *pdev = vdev->pdev;
738 	struct dp_soc *soc = pdev->soc;
739 
740 	/* Allocate software Tx descriptor */
741 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
742 	if (!tx_desc) {
743 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
744 		return NULL;
745 	}
746 
747 	/* Flow control/Congestion Control counters */
748 	qdf_atomic_inc(&pdev->num_tx_outstanding);
749 
750 	/* Initialize the SW tx descriptor */
751 	tx_desc->nbuf = nbuf;
752 	tx_desc->frm_type = msdu_info->frm_type;
753 	tx_desc->tx_encap_type = vdev->tx_encap_type;
754 	tx_desc->vdev = vdev;
755 	tx_desc->pdev = pdev;
756 	tx_desc->pkt_offset = 0;
757 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
758 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
759 
760 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
761 
762 	/* Reset the control block */
763 	qdf_nbuf_reset_ctxt(nbuf);
764 
765 	/* Handle scattered frames - TSO/SG/ME */
766 	/* Allocate and prepare an extension descriptor for scattered frames */
767 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
768 	if (!msdu_ext_desc) {
769 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
770 				"%s Tx Extension Descriptor Alloc Fail",
771 				__func__);
772 		goto failure;
773 	}
774 
775 #if TQM_BYPASS_WAR
776 	/* Temporary WAR due to TQM VP issues */
777 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
778 	qdf_atomic_inc(&pdev->num_tx_exception);
779 #endif
780 	if (qdf_unlikely(msdu_info->exception_fw))
781 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
782 
783 	tx_desc->msdu_ext_desc = msdu_ext_desc;
784 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
785 
786 	return tx_desc;
787 failure:
788 	dp_tx_desc_release(tx_desc, desc_pool_id);
789 	return NULL;
790 }
791 
792 /**
793  * dp_tx_prepare_raw() - Prepare RAW packet TX
794  * @vdev: DP vdev handle
795  * @nbuf: buffer pointer
796  * @seg_info: Pointer to Segment info Descriptor to be prepared
797  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
798  *     descriptor
799  *
800  * Return: nbuf on success, NULL on failure
801  */
802 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
803 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
804 {
805 	qdf_nbuf_t curr_nbuf = NULL;
806 	uint16_t total_len = 0;
807 	qdf_dma_addr_t paddr;
808 	int32_t i;
809 	int32_t mapped_buf_num = 0;
810 
811 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
812 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
813 
814 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
815 
816 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
817 	if (vdev->raw_mode_war &&
818 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
819 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
820 
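	/*
	 * A raw frame is handed down as a chain of nbufs; DMA-map each buffer
	 * in the chain and record it as one fragment of a single
	 * scatter-gather segment.
	 */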
821 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
822 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
823 
824 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
825 					QDF_DMA_TO_DEVICE)) {
826 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
827 				"%s dma map error ", __func__);
828 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
829 			mapped_buf_num = i;
830 			goto error;
831 		}
832 
833 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
834 		seg_info->frags[i].paddr_lo = paddr;
835 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
836 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
837 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
838 		total_len += qdf_nbuf_len(curr_nbuf);
839 	}
840 
841 	seg_info->frag_cnt = i;
842 	seg_info->total_len = total_len;
843 	seg_info->next = NULL;
844 
845 	sg_info->curr_seg = seg_info;
846 
847 	msdu_info->frm_type = dp_tx_frm_raw;
848 	msdu_info->num_seg = 1;
849 
850 	return nbuf;
851 
852 error:
853 	i = 0;
854 	while (nbuf) {
855 		curr_nbuf = nbuf;
856 		if (i < mapped_buf_num) {
857 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
858 			i++;
859 		}
860 		nbuf = qdf_nbuf_next(nbuf);
861 		qdf_nbuf_free(curr_nbuf);
862 	}
863 	return NULL;
864 
865 }
866 
867 /**
868  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
869  * @soc: DP Soc Handle
870  * @vdev: DP vdev handle
871  * @tx_desc: Tx Descriptor Handle
872  * @tid: TID from HLOS for overriding default DSCP-TID mapping
873  * @fw_metadata: Metadata to send to Target Firmware along with frame
874  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
875  * @tx_exc_metadata: Handle that holds exception path meta data
876  *
877  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
878  *  from software Tx descriptor
879  *
880  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
881  */
882 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
883 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
884 				   uint16_t fw_metadata, uint8_t ring_id,
885 				   struct cdp_tx_exception_metadata
886 					*tx_exc_metadata)
887 {
888 	uint8_t type;
889 	uint16_t length;
890 	void *hal_tx_desc, *hal_tx_desc_cached;
891 	qdf_dma_addr_t dma_addr;
892 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
893 
894 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
895 			tx_exc_metadata->sec_type : vdev->sec_type);
896 
897 	/* Return Buffer Manager ID */
898 	uint8_t bm_id = ring_id;
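	/*
	 * The TCL ring id doubles as the return buffer manager id, so the
	 * completion for this descriptor is expected back on the matching
	 * WBM Tx completion ring.
	 */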
899 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
900 
901 	hal_tx_desc_cached = (void *) cached_desc;
902 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
903 
904 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
905 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
906 		type = HAL_TX_BUF_TYPE_EXT_DESC;
907 		dma_addr = tx_desc->msdu_ext_desc->paddr;
908 	} else {
909 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
910 		type = HAL_TX_BUF_TYPE_BUFFER;
911 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
912 	}
913 
914 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
915 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
916 					dma_addr, bm_id, tx_desc->id,
917 					type, soc->hal_soc);
918 
919 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
920 		return QDF_STATUS_E_RESOURCES;
921 
922 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
923 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
924 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
925 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
926 				HAL_TX_DESC_DEFAULT_LMAC_ID);
927 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
928 					  vdev->dscp_tid_map_id);
929 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
930 			sec_type_map[sec_type]);
931 
932 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
933 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
934 			__func__, length, type, (uint64_t)dma_addr,
935 			tx_desc->pkt_offset, tx_desc->id);
936 
937 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
938 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
939 
940 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
941 			vdev->hal_desc_addr_search_flags);
942 
943 	/* Verify checksum offload configuration */
944 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
945 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
946 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
947 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
948 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
949 	}
950 
951 	if (tid != HTT_TX_EXT_TID_INVALID)
952 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
953 
954 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
955 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
956 
957 
958 	/* Sync cached descriptor with HW */
959 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
960 
961 	if (!hal_tx_desc) {
962 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
963 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
964 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
965 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
966 		return QDF_STATUS_E_RESOURCES;
967 	}
968 
969 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
970 
971 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
972 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
973 
974 	return QDF_STATUS_SUCCESS;
975 }
976 
977 
978 /**
979  * dp_cce_classify() - Classify the frame based on CCE rules
980  * @vdev: DP vdev handle
981  * @nbuf: skb
982  *
983  * Classify frames based on CCE rules
984  * Return: true if the frame is classified,
985  *         false otherwise
986  */
987 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
988 {
989 	struct ether_header *eh = NULL;
990 	uint16_t   ether_type;
991 	qdf_llc_t *llcHdr;
992 	qdf_nbuf_t nbuf_clone = NULL;
993 	qdf_dot3_qosframe_t *qos_wh = NULL;
994 
995 	/* for mesh packets don't do any classification */
996 	if (qdf_unlikely(vdev->mesh_vdev))
997 		return false;
998 
999 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1000 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1001 		ether_type = eh->ether_type;
1002 		llcHdr = (qdf_llc_t *)(nbuf->data +
1003 					sizeof(struct ether_header));
1004 	} else {
1005 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1006 		/* For encrypted packets don't do any classification */
1007 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1008 			return false;
1009 
1010 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1011 			if (qdf_unlikely(
1012 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1013 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1014 
1015 				ether_type = *(uint16_t *)(nbuf->data
1016 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1017 						+ sizeof(qdf_llc_t)
1018 						- sizeof(ether_type));
1019 				llcHdr = (qdf_llc_t *)(nbuf->data +
1020 						QDF_IEEE80211_4ADDR_HDR_LEN);
1021 			} else {
1022 				ether_type = *(uint16_t *)(nbuf->data
1023 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1024 						+ sizeof(qdf_llc_t)
1025 						- sizeof(ether_type));
1026 				llcHdr = (qdf_llc_t *)(nbuf->data +
1027 					QDF_IEEE80211_3ADDR_HDR_LEN);
1028 			}
1029 
1030 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1031 				&& (ether_type ==
1032 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1033 
1034 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1035 				return true;
1036 			}
1037 		}
1038 
1039 		return false;
1040 	}
1041 
1042 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1043 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1044 				sizeof(*llcHdr));
1045 		nbuf_clone = qdf_nbuf_clone(nbuf);
1046 		if (qdf_unlikely(nbuf_clone)) {
1047 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1048 
1049 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1050 				qdf_nbuf_pull_head(nbuf_clone,
1051 						sizeof(qdf_net_vlanhdr_t));
1052 			}
1053 		}
1054 	} else {
1055 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1056 			nbuf_clone = qdf_nbuf_clone(nbuf);
1057 			if (qdf_unlikely(nbuf_clone)) {
1058 				qdf_nbuf_pull_head(nbuf_clone,
1059 					sizeof(qdf_net_vlanhdr_t));
1060 			}
1061 		}
1062 	}
1063 
1064 	if (qdf_unlikely(nbuf_clone))
1065 		nbuf = nbuf_clone;
1066 
1067 
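	/*
	 * EAPOL, ARP, WAPI, TDLS and DHCP frames need special in-band
	 * handling; report them as classified so the callers send them on the
	 * VO queue through the FW exception path.
	 */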
1068 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1069 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1070 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1071 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1072 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1073 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1074 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1075 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1076 		if (qdf_unlikely(nbuf_clone != NULL))
1077 			qdf_nbuf_free(nbuf_clone);
1078 		return true;
1079 	}
1080 
1081 	if (qdf_unlikely(nbuf_clone != NULL))
1082 		qdf_nbuf_free(nbuf_clone);
1083 
1084 	return false;
1085 }
1086 
1087 /**
1088  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1089  * @vdev: DP vdev handle
1090  * @nbuf: skb
1091  *
1092  * Extract the DSCP or PCP information from frame and map into TID value.
1093  * Software based TID classification is required when more than 2 DSCP-TID
1094  * mapping tables are needed.
1095  * Hardware supports 2 DSCP-TID mapping tables
1096  *
1097  * Return: void
1098  */
1099 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1100 		struct dp_tx_msdu_info_s *msdu_info)
1101 {
1102 	uint8_t tos = 0, dscp_tid_override = 0;
1103 	uint8_t *hdr_ptr, *L3datap;
1104 	uint8_t is_mcast = 0;
1105 	struct ether_header *eh = NULL;
1106 	qdf_ethervlan_header_t *evh = NULL;
1107 	uint16_t   ether_type;
1108 	qdf_llc_t *llcHdr;
1109 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1110 
1111 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1112 
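	/*
	 * DSCP-TID map ids 0 and 1 are programmed into the hardware mapping
	 * tables, so software classification is needed only for the remaining
	 * map ids.
	 */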
1113 	if (vdev->dscp_tid_map_id <= 1)
1114 		return;
1115 
1116 	/* for mesh packets don't do any classification */
1117 	if (qdf_unlikely(vdev->mesh_vdev))
1118 		return;
1119 
1120 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1121 		eh = (struct ether_header *) nbuf->data;
1122 		hdr_ptr = eh->ether_dhost;
1123 		L3datap = hdr_ptr + sizeof(struct ether_header);
1124 	} else {
1125 		qdf_dot3_qosframe_t *qos_wh =
1126 			(qdf_dot3_qosframe_t *) nbuf->data;
1127 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1128 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1129 		return;
1130 	}
1131 
1132 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1133 	ether_type = eh->ether_type;
1134 
1135 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1136 	/*
1137 	 * Check if packet is dot3 or eth2 type.
1138 	 */
1139 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1140 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1141 				sizeof(*llcHdr));
1142 
1143 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1144 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1145 				sizeof(*llcHdr);
1146 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1147 					+ sizeof(*llcHdr) +
1148 					sizeof(qdf_net_vlanhdr_t));
1149 		} else {
1150 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1151 				sizeof(*llcHdr);
1152 		}
1153 	} else {
1154 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1155 			evh = (qdf_ethervlan_header_t *) eh;
1156 			ether_type = evh->ether_type;
1157 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1158 		}
1159 	}
1160 
1161 	/*
1162 	 * Find priority from IP TOS DSCP field
1163 	 */
1164 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1165 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1166 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1167 			/* Only for unicast frames */
1168 			if (!is_mcast) {
1169 				/* send it on VO queue */
1170 				msdu_info->tid = DP_VO_TID;
1171 			}
1172 		} else {
1173 			/*
1174 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1175 			 * from TOS byte.
1176 			 */
1177 			tos = ip->ip_tos;
1178 			dscp_tid_override = 1;
1179 
1180 		}
1181 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1182 		/* TODO
1183 		 * use flowlabel
1184 		 *igmpmld cases to be handled in phase 2
1185 		 */
1186 		unsigned long ver_pri_flowlabel;
1187 		unsigned long pri;
1188 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1189 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1190 			DP_IPV6_PRIORITY_SHIFT;
1191 		tos = pri;
1192 		dscp_tid_override = 1;
1193 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1194 		msdu_info->tid = DP_VO_TID;
1195 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1196 		/* Only for unicast frames */
1197 		if (!is_mcast) {
1198 			/* send ucast arp on VO queue */
1199 			msdu_info->tid = DP_VO_TID;
1200 		}
1201 	}
1202 
1203 	/*
1204 	 * Assign all MCAST packets to BE
1205 	 */
1206 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1207 		if (is_mcast) {
1208 			tos = 0;
1209 			dscp_tid_override = 1;
1210 		}
1211 	}
1212 
1213 	if (dscp_tid_override == 1) {
1214 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1215 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1216 	}
1217 	return;
1218 }
1219 
1220 #ifdef CONVERGED_TDLS_ENABLE
1221 /**
1222  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1223  * @tx_desc: TX descriptor
1224  *
1225  * Return: None
1226  */
1227 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1228 {
1229 	if (tx_desc->vdev) {
1230 		if (tx_desc->vdev->is_tdls_frame) {
1231 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1232 			tx_desc->vdev->is_tdls_frame = false;
1233 		}
 	}
1234 }
1235 
1236 /**
1237  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1238  * @tx_desc: TX descriptor
1239  * @vdev: datapath vdev handle
1240  *
1241  * Return: None
1242  */
1243 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1244 				  struct dp_vdev *vdev)
1245 {
1246 	struct hal_tx_completion_status ts = {0};
1247 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1248 
1249 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1250 	if (vdev->tx_non_std_data_callback.func) {
1251 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1252 		vdev->tx_non_std_data_callback.func(
1253 				vdev->tx_non_std_data_callback.ctxt,
1254 				nbuf, ts.status);
1255 		return;
1256 	}
1257 }
1258 #endif
1259 
1260 /**
1261  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1262  * @vdev: DP vdev handle
1263  * @nbuf: skb
1264  * @msdu_info: MSDU info to be setup in MSDU descriptor, carrying the TID
1265  *	from HLOS, the metadata for the fw and the Tx queue to be used for
1266  *	this Tx frame
1267  * @peer_id: peer_id of the peer in case of NAWDS frames
1268  * @tx_exc_metadata: Handle that holds exception path metadata
1269  *
1270  * Return: NULL on success,
1271  *         nbuf when it fails to send
1272  */
1273 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1274 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1275 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1276 {
1277 	struct dp_pdev *pdev = vdev->pdev;
1278 	struct dp_soc *soc = pdev->soc;
1279 	struct dp_tx_desc_s *tx_desc;
1280 	QDF_STATUS status;
1281 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1282 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1283 	uint16_t htt_tcl_metadata = 0;
1284 	uint8_t tid = msdu_info->tid;
1285 
1286 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1287 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1288 			msdu_info, tx_exc_metadata);
1289 	if (!tx_desc) {
1290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1291 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1292 			  __func__, vdev, tx_q->desc_pool_id);
1293 		return nbuf;
1294 	}
1295 
1296 	if (qdf_unlikely(soc->cce_disable)) {
1297 		if (dp_cce_classify(vdev, nbuf) == true) {
1298 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1299 			tid = DP_VO_TID;
1300 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1301 		}
1302 	}
1303 
1304 	dp_tx_update_tdls_flags(tx_desc);
1305 
1306 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1307 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1308 				"%s %d : HAL RING Access Failed -- %pK",
1309 				__func__, __LINE__, hal_srng);
1310 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1311 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1312 		goto fail_return;
1313 	}
1314 
1315 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1316 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1317 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1318 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1319 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1320 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1321 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1322 				peer_id);
1323 	} else
1324 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1325 
1326 
1327 	if (msdu_info->exception_fw) {
1328 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1329 	}
1330 
1331 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1332 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1333 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1334 
1335 	if (status != QDF_STATUS_SUCCESS) {
1336 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1337 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1338 			  __func__, tx_desc, tx_q->ring_id);
1339 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1340 		goto fail_return;
1341 	}
1342 
1343 	nbuf = NULL;
1344 
1345 fail_return:
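	/*
	 * End SRNG access normally only when a runtime-PM reference could be
	 * taken; otherwise just reap the ring and leave the hardware ring
	 * pointer update for later.
	 */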
1346 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1347 		hal_srng_access_end(soc->hal_soc, hal_srng);
1348 		hif_pm_runtime_put(soc->hif_handle);
1349 	} else {
1350 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1351 	}
1352 
1353 	return nbuf;
1354 }
1355 
1356 /**
1357  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1358  * @vdev: DP vdev handle
1359  * @nbuf: skb
1360  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1361  *
1362  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1363  *
1364  * Return: NULL on success,
1365  *         nbuf when it fails to send
1366  */
1367 #if QDF_LOCK_STATS
1368 static noinline
1369 #else
1370 static
1371 #endif
1372 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1373 				    struct dp_tx_msdu_info_s *msdu_info)
1374 {
1375 	uint8_t i;
1376 	struct dp_pdev *pdev = vdev->pdev;
1377 	struct dp_soc *soc = pdev->soc;
1378 	struct dp_tx_desc_s *tx_desc;
1379 	bool is_cce_classified = false;
1380 	QDF_STATUS status;
1381 	uint16_t htt_tcl_metadata = 0;
1382 
1383 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1384 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1385 
1386 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1387 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1388 				"%s %d : HAL RING Access Failed -- %pK",
1389 				__func__, __LINE__, hal_srng);
1390 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1391 		return nbuf;
1392 	}
1393 
1394 	if (qdf_unlikely(soc->cce_disable)) {
1395 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1396 		if (is_cce_classified) {
1397 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1398 			msdu_info->tid = DP_VO_TID;
1399 		}
1400 	}
1401 
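	/*
	 * For multicast-enhancement (ME) frames each segment carries its own
	 * copy of the frame, so transmission starts from the first segment's
	 * nbuf rather than the original buffer.
	 */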
1402 	if (msdu_info->frm_type == dp_tx_frm_me)
1403 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1404 
1405 	i = 0;
1406 	/* Print statement to track i and num_seg */
1407 	/*
1408 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1409 	 * descriptors using information in msdu_info
1410 	 */
1411 	while (i < msdu_info->num_seg) {
1412 		/*
1413 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1414 		 * descriptor
1415 		 */
1416 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1417 				tx_q->desc_pool_id);
1418 
1419 		if (!tx_desc) {
1420 			if (msdu_info->frm_type == dp_tx_frm_me) {
1421 				dp_tx_me_free_buf(pdev,
1422 					(void *)(msdu_info->u.sg_info
1423 						.curr_seg->frags[0].vaddr));
1424 			}
1425 			goto done;
1426 		}
1427 
1428 		if (msdu_info->frm_type == dp_tx_frm_me) {
1429 			tx_desc->me_buffer =
1430 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1431 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1432 		}
1433 
1434 		if (is_cce_classified)
1435 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1436 
1437 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1438 		if (msdu_info->exception_fw) {
1439 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1440 		}
1441 
1442 		/*
1443 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1444 		 */
1445 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1446 			htt_tcl_metadata, tx_q->ring_id, NULL);
1447 
1448 		if (status != QDF_STATUS_SUCCESS) {
1449 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1450 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1451 				  __func__, tx_desc, tx_q->ring_id);
1452 
1453 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1454 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1455 
1456 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1457 			goto done;
1458 		}
1459 
1460 		/*
1461 		 * TODO
1462 		 * if tso_info structure can be modified to have curr_seg
1463 		 * as first element, following 2 blocks of code (for TSO and SG)
1464 		 * can be combined into 1
1465 		 */
1466 
1467 		/*
1468 		 * For frames with multiple segments (TSO, ME), jump to next
1469 		 * segment.
1470 		 */
1471 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1472 			if (msdu_info->u.tso_info.curr_seg->next) {
1473 				msdu_info->u.tso_info.curr_seg =
1474 					msdu_info->u.tso_info.curr_seg->next;
1475 
1476 				/*
1477 				 * If this is a jumbo nbuf, then increment the number of
1478 				 * nbuf users for each additional segment of the msdu.
1479 				 * This will ensure that the skb is freed only after
1480 				 * receiving tx completion for all segments of an nbuf
1481 				 */
1482 				qdf_nbuf_inc_users(nbuf);
1483 
1484 				/* Check with MCL if this is needed */
1485 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1486 			}
1487 		}
1488 
1489 		/*
1490 		 * For Multicast-Unicast converted packets,
1491 		 * each converted frame (for a client) is represented as
1492 		 * 1 segment
1493 		 */
1494 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1495 				(msdu_info->frm_type == dp_tx_frm_me)) {
1496 			if (msdu_info->u.sg_info.curr_seg->next) {
1497 				msdu_info->u.sg_info.curr_seg =
1498 					msdu_info->u.sg_info.curr_seg->next;
1499 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1500 			}
1501 		}
1502 		i++;
1503 	}
1504 
1505 	nbuf = NULL;
1506 
1507 done:
1508 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1509 		hal_srng_access_end(soc->hal_soc, hal_srng);
1510 		hif_pm_runtime_put(soc->hif_handle);
1511 	} else {
1512 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1513 	}
1514 
1515 	return nbuf;
1516 }
1517 
1518 /**
1519  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1520  *                     for SG frames
1521  * @vdev: DP vdev handle
1522  * @nbuf: skb
1523  * @seg_info: Pointer to Segment info Descriptor to be prepared
1524  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1525  *
1526  * Return: NULL on success,
1527  *         nbuf when it fails to send
1528  */
1529 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1530 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1531 {
1532 	uint32_t cur_frag, nr_frags;
1533 	qdf_dma_addr_t paddr;
1534 	struct dp_tx_sg_info_s *sg_info;
1535 
1536 	sg_info = &msdu_info->u.sg_info;
1537 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1538 
1539 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1540 				QDF_DMA_TO_DEVICE)) {
1541 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1542 				"dma map error");
1543 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1544 
1545 		qdf_nbuf_free(nbuf);
1546 		return NULL;
1547 	}
1548 
1549 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1550 	seg_info->frags[0].paddr_lo = paddr;
1551 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1552 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1553 	seg_info->frags[0].vaddr = (void *) nbuf;
1554 
1555 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1556 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1557 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1558 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1559 					"frag dma map error");
1560 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1561 			qdf_nbuf_free(nbuf);
1562 			return NULL;
1563 		}
1564 
1565 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1566 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1567 		seg_info->frags[cur_frag + 1].paddr_hi =
1568 			((uint64_t) paddr) >> 32;
1569 		seg_info->frags[cur_frag + 1].len =
1570 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1571 	}
1572 
1573 	seg_info->frag_cnt = (cur_frag + 1);
1574 	seg_info->total_len = qdf_nbuf_len(nbuf);
1575 	seg_info->next = NULL;
1576 
1577 	sg_info->curr_seg = seg_info;
1578 
1579 	msdu_info->frm_type = dp_tx_frm_sg;
1580 	msdu_info->num_seg = 1;
1581 
1582 	return nbuf;
1583 }
1584 
1585 #ifdef MESH_MODE_SUPPORT
1586 
1587 /**
1588  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1589  *				and prepare msdu_info for mesh frames.
1590  * @vdev: DP vdev handle
1591  * @nbuf: skb
1592  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1593  *
1594  * Return: NULL on failure,
1595  *         nbuf when extracted successfully
1596  */
1597 static
1598 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1599 				struct dp_tx_msdu_info_s *msdu_info)
1600 {
1601 	struct meta_hdr_s *mhdr;
1602 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1603 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1604 
1605 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1606 
1607 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1608 		msdu_info->exception_fw = 0;
1609 		goto remove_meta_hdr;
1610 	}
1611 
1612 	msdu_info->exception_fw = 1;
1613 
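	/*
	 * Translate the per-packet mesh meta header (power, rate, retries,
	 * key and encryption flags) supplied by the OS layer into HTT MSDU
	 * extension descriptor fields understood by the target.
	 */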
1614 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1615 
1616 	meta_data->host_tx_desc_pool = 1;
1617 	meta_data->update_peer_cache = 1;
1618 	meta_data->learning_frame = 1;
1619 
1620 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1621 		meta_data->power = mhdr->power;
1622 
1623 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1624 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1625 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1626 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1627 
1628 		meta_data->dyn_bw = 1;
1629 
1630 		meta_data->valid_pwr = 1;
1631 		meta_data->valid_mcs_mask = 1;
1632 		meta_data->valid_nss_mask = 1;
1633 		meta_data->valid_preamble_type  = 1;
1634 		meta_data->valid_retries = 1;
1635 		meta_data->valid_bw_info = 1;
1636 	}
1637 
1638 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1639 		meta_data->encrypt_type = 0;
1640 		meta_data->valid_encrypt_type = 1;
1641 		meta_data->learning_frame = 0;
1642 	}
1643 
1644 	meta_data->valid_key_flags = 1;
1645 	meta_data->key_flags = (mhdr->keyix & 0x3);
1646 
1647 remove_meta_hdr:
1648 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1649 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1650 				"qdf_nbuf_pull_head failed");
1651 		qdf_nbuf_free(nbuf);
1652 		return NULL;
1653 	}
1654 
1655 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1656 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1657 	else
1658 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1659 
1660 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1661 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1662 			" tid %d to_fw %d",
1663 			__func__, msdu_info->meta_data[0],
1664 			msdu_info->meta_data[1],
1665 			msdu_info->meta_data[2],
1666 			msdu_info->meta_data[3],
1667 			msdu_info->meta_data[4],
1668 			msdu_info->meta_data[5],
1669 			msdu_info->tid, msdu_info->exception_fw);
1670 
1671 	return nbuf;
1672 }
1673 #else
1674 static
1675 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1676 				struct dp_tx_msdu_info_s *msdu_info)
1677 {
1678 	return nbuf;
1679 }
1680 
1681 #endif
1682 
1683 #ifdef DP_FEATURE_NAWDS_TX
1684 /**
1685  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1686  * @vdev: dp_vdev handle
1687  * @nbuf: skb
1688  * @msdu_info: msdu descriptor carrying the TID, Tx queue and mesh meta data
1689  *
1690  * Replicates the multicast frame to every NAWDS-enabled peer of the vdev,
1691  * skipping the peer the frame was received from.
1692  *
1693  * Return: NULL on success, nbuf on failure
1694  */
1695 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1696 		struct dp_tx_msdu_info_s *msdu_info)
1697 {
1698 	struct dp_peer *peer = NULL;
1699 	struct dp_soc *soc = vdev->pdev->soc;
1700 	struct dp_ast_entry *ast_entry = NULL;
1701 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1702 	uint16_t peer_id = HTT_INVALID_PEER;
1703 
1704 	struct dp_peer *sa_peer = NULL;
1705 	qdf_nbuf_t nbuf_copy;
1706 
1707 	qdf_spin_lock_bh(&(soc->ast_lock));
1708 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1709 
1710 	if (ast_entry)
1711 		sa_peer = ast_entry->peer;
1712 
1713 	qdf_spin_unlock_bh(&(soc->ast_lock));
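	/*
	 * sa_peer is the peer this multicast frame was originally received
	 * from; it is skipped in the loop below so the frame is not echoed
	 * back to its source.
	 */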
1714 
1715 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1716 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1717 				(peer->nawds_enabled)) {
1718 			if (sa_peer == peer) {
1719 				QDF_TRACE(QDF_MODULE_ID_DP,
1720 						QDF_TRACE_LEVEL_DEBUG,
1721 						" %s: broadcast multicast packet",
1722 						 __func__);
1723 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1724 				continue;
1725 			}
1726 
1727 			nbuf_copy = qdf_nbuf_copy(nbuf);
1728 			if (!nbuf_copy) {
1729 				QDF_TRACE(QDF_MODULE_ID_DP,
1730 						QDF_TRACE_LEVEL_ERROR,
1731 						"nbuf copy failed");
				continue;
1732 			}
1733 
1734 			peer_id = peer->peer_ids[0];
1735 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1736 					msdu_info, peer_id, NULL);
1737 			if (nbuf_copy != NULL) {
1738 				qdf_nbuf_free(nbuf_copy);
1739 				continue;
1740 			}
1741 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1742 						1, qdf_nbuf_len(nbuf));
1743 		}
1744 	}
1745 	if (peer_id == HTT_INVALID_PEER)
1746 		return nbuf;
1747 
1748 	return NULL;
1749 }
1750 #endif
1751 
1752 /**
1753  * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, false otherwise
1757  *
1758  */
1759 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1760 {
1761 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1762 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1763 	    tx_exc->sec_type > cdp_num_sec_types) {
1764 		return false;
1765 	}
1766 
1767 	return true;
1768 }
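
/*
 * For illustration only: a caller on the exception path would typically fill
 * the cdp_tx_exception_metadata fields that are validated above before
 * handing the frame to dp_tx_send_exception(). The values below are just an
 * example, not a recommendation:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.peer_id = HTT_INVALID_PEER;	 (or a specific peer id)
 *	tx_exc.tid = HTT_INVALID_TID;		 (no TID override)
 *	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	tx_exc.sec_type = cdp_sec_type_none;
 *
 *	nbuf = dp_tx_send_exception(vap_dev, nbuf, &tx_exc);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);		 (send failed, caller owns nbuf)
 */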
1769 
1770 /**
1771  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1772  * @vap_dev: DP vdev handle
1773  * @nbuf: skb
1774  * @tx_exc_metadata: Handle that holds exception path meta data
1775  *
1776  * Entry point for Core Tx layer (DP_TX) invoked from
1777  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1778  *
1779  * Return: NULL on success,
1780  *         nbuf when it fails to send
1781  */
1782 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1783 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1784 {
1785 	struct ether_header *eh = NULL;
1786 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1787 	struct dp_tx_msdu_info_s msdu_info;
1788 
1789 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1790 
1791 	msdu_info.tid = tx_exc_metadata->tid;
1792 
1793 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1794 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1795 			"%s , skb %pM",
1796 			__func__, nbuf->data);
1797 
1798 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1799 
1800 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1801 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1802 			"Invalid parameters in exception path");
1803 		goto fail;
1804 	}
1805 
1806 	/* Basic sanity checks for unsupported packets */
1807 
1808 	/* MESH mode */
1809 	if (qdf_unlikely(vdev->mesh_vdev)) {
1810 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1811 			"Mesh mode is not supported in exception path");
1812 		goto fail;
1813 	}
1814 
1815 	/* TSO or SG */
1816 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1817 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1818 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1819 			  "TSO and SG are not supported in exception path");
1820 
1821 		goto fail;
1822 	}
1823 
1824 	/* RAW */
1825 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1826 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1827 			  "Raw frame is not supported in exception path");
1828 		goto fail;
1829 	}
1830 
1831 
1832 	/* Mcast enhancement*/
1833 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1834 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1835 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1836 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1837 		}
1838 	}
1839 
1840 	/*
1841 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1843 	 * dedicated for data and 1 for command.
1844 	 * "queue_id" maps to one hardware ring.
1845 	 *  With each ring, we also associate a unique Tx descriptor pool
1846 	 *  to minimize lock contention for these resources.
1847 	 */
1848 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1849 
1850 	/*  Single linear frame */
1851 	/*
1852 	 * If nbuf is a simple linear frame, use send_single function to
1853 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1854 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1855 	 */
1856 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1857 			tx_exc_metadata->peer_id, tx_exc_metadata);
1858 
1859 	return nbuf;
1860 
1861 fail:
1862 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1863 			"pkt send failed");
1864 	return nbuf;
1865 }
1866 
1867 /**
1868  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1869  * @vap_dev: DP vdev handle
1870  * @nbuf: skb
1871  *
1872  * Entry point for Core Tx layer (DP_TX) invoked from
1873  * hard_start_xmit in OSIF/HDD
1874  *
1875  * Return: NULL on success,
1876  *         nbuf when it fails to send
1877  */
1878 #ifdef MESH_MODE_SUPPORT
1879 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1880 {
1881 	struct meta_hdr_s *mhdr;
1882 	qdf_nbuf_t nbuf_mesh = NULL;
1883 	qdf_nbuf_t nbuf_clone = NULL;
1884 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1885 	uint8_t no_enc_frame = 0;
1886 
1887 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1888 	if (nbuf_mesh == NULL) {
1889 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1890 				"qdf_nbuf_unshare failed");
1891 		return nbuf;
1892 	}
1893 	nbuf = nbuf_mesh;
1894 
1895 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1896 
1897 	if ((vdev->sec_type != cdp_sec_type_none) &&
1898 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1899 		no_enc_frame = 1;
1900 
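	/*
	 * For frames that will be encrypted and carry the INFO_UPDATED flag,
	 * an extra clone tagged with CB_FTYPE_MESH_TX_INFO is sent so that
	 * per-packet tx info can be reported back, while the original frame
	 * continues through the normal transmit path below.
	 */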
1901 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1902 		       !no_enc_frame) {
1903 		nbuf_clone = qdf_nbuf_clone(nbuf);
1904 		if (nbuf_clone == NULL) {
1905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1906 				"qdf_nbuf_clone failed");
1907 			return nbuf;
1908 		}
1909 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1910 	}
1911 
1912 	if (nbuf_clone) {
1913 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1914 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1915 		} else {
1916 			qdf_nbuf_free(nbuf_clone);
1917 		}
1918 	}
1919 
1920 	if (no_enc_frame)
1921 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1922 	else
1923 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1924 
1925 	nbuf = dp_tx_send(vap_dev, nbuf);
1926 	if ((nbuf == NULL) && no_enc_frame) {
1927 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1928 	}
1929 
1930 	return nbuf;
1931 }
1932 
1933 #else
1934 
1935 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1936 {
1937 	return dp_tx_send(vap_dev, nbuf);
1938 }
1939 
1940 #endif
1941 
1942 /**
1943  * dp_tx_send() - Transmit a frame on a given VAP
1944  * @vap_dev: DP vdev handle
1945  * @nbuf: skb
1946  *
1947  * Entry point for Core Tx layer (DP_TX) invoked from
1948  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1949  * cases
1950  *
1951  * Return: NULL on success,
1952  *         nbuf when it fails to send
1953  */
1954 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1955 {
1956 	struct ether_header *eh = NULL;
1957 	struct dp_tx_msdu_info_s msdu_info;
1958 	struct dp_tx_seg_info_s seg_info;
1959 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1960 	uint16_t peer_id = HTT_INVALID_PEER;
1961 	qdf_nbuf_t nbuf_mesh = NULL;
1962 
1963 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1964 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1965 
1966 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1967 
1968 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1969 			"%s , skb %pM",
1970 			__func__, nbuf->data);
1971 
1972 	/*
1973 	 * Set Default Host TID value to invalid TID
1974 	 * (TID override disabled)
1975 	 */
1976 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1977 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1978 
1979 	if (qdf_unlikely(vdev->mesh_vdev)) {
1980 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1981 								&msdu_info);
1982 		if (nbuf_mesh == NULL) {
1983 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1984 					"Extracting mesh metadata failed");
1985 			return nbuf;
1986 		}
1987 		nbuf = nbuf_mesh;
1988 	}
1989 
1990 	/*
1991 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1993 	 * dedicated for data and 1 for command.
1994 	 * "queue_id" maps to one hardware ring.
1995 	 *  With each ring, we also associate a unique Tx descriptor pool
1996 	 *  to minimize lock contention for these resources.
1997 	 */
1998 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1999 
2000 	/*
2001 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2002 	 *  Table 1 - Default DSCP-TID mapping table
2003 	 *  Table 2 - 1 DSCP-TID override table
2004 	 *
2005 	 * If we need a different DSCP-TID mapping for this vap,
2006 	 * call tid_classify to extract DSCP/ToS from frame and
2007 	 * map to a TID and store in msdu_info. This is later used
2008 	 * to fill in TCL Input descriptor (per-packet TID override).
2009 	 */
2010 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2011 
2012 	/*
2013 	 * Classify the frame and call corresponding
2014 	 * "prepare" function which extracts the segment (TSO)
2015 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2016 	 * into MSDU_INFO structure which is later used to fill
2017 	 * SW and HW descriptors.
2018 	 */
2019 	if (qdf_nbuf_is_tso(nbuf)) {
2020 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2021 			  "%s TSO frame %pK", __func__, vdev);
2022 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2023 				qdf_nbuf_len(nbuf));
2024 
2025 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2026 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2027 					 qdf_nbuf_len(nbuf));
2028 			return nbuf;
2029 		}
2030 
2031 		goto send_multiple;
2032 	}
2033 
2034 	/* SG */
2035 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2036 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2037 
2038 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2039 			 "%s non-TSO SG frame %pK", __func__, vdev);
2040 
2041 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2042 				qdf_nbuf_len(nbuf));
2043 
2044 		goto send_multiple;
2045 	}
2046 
2047 #ifdef ATH_SUPPORT_IQUE
2048 	/* Mcast to Ucast Conversion*/
2049 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2050 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2051 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2052 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2053 				  "%s Mcast frm for ME %pK", __func__, vdev);
2054 
2055 			DP_STATS_INC_PKT(vdev,
2056 					tx_i.mcast_en.mcast_pkt, 1,
2057 					qdf_nbuf_len(nbuf));
2058 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2059 					QDF_STATUS_SUCCESS) {
2060 				return NULL;
2061 			}
2062 		}
2063 	}
2064 #endif
2065 
2066 	/* RAW */
2067 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2068 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2069 		if (nbuf == NULL)
2070 			return NULL;
2071 
2072 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2073 			  "%s Raw frame %pK", __func__, vdev);
2074 
2075 		goto send_multiple;
2076 
2077 	}
2078 
2079 	/*  Single linear frame */
2080 	/*
2081 	 * If nbuf is a simple linear frame, use send_single function to
2082 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2083 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2084 	 */
2085 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2086 
2087 	return nbuf;
2088 
2089 send_multiple:
2090 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2091 
2092 	return nbuf;
2093 }
2094 
2095 /**
2096  * dp_tx_reinject_handler() - Tx Reinject Handler
2097  * @tx_desc: software descriptor head pointer
2098  * @status : Tx completion status from HTT descriptor
2099  *
2100  * This function reinjects frames back to Target.
2101  * Todo - Host queue needs to be added
2102  *
2103  * Return: none
2104  */
2105 static
2106 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2107 {
2108 	struct dp_vdev *vdev;
2109 	struct dp_peer *peer = NULL;
2110 	uint32_t peer_id = HTT_INVALID_PEER;
2111 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2112 	qdf_nbuf_t nbuf_copy = NULL;
2113 	struct dp_tx_msdu_info_s msdu_info;
2114 	struct dp_peer *sa_peer = NULL;
2115 	struct dp_ast_entry *ast_entry = NULL;
2116 	struct dp_soc *soc = NULL;
2117 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2118 #ifdef WDS_VENDOR_EXTENSION
2119 	int is_mcast = 0, is_ucast = 0;
2120 	int num_peers_3addr = 0;
2121 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2122 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2123 #endif
2124 
2125 	vdev = tx_desc->vdev;
2126 	soc = vdev->pdev->soc;
2127 
2128 	qdf_assert(vdev);
2129 
2130 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2131 
2132 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2133 
2134 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2135 			"%s Tx reinject path", __func__);
2136 
2137 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2138 			qdf_nbuf_len(tx_desc->nbuf));
2139 
2140 	qdf_spin_lock_bh(&(soc->ast_lock));
2141 
2142 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2143 
2144 	if (ast_entry)
2145 		sa_peer = ast_entry->peer;
2146 
2147 	qdf_spin_unlock_bh(&(soc->ast_lock));
2148 
2149 #ifdef WDS_VENDOR_EXTENSION
2150 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2151 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2152 	} else {
2153 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2154 	}
2155 	is_ucast = !is_mcast;
2156 
2157 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2158 		if (peer->bss_peer)
2159 			continue;
2160 
2161 		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send
		 * the mcast frame using 3-addr format. All wds enabled
2164 		 * peers that use 4-addr framing for mcast frames will
2165 		 * be duplicated and sent as 4-addr frames below.
2166 		 */
2167 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2168 			num_peers_3addr = 1;
2169 			break;
2170 		}
2171 	}
2172 #endif
2173 
2174 	if (qdf_unlikely(vdev->mesh_vdev)) {
2175 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2176 	} else {
2177 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2178 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2179 #ifdef WDS_VENDOR_EXTENSION
2180 			/*
2181 			 * . if 3-addr STA, then send on BSS Peer
2182 			 * . if Peer WDS enabled and accept 4-addr mcast,
2183 			 * send mcast on that peer only
2184 			 * . if Peer WDS enabled and accept 4-addr ucast,
2185 			 * send ucast on that peer only
2186 			 */
2187 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2188 			 (peer->wds_enabled &&
2189 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2190 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2191 #else
2192 			((peer->bss_peer &&
2193 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2194 				 peer->nawds_enabled)) {
2195 #endif
2196 				peer_id = DP_INVALID_PEER;
2197 
2198 				if (peer->nawds_enabled) {
2199 					peer_id = peer->peer_ids[0];
2200 					if (sa_peer == peer) {
2201 						QDF_TRACE(
2202 							QDF_MODULE_ID_DP,
2203 							QDF_TRACE_LEVEL_DEBUG,
2204 							" %s: multicast packet",
2205 							__func__);
2206 						DP_STATS_INC(peer,
2207 							tx.nawds_mcast_drop, 1);
2208 						continue;
2209 					}
2210 				}
2211 
2212 				nbuf_copy = qdf_nbuf_copy(nbuf);
2213 
2214 				if (!nbuf_copy) {
2215 					QDF_TRACE(QDF_MODULE_ID_DP,
2216 						QDF_TRACE_LEVEL_DEBUG,
2217 						FL("nbuf copy failed"));
2218 					break;
2219 				}
2220 
2221 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2222 						nbuf_copy,
2223 						&msdu_info,
2224 						peer_id,
2225 						NULL);
2226 
2227 				if (nbuf_copy) {
2228 					QDF_TRACE(QDF_MODULE_ID_DP,
2229 						QDF_TRACE_LEVEL_DEBUG,
2230 						FL("pkt send failed"));
2231 					qdf_nbuf_free(nbuf_copy);
2232 				} else {
2233 					if (peer_id != DP_INVALID_PEER)
2234 						DP_STATS_INC_PKT(peer,
2235 							tx.nawds_mcast,
2236 							1, qdf_nbuf_len(nbuf));
2237 				}
2238 			}
2239 		}
2240 	}
2241 
2242 	if (vdev->nawds_enabled) {
2243 		peer_id = DP_INVALID_PEER;
2244 
2245 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2246 					1, qdf_nbuf_len(nbuf));
2247 
2248 		nbuf = dp_tx_send_msdu_single(vdev,
2249 				nbuf,
2250 				&msdu_info,
2251 				peer_id, NULL);
2252 
2253 		if (nbuf) {
2254 			QDF_TRACE(QDF_MODULE_ID_DP,
2255 				QDF_TRACE_LEVEL_DEBUG,
2256 				FL("pkt send failed"));
2257 			qdf_nbuf_free(nbuf);
2258 		}
2259 	} else
2260 		qdf_nbuf_free(nbuf);
2261 
2262 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2263 }
2264 
2265 /**
2266  * dp_tx_inspect_handler() - Tx Inspect Handler
2267  * @tx_desc: software descriptor head pointer
2268  * @status : Tx completion status from HTT descriptor
2269  *
2270  * Handles Tx frames sent back to Host for inspection
2271  * (ProxyARP)
2272  *
2273  * Return: none
2274  */
2275 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2276 {
2277 
2278 	struct dp_soc *soc;
2279 	struct dp_pdev *pdev = tx_desc->pdev;
2280 
2281 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2282 			"%s Tx inspect path",
2283 			__func__);
2284 
2285 	qdf_assert(pdev);
2286 
2287 	soc = pdev->soc;
2288 
2289 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2290 			qdf_nbuf_len(tx_desc->nbuf));
2291 
2292 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2293 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2294 }
2295 
2296 #ifdef FEATURE_PERPKT_INFO
2297 /**
2298  * dp_get_completion_indication_for_stack() - send completion to stack
2299  * @soc :  dp_soc handle
2300  * @pdev:  dp_pdev handle
2301  * @peer_id: peer_id of the peer for which completion came
2302  * @ppdu_id: ppdu_id
2303  * @first_msdu: first msdu
2304  * @last_msdu: last msdu
2305  * @netbuf: Buffer pointer for free
2306  *
 * This function is used to indicate whether the buffer needs to be
 * sent to the stack for freeing or not
 *
 * Return: QDF_STATUS
 */
2310 QDF_STATUS
2311 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2312 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2313 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2314 {
2315 	struct tx_capture_hdr *ppdu_hdr;
2316 	struct dp_peer *peer = NULL;
2317 	struct ether_header *eh;
2318 
2319 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2320 		return QDF_STATUS_E_NOSUPPORT;
2321 
2322 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2323 			dp_peer_find_by_id(soc, peer_id);
2324 
2325 	if (!peer) {
2326 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2327 				FL("Peer Invalid"));
2328 		return QDF_STATUS_E_INVAL;
2329 	}
2330 
2331 	if (pdev->mcopy_mode) {
2332 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2333 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2334 			return QDF_STATUS_E_INVAL;
2335 		}
2336 
2337 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2338 		pdev->m_copy_id.tx_peer_id = peer_id;
2339 	}
2340 
2341 	eh = (struct ether_header *)qdf_nbuf_data(netbuf);
2342 
2343 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2345 				FL("No headroom"));
2346 		return QDF_STATUS_E_NOMEM;
2347 	}
2348 
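	/*
	 * Prepend a tx_capture_hdr so the consumer of the WDI TX_DATA event
	 * can identify the link: ta carries the transmitting vdev's MAC and
	 * ra carries the peer MAC (or the frame's destination MAC when the
	 * completion is for the bss peer).
	 */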
2349 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2350 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2351 		     IEEE80211_ADDR_LEN);
2352 	if (peer->bss_peer) {
2353 		qdf_mem_copy(ppdu_hdr->ra, eh->ether_dhost, IEEE80211_ADDR_LEN);
2354 	} else {
2355 		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2356 			     IEEE80211_ADDR_LEN);
2357 	}
2358 
2359 	ppdu_hdr->ppdu_id = ppdu_id;
2360 	ppdu_hdr->peer_id = peer_id;
2361 	ppdu_hdr->first_msdu = first_msdu;
2362 	ppdu_hdr->last_msdu = last_msdu;
2363 
2364 	return QDF_STATUS_SUCCESS;
2365 }
2366 
2367 
2368 /**
2369  * dp_send_completion_to_stack() - send completion to stack
2370  * @soc :  dp_soc handle
2371  * @pdev:  dp_pdev handle
2372  * @peer_id: peer_id of the peer for which completion came
2373  * @ppdu_id: ppdu_id
2374  * @netbuf: Buffer pointer for free
2375  *
 * This function is used to send a completion to the stack
 * to free the buffer
 *
 * Return: none
 */
2379 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2380 					uint16_t peer_id, uint32_t ppdu_id,
2381 					qdf_nbuf_t netbuf)
2382 {
2383 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2384 				netbuf, peer_id,
2385 				WDI_NO_VAL, pdev->pdev_id);
2386 }
2387 #else
2388 static QDF_STATUS
2389 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2390 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2391 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2392 {
2393 	return QDF_STATUS_E_NOSUPPORT;
2394 }
2395 
2396 static void
2397 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2398 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2399 {
2400 }
2401 #endif
2402 
2403 /**
2404  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2405  * @soc: Soc handle
2406  * @desc: software Tx descriptor to be processed
2407  *
2408  * Return: none
2409  */
2410 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2411 		struct dp_tx_desc_s *desc)
2412 {
2413 	struct dp_vdev *vdev = desc->vdev;
2414 	qdf_nbuf_t nbuf = desc->nbuf;
2415 
2416 	/* If it is TDLS mgmt, don't unmap or free the frame */
2417 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2418 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2419 
2420 	/* 0 : MSDU buffer, 1 : MLE */
2421 	if (desc->msdu_ext_desc) {
2422 		/* TSO free */
2423 		if (hal_tx_ext_desc_get_tso_enable(
2424 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
2426 			dp_tx_tso_unmap_segment(soc, desc);
2427 			qdf_nbuf_free(nbuf);
2428 			return;
2429 		}
2430 	}
2431 
2432 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2433 
2434 	if (qdf_likely(!vdev->mesh_vdev))
2435 		qdf_nbuf_free(nbuf);
2436 	else {
2437 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2438 			qdf_nbuf_free(nbuf);
2439 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2440 		} else
2441 			vdev->osif_tx_free_ext((nbuf));
2442 	}
2443 }
2444 
2445 /**
2446  * dp_tx_mec_handler() - Tx  MEC Notify Handler
 * @vdev: pointer to dp vdev handle
2448  * @status : Tx completion status from HTT descriptor
2449  *
2450  * Handles MEC notify event sent from fw to Host
2451  *
2452  * Return: none
2453  */
2454 #ifdef FEATURE_WDS
2455 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2456 {
2457 
2458 	struct dp_soc *soc;
2459 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2460 	struct dp_peer *peer;
2461 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2462 
2463 	if (!vdev->wds_enabled)
2464 		return;
2465 
2466 	/* MEC required only in STA mode */
2467 	if (vdev->opmode != wlan_op_mode_sta)
2468 		return;
2469 
2470 	soc = vdev->pdev->soc;
2471 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2472 	peer = TAILQ_FIRST(&vdev->peer_list);
2473 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2474 
2475 	if (!peer) {
2476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2477 				FL("peer is NULL"));
2478 		return;
2479 	}
2480 
2481 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2482 			"%s Tx MEC Handler",
2483 			__func__);
2484 
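	/*
	 * The MEC notify status bytes carry the MAC address in reverse byte
	 * order starting at offset (DP_MAC_ADDR_LEN - 2); rebuild it in
	 * normal order before comparing it against the vdev's own address.
	 */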
2485 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2486 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2487 					status[(DP_MAC_ADDR_LEN - 2) + i];
2488 
2489 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2490 		dp_peer_add_ast(soc,
2491 				peer,
2492 				mac_addr,
2493 				CDP_TXRX_AST_TYPE_MEC,
2494 				flags);
2495 }
2496 #endif
2497 
2498 /**
2499  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2500  * @tx_desc: software descriptor head pointer
2501  * @status : Tx completion status from HTT descriptor
2502  *
2503  * This function will process HTT Tx indication messages from Target
2504  *
2505  * Return: none
2506  */
2507 static
2508 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2509 {
2510 	uint8_t tx_status;
2511 	struct dp_pdev *pdev;
2512 	struct dp_vdev *vdev;
2513 	struct dp_soc *soc;
2514 	uint32_t *htt_status_word = (uint32_t *) status;
2515 
2516 	qdf_assert(tx_desc->pdev);
2517 
2518 	pdev = tx_desc->pdev;
2519 	vdev = tx_desc->vdev;
2520 	soc = pdev->soc;
2521 
2522 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2523 
2524 	switch (tx_status) {
2525 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2526 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2527 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2528 	{
2529 		dp_tx_comp_free_buf(soc, tx_desc);
2530 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2531 		break;
2532 	}
2533 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2534 	{
2535 		dp_tx_reinject_handler(tx_desc, status);
2536 		break;
2537 	}
2538 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2539 	{
2540 		dp_tx_inspect_handler(tx_desc, status);
2541 		break;
2542 	}
2543 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2544 	{
2545 		dp_tx_mec_handler(vdev, status);
2546 		break;
2547 	}
2548 	default:
2549 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2550 				"%s Invalid HTT tx_status %d",
2551 				__func__, tx_status);
2552 		break;
2553 	}
2554 }
2555 
2556 #ifdef MESH_MODE_SUPPORT
2557 /**
2558  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2559  *                                         in mesh meta header
2560  * @tx_desc: software descriptor head pointer
2561  * @ts: pointer to tx completion stats
2562  * Return: none
2563  */
2564 static
2565 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2566 		struct hal_tx_completion_status *ts)
2567 {
2568 	struct meta_hdr_s *mhdr;
2569 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2570 
2571 	if (!tx_desc->msdu_ext_desc) {
2572 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2573 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2574 				"netbuf %pK offset %d",
2575 				netbuf, tx_desc->pkt_offset);
2576 			return;
2577 		}
2578 	}
2579 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2580 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2581 			"netbuf %pK offset %d", netbuf,
2582 			sizeof(struct meta_hdr_s));
2583 		return;
2584 	}
2585 
2586 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2587 	mhdr->rssi = ts->ack_frame_rssi;
2588 	mhdr->channel = tx_desc->pdev->operating_channel;
2589 }
2590 
2591 #else
2592 static
2593 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2594 		struct hal_tx_completion_status *ts)
2595 {
2596 }
2597 
2598 #endif
2599 
2600 /**
2601  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2602  * @peer: Handle to DP peer
2603  * @ts: pointer to HAL Tx completion stats
2604  * @length: MSDU length
2605  *
2606  * Return: None
2607  */
2608 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2609 		struct hal_tx_completion_status *ts, uint32_t length)
2610 {
2611 	struct dp_pdev *pdev = peer->vdev->pdev;
2612 	struct dp_soc *soc = pdev->soc;
2613 	uint8_t mcs, pkt_type;
2614 
2615 	mcs = ts->mcs;
2616 	pkt_type = ts->pkt_type;
2617 
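	/* The per-peer stats below are derived from the TQM completion
	 * status, so skip completions that were not released by TQM.
	 */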
	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2619 		return;
2620 
2621 	if (peer->bss_peer) {
2622 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2623 	} else {
2624 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2625 	}
2626 
2627 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2628 
2629 	DP_STATS_INCC_PKT(peer, tx.tx_success, 1, length,
2630 			  (ts->status == HAL_TX_TQM_RR_FRAME_ACKED));
2631 
2632 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2633 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2634 
2635 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2636 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2637 
2638 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2639 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2640 
2641 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2642 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2643 
2644 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2645 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2646 
2647 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2648 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2649 
2650 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2651 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2652 
	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2654 		return;
2655 
2656 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2657 
2658 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2659 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2660 
2661 	if (!(soc->process_tx_status))
2662 		return;
2663 
2664 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2665 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2666 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2667 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2668 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2669 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2670 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2671 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2672 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2673 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2674 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2675 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2676 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2677 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2678 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2679 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2680 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2681 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2682 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2683 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2684 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2685 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2686 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2687 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2688 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2689 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2690 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2691 
2692 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2693 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
2694 				&peer->stats, ts->peer_id,
2695 				UPDATE_PEER_STATS);
2696 	}
2697 }
2698 
2699 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2700 /**
2701  * dp_tx_flow_pool_lock() - take flow pool lock
2702  * @soc: core txrx main context
2703  * @tx_desc: tx desc
2704  *
2705  * Return: None
2706  */
2707 static inline
2708 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2709 			  struct dp_tx_desc_s *tx_desc)
2710 {
2711 	struct dp_tx_desc_pool_s *pool;
2712 	uint8_t desc_pool_id;
2713 
2714 	desc_pool_id = tx_desc->pool_id;
2715 	pool = &soc->tx_desc[desc_pool_id];
2716 
2717 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2718 }
2719 
2720 /**
2721  * dp_tx_flow_pool_unlock() - release flow pool lock
2722  * @soc: core txrx main context
2723  * @tx_desc: tx desc
2724  *
2725  * Return: None
2726  */
2727 static inline
2728 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2729 			    struct dp_tx_desc_s *tx_desc)
2730 {
2731 	struct dp_tx_desc_pool_s *pool;
2732 	uint8_t desc_pool_id;
2733 
2734 	desc_pool_id = tx_desc->pool_id;
2735 	pool = &soc->tx_desc[desc_pool_id];
2736 
2737 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2738 }
2739 #else
2740 static inline
2741 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2742 {
2743 }
2744 
2745 static inline
2746 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2747 {
2748 }
2749 #endif
2750 
2751 /**
2752  * dp_tx_notify_completion() - Notify tx completion for this desc
2753  * @soc: core txrx main context
2754  * @tx_desc: tx desc
2755  * @netbuf:  buffer
2756  *
2757  * Return: none
2758  */
2759 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2760 					   struct dp_tx_desc_s *tx_desc,
2761 					   qdf_nbuf_t netbuf)
2762 {
2763 	void *osif_dev;
2764 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2765 
2766 	qdf_assert(tx_desc);
2767 
2768 	dp_tx_flow_pool_lock(soc, tx_desc);
2769 
2770 	if (!tx_desc->vdev ||
2771 	    !tx_desc->vdev->osif_vdev) {
2772 		dp_tx_flow_pool_unlock(soc, tx_desc);
2773 		return;
2774 	}
2775 
2776 	osif_dev = tx_desc->vdev->osif_vdev;
2777 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2778 	dp_tx_flow_pool_unlock(soc, tx_desc);
2779 
2780 	if (tx_compl_cbk)
2781 		tx_compl_cbk(netbuf, osif_dev);
2782 }
2783 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
2785  * @pdev: pdev handle
2786  * @tid: tid value
2787  * @txdesc_ts: timestamp from txdesc
2788  * @ppdu_id: ppdu id
2789  *
2790  * Return: none
2791  */
2792 #ifdef FEATURE_PERPKT_INFO
2793 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2794 					       uint8_t tid,
2795 					       uint64_t txdesc_ts,
2796 					       uint32_t ppdu_id)
2797 {
2798 	uint64_t delta_ms;
2799 	struct cdp_tx_sojourn_stats *sojourn_stats;
2800 
2801 	if (pdev->enhanced_stats_en == 0)
2802 		return;
2803 
2804 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2805 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2806 
2807 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2808 		if (!pdev->sojourn_buf)
2809 			return;
2810 
2811 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2812 					qdf_nbuf_data(pdev->sojourn_buf);
2813 
2814 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2815 			     sizeof(struct cdp_tx_sojourn_stats));
2816 
2817 		qdf_mem_zero(&pdev->sojourn_stats,
2818 			     sizeof(struct cdp_tx_sojourn_stats));
2819 
2820 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2821 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2822 				     WDI_NO_VAL, pdev->pdev_id);
2823 
2824 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2825 	}
2826 
2827 	if (tid == HTT_INVALID_TID)
2828 		return;
2829 
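	/*
	 * Sojourn time for this MSDU: delta between the tx descriptor
	 * timestamp and the current time, tracked per TID both as an EWMA
	 * and as a running sum/count.
	 */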
2830 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2831 				txdesc_ts;
2832 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2833 			    delta_ms);
2834 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2835 	pdev->sojourn_stats.num_msdus[tid]++;
2836 }
2837 #else
2838 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2839 					       uint8_t tid,
2840 					       uint64_t txdesc_ts,
2841 					       uint32_t ppdu_id)
2842 {
2843 }
2844 #endif
2845 
2846 /**
2847  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2848  * @tx_desc: software descriptor head pointer
2849  * @length: packet length
2850  *
2851  * Return: none
2852  */
2853 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2854 		uint32_t length)
2855 {
2856 	struct hal_tx_completion_status ts;
2857 	struct dp_soc *soc = NULL;
2858 	struct dp_vdev *vdev = tx_desc->vdev;
2859 	struct dp_peer *peer = NULL;
2860 	struct ether_header *eh =
2861 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2862 
2863 	if (!vdev) {
2864 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2865 				"invalid vdev");
2866 		goto out;
2867 	}
2868 
2869 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
2870 
2871 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2872 				"-------------------- \n"
2873 				"Tx Completion Stats: \n"
2874 				"-------------------- \n"
2875 				"ack_frame_rssi = %d \n"
2876 				"first_msdu = %d \n"
2877 				"last_msdu = %d \n"
2878 				"msdu_part_of_amsdu = %d \n"
2879 				"rate_stats valid = %d \n"
2880 				"bw = %d \n"
2881 				"pkt_type = %d \n"
2882 				"stbc = %d \n"
2883 				"ldpc = %d \n"
2884 				"sgi = %d \n"
2885 				"mcs = %d \n"
2886 				"ofdma = %d \n"
2887 				"tones_in_ru = %d \n"
2888 				"tsf = %d \n"
2889 				"ppdu_id = %d \n"
2890 				"transmit_cnt = %d \n"
2891 				"tid = %d \n"
2892 				"peer_id = %d ",
2893 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2894 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2895 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2896 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2897 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2898 				ts.peer_id);
2899 
2900 	soc = vdev->pdev->soc;
2901 
2902 	/* Update SoC level stats */
2903 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2904 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2905 
2906 	/* Update per-packet stats */
2907 	if (qdf_unlikely(vdev->mesh_vdev) &&
2908 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2909 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2910 
2911 	/* Update peer level stats */
2912 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2913 	if (!peer) {
2914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2915 				"invalid peer");
2916 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2917 		goto out;
2918 	}
2919 
2920 	if (qdf_likely(peer->vdev->tx_encap_type ==
2921 				htt_cmn_pkt_type_ethernet)) {
2922 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2923 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2924 	}
2925 
2926 	dp_tx_sojourn_stats_process(vdev->pdev, ts.tid,
2927 				    tx_desc->timestamp,
2928 				    ts.ppdu_id);
2929 
2930 	dp_tx_update_peer_stats(peer, &ts, length);
2931 
2932 out:
2933 	return;
}

2935 /**
2936  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2937  * @soc: core txrx main context
2938  * @comp_head: software descriptor head pointer
2939  *
2940  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2941  * and release the software descriptors after processing is complete
2942  *
2943  * Return: none
2944  */
2945 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2946 		struct dp_tx_desc_s *comp_head)
2947 {
2948 	struct dp_tx_desc_s *desc;
2949 	struct dp_tx_desc_s *next;
2950 	struct hal_tx_completion_status ts = {0};
2951 	uint32_t length;
2952 	struct dp_peer *peer;
2953 
2954 	DP_HIST_INIT();
2955 	desc = comp_head;
2956 
2957 	while (desc) {
2958 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
2959 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2960 		length = qdf_nbuf_len(desc->nbuf);
2961 
2962 		/* check tx completion notification */
2963 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf))
2964 			dp_tx_notify_completion(soc, desc, desc->nbuf);
2965 
2966 		dp_tx_comp_process_tx_status(desc, length);
2967 
2968 		DPTRACE(qdf_dp_trace_ptr
2969 				(desc->nbuf,
2970 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2971 				 QDF_TRACE_DEFAULT_PDEV_ID,
2972 				 qdf_nbuf_data_addr(desc->nbuf),
2973 				 sizeof(qdf_nbuf_data(desc->nbuf)),
2974 				 desc->id, ts.status)
2975 			);
2976 
		/* currently m_copy/tx_capture is not supported
		 * for scatter gather packets
		 */
2978 		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2979 					desc->pdev, ts.peer_id, ts.ppdu_id,
2980 					ts.first_msdu, ts.last_msdu,
2981 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
2982 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2983 						QDF_DMA_TO_DEVICE);
2984 
2985 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2986 				ts.ppdu_id, desc->nbuf);
2987 		} else {
2988 			dp_tx_comp_free_buf(soc, desc);
2989 		}
2990 
2991 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2992 
2993 		next = desc->next;
2994 
2995 		dp_tx_desc_release(desc, desc->pool_id);
2996 		desc = next;
2997 	}
2998 
2999 	DP_TX_HIST_STATS_PER_PDEV();
3000 }
3001 
3002 /**
3003  * dp_tx_comp_handler() - Tx completion handler
3004  * @soc: core txrx main context
3005  * @ring_id: completion ring id
3006  * @quota: No. of packets/descriptors that can be serviced in one loop
3007  *
3008  * This function will collect hardware release ring element contents and
3009  * handle descriptor contents. Based on contents, free packet or handle error
3010  * conditions
3011  *
3012  * Return: none
3013  */
3014 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3015 {
3016 	void *tx_comp_hal_desc;
3017 	uint8_t buffer_src;
3018 	uint8_t pool_id;
3019 	uint32_t tx_desc_id;
3020 	struct dp_tx_desc_s *tx_desc = NULL;
3021 	struct dp_tx_desc_s *head_desc = NULL;
3022 	struct dp_tx_desc_s *tail_desc = NULL;
3023 	uint32_t num_processed;
3024 	uint32_t count;
3025 
3026 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3027 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3028 				"%s %d : HAL RING Access Failed -- %pK",
3029 				__func__, __LINE__, hal_srng);
3030 		return 0;
3031 	}
3032 
3033 	num_processed = 0;
3034 	count = 0;
3035 
3036 	/* Find head descriptor from completion ring */
3037 	while (qdf_likely(tx_comp_hal_desc =
3038 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3039 
3040 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3041 
		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert
		 */
3044 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3045 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3046 
3047 			QDF_TRACE(QDF_MODULE_ID_DP,
3048 					QDF_TRACE_LEVEL_FATAL,
3049 					"Tx comp release_src != TQM | FW");
3050 
3051 			qdf_assert_always(0);
3052 		}
3053 
3054 		/* Get descriptor id */
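		/*
		 * The completion cookie packs three fields that the
		 * DP_TX_DESC_ID_{POOL,PAGE,OFFSET}_MASK/_OS macros extract
		 * below: the descriptor pool id, the page within that pool
		 * and the descriptor offset inside the page.
		 */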
3055 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3056 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3057 			DP_TX_DESC_ID_POOL_OS;
3058 
3059 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3060 			continue;
3061 
3062 		/* Find Tx descriptor */
3063 		tx_desc = dp_tx_desc_find(soc, pool_id,
3064 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3065 				DP_TX_DESC_ID_PAGE_OS,
3066 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3067 				DP_TX_DESC_ID_OFFSET_OS);
3068 
3069 		/*
3070 		 * If the release source is FW, process the HTT status
3071 		 */
3072 		if (qdf_unlikely(buffer_src ==
3073 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3074 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3075 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3076 					htt_tx_status);
3077 			dp_tx_process_htt_completion(tx_desc,
3078 					htt_tx_status);
3079 		} else {
3080 			/* Pool id is not matching. Error */
3081 			if (tx_desc->pool_id != pool_id) {
3082 				QDF_TRACE(QDF_MODULE_ID_DP,
3083 					QDF_TRACE_LEVEL_FATAL,
3084 					"Tx Comp pool id %d not matched %d",
3085 					pool_id, tx_desc->pool_id);
3086 
3087 				qdf_assert_always(0);
3088 			}
3089 
3090 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3091 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3092 				QDF_TRACE(QDF_MODULE_ID_DP,
3093 					QDF_TRACE_LEVEL_FATAL,
3094 					"Txdesc invalid, flgs = %x,id = %d",
3095 					tx_desc->flags,	tx_desc_id);
3096 				qdf_assert_always(0);
3097 			}
3098 
3099 			/* First ring descriptor on the cycle */
3100 			if (!head_desc) {
3101 				head_desc = tx_desc;
3102 				tail_desc = tx_desc;
3103 			}
3104 
3105 			tail_desc->next = tx_desc;
3106 			tx_desc->next = NULL;
3107 			tail_desc = tx_desc;
3108 
3109 			/* Collect hw completion contents */
3110 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3111 					&tx_desc->comp, 1);
3112 
3113 		}
3114 
3115 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3116 
		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing
		 */
		if (num_processed >= quota)
3122 			break;
3123 
3124 		count++;
3125 	}
3126 
3127 	hal_srng_access_end(soc->hal_soc, hal_srng);
3128 
3129 	/* Process the reaped descriptors */
3130 	if (head_desc)
3131 		dp_tx_comp_process_desc(soc, head_desc);
3132 
3133 	return num_processed;
3134 }
3135 
3136 #ifdef CONVERGED_TDLS_ENABLE
3137 /**
3138  * dp_tx_non_std() - Allow the control-path SW to send data frames
3139  *
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
3143  *
3144  * Return: NULL on success,
3145  *         nbuf when it fails to send
3146  */
3147 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3148 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3149 {
3150 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3151 
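	/*
	 * OL_TX_SPEC_NO_FREE marks the frames on this vdev as TDLS frames;
	 * their completion is expected to go through
	 * dp_non_std_tx_comp_free_buff() (see dp_tx_comp_free_buf()) instead
	 * of being freed by the regular completion path.
	 */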
3152 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3153 		vdev->is_tdls_frame = true;
3154 	return dp_tx_send(vdev_handle, msdu_list);
3155 }
3156 #endif
3157 
3158 /**
3159  * dp_tx_vdev_attach() - attach vdev to dp tx
3160  * @vdev: virtual device instance
3161  *
3162  * Return: QDF_STATUS_SUCCESS: success
3163  *         QDF_STATUS_E_RESOURCES: Error return
3164  */
3165 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3166 {
3167 	/*
3168 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3169 	 */
3170 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3171 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3172 
3173 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3174 			vdev->vdev_id);
3175 
3176 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3177 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3178 
3179 	/*
3180 	 * Set HTT Extension Valid bit to 0 by default
3181 	 */
3182 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3183 
3184 	dp_tx_vdev_update_search_flags(vdev);
3185 
3186 	return QDF_STATUS_SUCCESS;
3187 }
3188 
3189 /**
3190  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3191  * @vdev: virtual device instance
3192  *
3193  * Return: void
3194  *
3195  */
3196 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3197 {
3198 	/*
3199 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3200 	 * for TDLS link
3201 	 *
3202 	 * Enable AddrY (SA based search) only for non-WDS STA and
3203 	 * ProxySTA VAP modes.
3204 	 *
3205 	 * In all other VAP modes, only DA based search should be
3206 	 * enabled
3207 	 */
3208 	if (vdev->opmode == wlan_op_mode_sta &&
3209 	    vdev->tdls_link_connected)
3210 		vdev->hal_desc_addr_search_flags =
3211 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3212 	else if ((vdev->opmode == wlan_op_mode_sta &&
3213 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
3214 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3215 	else
3216 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3217 }
3218 
3219 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3220 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3221 {
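	/* With flow control V2, descriptor pools are allocated and released
	 * dynamically, so there is nothing to flush per vdev here.
	 */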
3222 }
3223 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3224 
/**
 * dp_tx_desc_flush() - release resources associated with tx_desc
 * @vdev: virtual device instance
 *
 * This function will free all outstanding Tx buffers,
 * including ME buffers, for which either the free on
 * completion did not happen or the completion was not
 * received.
 *
 * Return: void
 */
3234 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3235 {
3236 	uint8_t i, num_pool;
3237 	uint32_t j;
3238 	uint32_t num_desc;
3239 	struct dp_soc *soc = vdev->pdev->soc;
3240 	struct dp_tx_desc_s *tx_desc = NULL;
3241 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3242 
3243 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3244 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3245 
3246 	for (i = 0; i < num_pool; i++) {
3247 		for (j = 0; j < num_desc; j++) {
3248 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3249 			if (tx_desc_pool &&
3250 				tx_desc_pool->desc_pages.cacheable_pages) {
3251 				tx_desc = dp_tx_desc_find(soc, i,
3252 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3253 					DP_TX_DESC_ID_PAGE_OS,
3254 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3255 					DP_TX_DESC_ID_OFFSET_OS);
3256 
3257 				if (tx_desc && (tx_desc->vdev == vdev) &&
3258 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3259 					dp_tx_comp_free_buf(soc, tx_desc);
3260 					dp_tx_desc_release(tx_desc, i);
3261 				}
3262 			}
3263 		}
3264 	}
3265 }
3266 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3267 
3268 /**
3269  * dp_tx_vdev_detach() - detach vdev from dp tx
3270  * @vdev: virtual device instance
3271  *
3272  * Return: QDF_STATUS_SUCCESS: success
3273  *         QDF_STATUS_E_RESOURCES: Error return
3274  */
3275 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3276 {
3277 	dp_tx_desc_flush(vdev);
3278 	return QDF_STATUS_SUCCESS;
3279 }
3280 
3281 /**
3282  * dp_tx_pdev_attach() - attach pdev to dp tx
3283  * @pdev: physical device instance
3284  *
3285  * Return: QDF_STATUS_SUCCESS: success
3286  *         QDF_STATUS_E_RESOURCES: Error return
3287  */
3288 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3289 {
3290 	struct dp_soc *soc = pdev->soc;
3291 
3292 	/* Initialize Flow control counters */
3293 	qdf_atomic_init(&pdev->num_tx_exception);
3294 	qdf_atomic_init(&pdev->num_tx_outstanding);
3295 
3296 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3297 		/* Initialize descriptors in TCL Ring */
3298 		hal_tx_init_data_ring(soc->hal_soc,
3299 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3300 	}
3301 
3302 	return QDF_STATUS_SUCCESS;
3303 }
3304 
3305 /**
3306  * dp_tx_pdev_detach() - detach pdev from dp tx
3307  * @pdev: physical device instance
3308  *
3309  * Return: QDF_STATUS_SUCCESS: success
3310  *         QDF_STATUS_E_RESOURCES: Error return
3311  */
3312 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3313 {
3314 	dp_tx_me_exit(pdev);
3315 	return QDF_STATUS_SUCCESS;
3316 }
3317 
3318 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3319 /* Pools will be allocated dynamically */
3320 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3321 					int num_desc)
3322 {
3323 	uint8_t i;
3324 
3325 	for (i = 0; i < num_pool; i++) {
3326 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3327 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3328 	}
3329 
3330 	return 0;
3331 }
3332 
3333 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3334 {
3335 	uint8_t i;
3336 
3337 	for (i = 0; i < num_pool; i++)
3338 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3339 }
3340 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3341 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3342 					int num_desc)
3343 {
3344 	uint8_t i;
3345 
3346 	/* Allocate software Tx descriptor pools */
3347 	for (i = 0; i < num_pool; i++) {
3348 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3349 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3350 					"%s Tx Desc Pool alloc %d failed %pK",
3351 					__func__, i, soc);
3352 			return ENOMEM;
3353 		}
3354 	}
3355 	return 0;
3356 }
3357 
3358 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3359 {
3360 	uint8_t i;
3361 
3362 	for (i = 0; i < num_pool; i++) {
3363 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3364 		if (dp_tx_desc_pool_free(soc, i)) {
3365 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3366 				"%s Tx Desc Pool Free failed", __func__);
3367 		}
3368 	}
3369 }
3370 
3371 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3372 
3373 /**
3374  * dp_tx_soc_detach() - detach soc from dp tx
3375  * @soc: core txrx main context
3376  *
3377  * This function will detach dp tx into main device context
3378  * will free dp tx resource and initialize resources
3379  *
3380  * Return: QDF_STATUS_SUCCESS: success
3381  *         QDF_STATUS_E_RESOURCES: Error return
3382  */
3383 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3384 {
3385 	uint8_t num_pool;
3386 	uint16_t num_desc;
3387 	uint16_t num_ext_desc;
3388 	uint8_t i;
3389 
3390 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3391 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3392 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3393 
3394 	dp_tx_flow_control_deinit(soc);
3395 	dp_tx_delete_static_pools(soc, num_pool);
3396 
3397 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3398 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3399 			__func__, num_pool, num_desc);
3400 
3401 	for (i = 0; i < num_pool; i++) {
3402 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3403 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3404 					"%s Tx Ext Desc Pool Free failed",
3405 					__func__);
3406 			return QDF_STATUS_E_RESOURCES;
3407 		}
3408 	}
3409 
3410 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3411 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3412 			__func__, num_pool, num_ext_desc);
3413 
3414 	for (i = 0; i < num_pool; i++) {
3415 		dp_tx_tso_desc_pool_free(soc, i);
3416 	}
3417 
3418 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3419 			"%s TSO Desc Pool %d Free descs = %d",
3420 			__func__, num_pool, num_desc);
3421 
3422 
3423 	for (i = 0; i < num_pool; i++)
3424 		dp_tx_tso_num_seg_pool_free(soc, i);
3425 
3426 
3427 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3428 		"%s TSO Num of seg Desc Pool %d Free descs = %d",
3429 		__func__, num_pool, num_desc);
3430 
3431 	return QDF_STATUS_SUCCESS;
3432 }
3433 
3434 /**
3435  * dp_tx_soc_attach() - attach soc to dp tx
3436  * @soc: core txrx main context
3437  *
3438  * This function will attach dp tx into main device context
3439  * will allocate dp tx resource and initialize resources
3440  *
3441  * Return: QDF_STATUS_SUCCESS: success
3442  *         QDF_STATUS_E_RESOURCES: Error return
3443  */
3444 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3445 {
3446 	uint8_t i;
3447 	uint8_t num_pool;
3448 	uint32_t num_desc;
3449 	uint32_t num_ext_desc;
3450 
3451 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3452 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3453 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3454 
3455 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3456 		goto fail;
3457 
3458 	dp_tx_flow_control_init(soc);
3459 
3460 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3461 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3462 			__func__, num_pool, num_desc);
3463 
3464 	/* Allocate extension tx descriptor pools */
3465 	for (i = 0; i < num_pool; i++) {
3466 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3467 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3468 				"MSDU Ext Desc Pool alloc %d failed %pK",
3469 				i, soc);
3470 
3471 			goto fail;
3472 		}
3473 	}
3474 
3475 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3476 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3477 			__func__, num_pool, num_ext_desc);
3478 
3479 	for (i = 0; i < num_pool; i++) {
3480 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3481 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3482 				"TSO Desc Pool alloc %d failed %pK",
3483 				i, soc);
3484 
3485 			goto fail;
3486 		}
3487 	}
3488 
3489 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3490 			"%s TSO Desc Alloc %d, descs = %d",
3491 			__func__, num_pool, num_desc);
3492 
3493 	for (i = 0; i < num_pool; i++) {
3494 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3495 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3496 				"TSO Num of seg Pool alloc %d failed %pK",
3497 				i, soc);
3498 
3499 			goto fail;
3500 		}
3501 	}
3502 
3503 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3504 			"%s TSO Num of seg pool Alloc %d, descs = %d",
3505 			__func__, num_pool, num_desc);
3506 
3507 	/* Initialize descriptors in TCL Rings */
3508 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3509 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3510 			hal_tx_init_data_ring(soc->hal_soc,
3511 					soc->tcl_data_ring[i].hal_srng);
3512 		}
3513 	}
3514 
3515 	/*
3516 	 * todo - Add a runtime config option to enable this.
3517 	 */
3518 	/*
3519 	 * Due to multiple issues on NPR EMU, enable it selectively
3520 	 * only for NPR EMU, should be removed, once NPR platforms
3521 	 * are stable.
3522 	 */
3523 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3524 
3525 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3526 			"%s HAL Tx init Success", __func__);
3527 
3528 	return QDF_STATUS_SUCCESS;
3529 
3530 fail:
3531 	/* Detach will take care of freeing only allocated resources */
3532 	dp_tx_soc_detach(soc);
3533 	return QDF_STATUS_E_RESOURCES;
3534 }
3535 
/**
 * dp_tx_me_mem_free() - Function to free allocated memory in mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the list
 *
 * Return: void
3542  */
3543 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3544 		struct dp_tx_seg_info_s *seg_info_head)
3545 {
3546 	struct dp_tx_me_buf_t *mc_uc_buf;
3547 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3548 	qdf_nbuf_t nbuf = NULL;
3549 	uint64_t phy_addr;
3550 
3551 	while (seg_info_head) {
3552 		nbuf = seg_info_head->nbuf;
3553 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3554 			seg_info_head->frags[0].vaddr;
3555 		phy_addr = seg_info_head->frags[0].paddr_hi;
3556 		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3557 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3558 				phy_addr,
3559 				QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN);
3560 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3561 		qdf_nbuf_free(nbuf);
3562 		seg_info_new = seg_info_head;
3563 		seg_info_head = seg_info_head->next;
3564 		qdf_mem_free(seg_info_new);
3565 	}
3566 }
3567 
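/*
 * Layout note (editorial): each list node freed above is built by
 * dp_tx_me_send_convert_ucast() below as
 *
 *	seg_info->frags[0] = per-client DA buffer from dp_tx_me_alloc_buf()
 *			     (DP_MAC_ADDR_LEN bytes, DMA mapped)
 *	seg_info->frags[1] = shared payload fragment of the original nbuf
 *	seg_info->nbuf     = clone of (or last reference to) the original nbuf
 *
 * so the free path unmaps frags[0], returns it to the ME buffer pool,
 * drops the nbuf reference and finally frees the node itself.
 */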
3568 /**
3569  * dp_tx_me_send_convert_ucast(): Function to convert multicast to unicast
3570  * @vdev_handle: DP VDEV handle
3571  * @nbuf: Multicast nbuf
3572  * @newmac: Table of the clients to which packets have to be sent
3573  * @new_mac_cnt: Number of clients
3574  *
3575  * Return: Number of converted packets
3576  */
3577 uint16_t
3578 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3579 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3580 {
3581 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3582 	struct dp_pdev *pdev = vdev->pdev;
3583 	struct ether_header *eh;
3584 	uint8_t *data;
3585 	uint16_t len;
3586 
3587 	/* reference to frame dst addr */
3588 	uint8_t *dstmac;
3589 	/* copy of original frame src addr */
3590 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3591 
3592 	/* local index into newmac */
3593 	uint8_t new_mac_idx = 0;
3594 	struct dp_tx_me_buf_t *mc_uc_buf;
3595 	qdf_nbuf_t nbuf_clone;
3596 	struct dp_tx_msdu_info_s msdu_info;
3597 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3598 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3599 	struct dp_tx_seg_info_s *seg_info_new;
3600 	struct dp_tx_frag_info_s data_frag;
3601 	qdf_dma_addr_t paddr_data;
3602 	qdf_dma_addr_t paddr_mcbuf = 0;
3603 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3604 	QDF_STATUS status;
3605 
3606 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3607 
3608 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3609 
3610 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3611 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3612 
3613 	len = qdf_nbuf_len(nbuf);
3614 
3615 	data = qdf_nbuf_data(nbuf);
3616 
3617 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3618 			QDF_DMA_TO_DEVICE);
3619 
3620 	if (status) {
3621 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3622 				"Mapping failure Error:%d", status);
3623 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3624 		qdf_nbuf_free(nbuf);
3625 		return 1;
3626 	}
3627 
3628 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3629 
3630 	/*preparing data fragment*/
3631 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3632 	data_frag.paddr_lo = (uint32_t)paddr_data;
3633 	data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32);
3634 	data_frag.len = len - DP_MAC_ADDR_LEN;
3635 
3636 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3637 		dstmac = newmac[new_mac_idx];
3638 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3639 				"added mac addr (%pM)", dstmac);
3640 
3641 		/* Check for NULL Mac Address */
3642 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3643 			continue;
3644 
3645 		/* frame to self mac. skip */
3646 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3647 			continue;
3648 
3649 		/*
3650 		 * TODO: optimize to avoid malloc in per-packet path
3651 		 * For eg. seg_pool can be made part of vdev structure
3652 		 */
3653 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3654 
3655 		if (!seg_info_new) {
3656 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3657 					"alloc failed");
3658 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3659 			goto fail_seg_alloc;
3660 		}
3661 
3662 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3663 		if (mc_uc_buf == NULL)
3664 			goto fail_buf_alloc;
3665 
3666 		/*
3667 		 * TODO: Check if we need to clone the nbuf
3668 		 * Or can we just use the reference for all cases
3669 		 */
3670 		if (new_mac_idx < (new_mac_cnt - 1)) {
3671 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3672 			if (nbuf_clone == NULL) {
3673 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3674 				goto fail_clone;
3675 			}
3676 		} else {
3677 			/*
3678 			 * Update the ref
3679 			 * to account for frame sent without cloning
3680 			 */
3681 			qdf_nbuf_ref(nbuf);
3682 			nbuf_clone = nbuf;
3683 		}
3684 
3685 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3686 
3687 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3688 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3689 				&paddr_mcbuf);
3690 
3691 		if (status) {
3692 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3693 					"Mapping failure Error:%d", status);
3694 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3695 			goto fail_map;
3696 		}
3697 
3698 		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3699 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3700 		seg_info_new->frags[0].paddr_hi =
3701 			((uint64_t) paddr_mcbuf >> 32);
3702 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3703 
3704 		seg_info_new->frags[1] = data_frag;
3705 		seg_info_new->nbuf = nbuf_clone;
3706 		seg_info_new->frag_cnt = 2;
3707 		seg_info_new->total_len = len;
3708 
3709 		seg_info_new->next = NULL;
3710 
3711 		if (seg_info_head == NULL)
3712 			seg_info_head = seg_info_new;
3713 		else
3714 			seg_info_tail->next = seg_info_new;
3715 
3716 		seg_info_tail = seg_info_new;
3717 	}
3718 
3719 	if (!seg_info_head) {
3720 		goto free_return;
3721 	}
3722 
3723 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3724 	msdu_info.num_seg = new_mac_cnt;
3725 	msdu_info.frm_type = dp_tx_frm_me;
3726 
3727 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3728 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3729 
3730 	while (seg_info_head->next) {
3731 		seg_info_new = seg_info_head;
3732 		seg_info_head = seg_info_head->next;
3733 		qdf_mem_free(seg_info_new);
3734 	}
3735 	qdf_mem_free(seg_info_head);
3736 
3737 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3738 	qdf_nbuf_free(nbuf);
3739 	return new_mac_cnt;
3740 
3741 fail_map:
3742 	qdf_nbuf_free(nbuf_clone);
3743 
3744 fail_clone:
3745 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3746 
3747 fail_buf_alloc:
3748 	qdf_mem_free(seg_info_new);
3749 
3750 fail_seg_alloc:
3751 	dp_tx_me_mem_free(pdev, seg_info_head);
3752 
3753 free_return:
3754 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3755 	qdf_nbuf_free(nbuf);
3756 	return 1;
3757 }
3758 
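/*
 * Caller sketch (editorial note, not part of the original source): a
 * multicast-enhancement client would typically collect the destination MAC
 * table and hand the original multicast frame to
 * dp_tx_me_send_convert_ucast(). The table size and population loop below
 * are illustrative assumptions only.
 *
 *	uint8_t newmac[MAX_CLIENTS][DP_MAC_ADDR_LEN];	// MAX_CLIENTS is hypothetical
 *	uint8_t cnt = 0;
 *	uint16_t sent;
 *	// ... fill newmac[cnt++] with each subscribed client's address ...
 *	sent = dp_tx_me_send_convert_ucast((struct cdp_vdev *)vdev, nbuf,
 *					   newmac, cnt);
 *	// returns new_mac_cnt on success, 1 on failure; nbuf is consumed
 *	// in both cases
 */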
3759