xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 5c57a8905ee57aab8b10cde048801372f46cc3c0)
1 /*
2  * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include <wlan_cfg.h>
28 #ifdef MESH_MODE_SUPPORT
29 #include "if_meta_hdr.h"
30 #endif
31 
32 #ifdef TX_PER_PDEV_DESC_POOL
33 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
34 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
35 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
36 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
37 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
38 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
39 #else
40 	#ifdef TX_PER_VDEV_DESC_POOL
41 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
42 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
43 	#else
44 		#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
45 		#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
46 	#endif /* TX_PER_VDEV_DESC_POOL */
47 #endif /* TX_PER_PDEV_DESC_POOL */
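/*
 * Illustrative sketch (not part of the original source): with neither
 * TX_PER_PDEV_DESC_POOL nor TX_PER_VDEV_DESC_POOL defined, a frame sent
 * from CPU 2 uses descriptor pool 2 and the TCL data ring given by
 * vdev->pdev->soc->tx_ring_map[2], so per-CPU pools and rings minimize
 * lock contention on these resources.
 */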
48 
49 /* TODO Add support in TSO */
50 #define DP_DESC_NUM_FRAG(x) 0
51 
52 /* disable TQM_BYPASS */
53 #define TQM_BYPASS_WAR 0
54 
55 /**
56  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
57  * @vdev: DP Virtual device handle
58  * @nbuf: Buffer pointer
59  * @queue: queue ids container for nbuf
60  *
61  * The Tx queue has two components: a software descriptor pool id and a DMA
62  * ring id. The combination used for a frame depends on the Tx feature set
63  * and the hardware configuration.
64  * For example -
65  * With XPS enabled, TX descriptor pools and DMA rings are assigned per CPU id.
66  * With no XPS and lock-based resource protection, descriptor pool ids differ
67  * per vdev, while the DMA ring id is the same as the single pdev id.
68  *
69  * Return: None
70  */
71 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
72 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
73 {
74 	/* get flow id */
75 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
76 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
77 
78 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
79 			"%s, pool_id:%d ring_id: %d\n",
80 			__func__, queue->desc_pool_id, queue->ring_id);
81 
82 	return;
83 }
84 
85 #if defined(FEATURE_TSO)
86 /**
87  * dp_tx_tso_desc_release() - Release the tso segment
88  *                            after unmapping all the fragments
89  *
90  * @soc: core txrx context (soc handle)
91  * @tx_desc: Tx software descriptor
92  */
93 static void dp_tx_tso_desc_release(struct dp_soc *soc,
94 		struct dp_tx_desc_s *tx_desc)
95 {
96 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
97 	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
98 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
99 			"%s %d TSO desc is NULL!",
100 			__func__, __LINE__);
101 		qdf_assert(0);
102 	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
103 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
104 			"%s %d TSO common info is NULL!",
105 			__func__, __LINE__);
106 		qdf_assert(0);
107 	} else {
108 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
109 			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;
110 
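		/*
		 * Descriptive note: tso_cmn_num_seg is shared by all segments
		 * of one TSO jumbo nbuf. Only the release of the last segment
		 * (count dropping to zero) unmaps with the final-unmap flag
		 * set and frees the common num_seg descriptor.
		 */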
111 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
112 			tso_num_desc->num_seg.tso_cmn_num_seg--;
113 			qdf_nbuf_unmap_tso_segment(soc->osdev,
114 					tx_desc->tso_desc, false);
115 		} else {
116 			tso_num_desc->num_seg.tso_cmn_num_seg--;
117 			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
118 			qdf_nbuf_unmap_tso_segment(soc->osdev,
119 					tx_desc->tso_desc, true);
120 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
121 					tx_desc->tso_num_desc);
122 			tx_desc->tso_num_desc = NULL;
123 		}
124 		dp_tx_tso_desc_free(soc,
125 				tx_desc->pool_id, tx_desc->tso_desc);
126 		tx_desc->tso_desc = NULL;
127 	}
128 }
129 #else
130 static void dp_tx_tso_desc_release(struct dp_soc *soc,
131 		struct dp_tx_desc_s *tx_desc)
132 {
133 	return;
134 }
135 #endif
136 /**
137  * dp_tx_desc_release() - Release Tx Descriptor
138  * @tx_desc : Tx Descriptor
139  * @desc_pool_id: Descriptor Pool ID
140  *
141  * Deallocate all resources attached to Tx descriptor and free the Tx
142  * descriptor.
143  *
144  * Return: None
145  */
146 static void
147 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
148 {
149 	struct dp_pdev *pdev = tx_desc->pdev;
150 	struct dp_soc *soc;
151 	uint8_t comp_status = 0;
152 
153 	qdf_assert(pdev);
154 
155 	soc = pdev->soc;
156 
157 	if (tx_desc->frm_type == dp_tx_frm_tso)
158 		dp_tx_tso_desc_release(soc, tx_desc);
159 
160 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
161 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
162 
163 	qdf_atomic_dec(&pdev->num_tx_outstanding);
164 
165 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
166 		qdf_atomic_dec(&pdev->num_tx_exception);
167 
168 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
169 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
170 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
171 	else
172 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
173 
174 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
175 			"Tx Completion Release desc %d status %d outstanding %d\n",
176 			tx_desc->id, comp_status,
177 			qdf_atomic_read(&pdev->num_tx_outstanding));
178 
179 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
180 	return;
181 }
182 
183 /**
184  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
185  * @vdev: DP vdev Handle
186  * @nbuf: skb
187  *
188  * Prepares and fills HTT metadata in the frame pre-header for special frames
189  * that should be transmitted using varying transmit parameters.
190  * There are 2 VDEV modes that currently need this special metadata -
191  *  1) Mesh Mode
192  *  2) DSRC Mode
193  *
194  * Return: HTT metadata size
195  *
196  */
197 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
198 		uint32_t *meta_data)
199 {
200 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
201 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
202 
203 	uint8_t htt_desc_size;
204 
205 	/* Size rounded up to a multiple of 8 bytes */
206 	uint8_t htt_desc_size_aligned;
207 
208 	uint8_t *hdr = NULL;
209 
210 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
211 
212 	/*
213 	 * Metadata - HTT MSDU Extension header
214 	 */
215 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
216 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
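	/*
	 * Worked example (illustrative only): if the extension descriptor
	 * were 21 bytes, (21 + 7) & ~0x7 = 24, i.e. the size is rounded up
	 * to the next multiple of 8 so the pushed metadata keeps the buffer
	 * start 8-byte aligned.
	 */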
217 
218 	if (vdev->mesh_vdev) {
219 
220 		/* Fill and add HTT metaheader */
221 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
222 		if (hdr == NULL) {
223 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
224 					"Error in filling HTT metadata\n");
225 
226 			return 0;
227 		}
228 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
229 
230 	} else if (vdev->opmode == wlan_op_mode_ocb) {
231 		/* Todo - Add support for DSRC */
232 	}
233 
234 	return htt_desc_size_aligned;
235 }
236 
237 /**
238  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
239  * @tso_seg: TSO segment to process
240  * @ext_desc: Pointer to MSDU extension descriptor
241  *
242  * Return: void
243  */
244 #if defined(FEATURE_TSO)
245 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
246 		void *ext_desc)
247 {
248 	uint8_t num_frag;
249 	uint32_t tso_flags;
250 
251 	/*
252 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
253 	 * tcp_flag_mask
254 	 *
255 	 * Checksum enable flags are set in TCL descriptor and not in Extension
256 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
257 	 */
258 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
259 
260 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
261 
262 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
263 		tso_seg->tso_flags.ip_len);
264 
265 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
266 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
267 
268 
269 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
270 		uint32_t lo = 0;
271 		uint32_t hi = 0;
272 
273 		qdf_dmaaddr_to_32s(
274 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
275 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
276 			tso_seg->tso_frags[num_frag].length);
277 	}
278 
279 	return;
280 }
281 #else
282 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
283 		void *ext_desc)
284 {
285 	return;
286 }
287 #endif
288 
289 #if defined(FEATURE_TSO)
290 /**
291  * dp_tx_free_tso_seg() - Loop through the tso segments
292  *                        allocated and free them
293  *
294  * @soc: soc handle
295  * @free_seg: list of tso segments
296  * @msdu_info: msdu descriptor
297  *
298  * Return - void
299  * Return: void
300 static void dp_tx_free_tso_seg(struct dp_soc *soc,
301 	struct qdf_tso_seg_elem_t *free_seg,
302 	struct dp_tx_msdu_info_s *msdu_info)
303 {
304 	struct qdf_tso_seg_elem_t *next_seg;
305 
306 	while (free_seg) {
307 		next_seg = free_seg->next;
308 		dp_tx_tso_desc_free(soc,
309 			msdu_info->tx_queue.desc_pool_id,
310 			free_seg);
311 		free_seg = next_seg;
312 	}
313 }
314 
315 /**
316  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
317  *                            allocated and free them
318  *
319  * @soc:  soc handle
320  * @free_seg: list of tso segments
321  * @msdu_info: msdu descriptor
322  * Return: void
323  */
324 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
325 	struct qdf_tso_num_seg_elem_t *free_seg,
326 	struct dp_tx_msdu_info_s *msdu_info)
327 {
328 	struct qdf_tso_num_seg_elem_t *next_seg;
329 
330 	while (free_seg) {
331 		next_seg = free_seg->next;
332 		dp_tso_num_seg_free(soc,
333 			msdu_info->tx_queue.desc_pool_id,
334 			free_seg);
335 		free_seg = next_seg;
336 	}
337 }
338 
339 /**
340  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
341  * @vdev: virtual device handle
342  * @msdu: network buffer
343  * @msdu_info: meta data associated with the msdu
344  *
345  * Return: QDF_STATUS_SUCCESS on success, error QDF_STATUS on failure
346  */
347 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
348 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
349 {
350 	struct qdf_tso_seg_elem_t *tso_seg;
351 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
352 	struct dp_soc *soc = vdev->pdev->soc;
353 	struct qdf_tso_info_t *tso_info;
354 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
355 
356 	tso_info = &msdu_info->u.tso_info;
357 	tso_info->curr_seg = NULL;
358 	tso_info->tso_seg_list = NULL;
359 	tso_info->num_segs = num_seg;
360 	msdu_info->frm_type = dp_tx_frm_tso;
361 	tso_info->tso_num_seg_list = NULL;
362 
363 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
364 
365 	while (num_seg) {
366 		tso_seg = dp_tx_tso_desc_alloc(
367 				soc, msdu_info->tx_queue.desc_pool_id);
368 		if (tso_seg) {
369 			tso_seg->next = tso_info->tso_seg_list;
370 			tso_info->tso_seg_list = tso_seg;
371 			num_seg--;
372 		} else {
373 			struct qdf_tso_seg_elem_t *free_seg =
374 				tso_info->tso_seg_list;
375 
376 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
377 
378 			return QDF_STATUS_E_NOMEM;
379 		}
380 	}
381 
382 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
383 
384 	tso_num_seg = dp_tso_num_seg_alloc(soc,
385 			msdu_info->tx_queue.desc_pool_id);
386 
387 	if (tso_num_seg) {
388 		tso_num_seg->next = tso_info->tso_num_seg_list;
389 		tso_info->tso_num_seg_list = tso_num_seg;
390 	} else {
391 		/* Bug: free tso_num_seg and tso_seg */
392 		/* Free the already allocated num of segments */
393 		struct qdf_tso_seg_elem_t *free_seg =
394 					tso_info->tso_seg_list;
395 
396 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
397 			__func__);
398 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
399 
400 		return QDF_STATUS_E_NOMEM;
401 	}
402 
403 	msdu_info->num_seg =
404 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
405 
406 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
407 			msdu_info->num_seg);
408 
409 	if (!(msdu_info->num_seg)) {
410 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
411 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
412 					msdu_info);
413 		return QDF_STATUS_E_INVAL;
414 	}
415 
416 	tso_info->curr_seg = tso_info->tso_seg_list;
417 
418 	return QDF_STATUS_SUCCESS;
419 }
420 #else
421 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
422 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
423 {
424 	return QDF_STATUS_E_NOMEM;
425 }
426 #endif
427 
428 /**
429  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
430  * @vdev: DP Vdev handle
431  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
432  * @desc_pool_id: Descriptor Pool ID
433  *
434  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
435  */
436 static
437 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
438 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
439 {
440 	uint8_t i;
441 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
442 	struct dp_tx_seg_info_s *seg_info;
443 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
444 	struct dp_soc *soc = vdev->pdev->soc;
445 
446 	/* Allocate an extension descriptor */
447 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
448 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
449 
450 	if (!msdu_ext_desc) {
451 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
452 		return NULL;
453 	}
454 
455 	if (qdf_unlikely(vdev->mesh_vdev)) {
456 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
457 				&msdu_info->meta_data[0],
458 				sizeof(struct htt_tx_msdu_desc_ext2_t));
459 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
460 		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
461 	}
462 
463 	switch (msdu_info->frm_type) {
464 	case dp_tx_frm_sg:
465 	case dp_tx_frm_me:
466 	case dp_tx_frm_raw:
467 		seg_info = msdu_info->u.sg_info.curr_seg;
468 		/* Update the buffer pointers in MSDU Extension Descriptor */
469 		for (i = 0; i < seg_info->frag_cnt; i++) {
470 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
471 				seg_info->frags[i].paddr_lo,
472 				seg_info->frags[i].paddr_hi,
473 				seg_info->frags[i].len);
474 		}
475 
476 		break;
477 
478 	case dp_tx_frm_tso:
479 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
480 				&cached_ext_desc[0]);
481 		break;
482 
483 
484 	default:
485 		break;
486 	}
487 
488 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
489 			cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
490 
491 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
492 			msdu_ext_desc->vaddr);
493 
494 	return msdu_ext_desc;
495 }
496 
497 /**
498  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
499  * @vdev: DP vdev handle
500  * @nbuf: skb
501  * @desc_pool_id: Descriptor pool ID
502  * Allocate and prepare Tx descriptor with msdu information.
503  *
504  * Return: Pointer to Tx Descriptor on success,
505  *         NULL on failure
506  */
507 static
508 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
509 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
510 		uint32_t *meta_data)
511 {
512 	uint8_t align_pad;
513 	uint8_t is_exception = 0;
514 	uint8_t htt_hdr_size;
515 	struct ether_header *eh;
516 	struct dp_tx_desc_s *tx_desc;
517 	struct dp_pdev *pdev = vdev->pdev;
518 	struct dp_soc *soc = pdev->soc;
519 
520 	/* Allocate software Tx descriptor */
521 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
522 	if (qdf_unlikely(!tx_desc)) {
523 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
524 			"%s Tx Desc Alloc Failed\n", __func__);
525 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
526 		return NULL;
527 	}
528 
529 	/* Flow control/Congestion Control counters */
530 	qdf_atomic_inc(&pdev->num_tx_outstanding);
531 
532 	/* Initialize the SW tx descriptor */
533 	tx_desc->nbuf = nbuf;
534 	tx_desc->frm_type = dp_tx_frm_std;
535 	tx_desc->tx_encap_type = vdev->tx_encap_type;
536 	tx_desc->vdev = vdev;
537 	tx_desc->pdev = pdev;
538 	tx_desc->msdu_ext_desc = NULL;
539 
540 	/**
541 	 * For non-scatter regular frames, buffer pointer is directly
542 	 * programmed in TCL input descriptor instead of using an MSDU
543 	 * extension descriptor. For this case, the HW requirement is that the
544 	 * descriptor should always point to an 8-byte aligned address.
545 	 *
546 	 * So we add alignment pad to start of buffer, and specify the actual
547 	 * start of data through pkt_offset
548 	 */
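	/*
	 * Worked example (illustrative only): if qdf_nbuf_data() returns an
	 * address ending in 0x5, align_pad = 0x5 & 0x7 = 5; five bytes are
	 * pushed in front of the data so the buffer start becomes 8-byte
	 * aligned, and pkt_offset = 5 tells HW where the real data begins.
	 */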
549 	align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
550 	if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
551 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
552 				"qdf_nbuf_push_head failed\n");
553 		goto failure;
554 	}
555 
556 	tx_desc->pkt_offset = align_pad;
557 
558 	/*
559 	 * For special modes (vdev_type == ocb or mesh), data frames should be
560 	 * transmitted using varying transmit parameters (tx spec) which include
561 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
562 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
563 	 * These frames are sent as exception packets to firmware.
564 	 *
565 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
566 	 *  to get 8-byte aligned start address along with align_pad added above
567 	 *
568 	 *  |-----------------------------|
569 	 *  |                             |
570 	 *  |-----------------------------| <-----Buffer Pointer Address given
571 	 *  |                             |  ^    in HW descriptor (aligned)
572 	 *  |       HTT Metadata          |  |
573 	 *  |                             |  |
574 	 *  |                             |  | Packet Offset given in descriptor
575 	 *  |                             |  |
576 	 *  |-----------------------------|  |
577 	 *  |       Alignment Pad         |  v
578 	 *  |-----------------------------| <----- Actual buffer start address
579 	 *  |        SKB Data             |           (Unaligned)
580 	 *  |                             |
581 	 *  |                             |
582 	 *  |                             |
583 	 *  |                             |
584 	 *  |                             |
585 	 *  |-----------------------------|
586 	 */
587 	if (qdf_unlikely(vdev->mesh_vdev ||
588 				(vdev->opmode == wlan_op_mode_ocb))) {
589 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
590 				meta_data);
591 		if (htt_hdr_size == 0)
592 			goto failure;
593 		tx_desc->pkt_offset += htt_hdr_size;
594 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
595 		is_exception = 1;
596 	}
597 
598 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
599 				qdf_nbuf_map(soc->osdev, nbuf,
600 					QDF_DMA_TO_DEVICE))) {
601 		/* Handle failure */
602 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
603 				"qdf_nbuf_map failed\n");
604 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
605 		goto failure;
606 	}
607 
608 	if (qdf_unlikely(vdev->nawds_enabled)) {
609 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
610 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
611 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
612 			is_exception = 1;
613 		}
614 	}
615 
616 #if !TQM_BYPASS_WAR
617 	if (is_exception)
618 #endif
619 	{
620 		/* Temporary WAR due to TQM VP issues */
621 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
622 		qdf_atomic_inc(&pdev->num_tx_exception);
623 	}
624 
625 	return tx_desc;
626 
627 failure:
628 	dp_tx_desc_release(tx_desc, desc_pool_id);
629 	return NULL;
630 }
631 
632 /**
633  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
634  * @vdev: DP vdev handle
635  * @nbuf: skb
636  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
637  * @desc_pool_id : Descriptor Pool ID
638  *
639  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
640  * information. For frames with fragments, allocate and prepare
641  * an MSDU extension descriptor
642  *
643  * Return: Pointer to Tx Descriptor on success,
644  *         NULL on failure
645  */
646 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
647 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
648 		uint8_t desc_pool_id)
649 {
650 	struct dp_tx_desc_s *tx_desc;
651 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
652 	struct dp_pdev *pdev = vdev->pdev;
653 	struct dp_soc *soc = pdev->soc;
654 
655 	/* Allocate software Tx descriptor */
656 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
657 	if (!tx_desc) {
658 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
659 		return NULL;
660 	}
661 
662 	/* Flow control/Congestion Control counters */
663 	qdf_atomic_inc(&pdev->num_tx_outstanding);
664 
665 	/* Initialize the SW tx descriptor */
666 	tx_desc->nbuf = nbuf;
667 	tx_desc->frm_type = msdu_info->frm_type;
668 	tx_desc->tx_encap_type = vdev->tx_encap_type;
669 	tx_desc->vdev = vdev;
670 	tx_desc->pdev = pdev;
671 	tx_desc->pkt_offset = 0;
672 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
673 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
674 
675 	/* Handle scattered frames - TSO/SG/ME */
676 	/* Allocate and prepare an extension descriptor for scattered frames */
677 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
678 	if (!msdu_ext_desc) {
679 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
680 				"%s Tx Extension Descriptor Alloc Fail\n",
681 				__func__);
682 		goto failure;
683 	}
684 
685 #if TQM_BYPASS_WAR
686 	/* Temporary WAR due to TQM VP issues */
687 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
688 	qdf_atomic_inc(&pdev->num_tx_exception);
689 #endif
690 	if (qdf_unlikely(vdev->mesh_vdev))
691 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
692 
693 	tx_desc->msdu_ext_desc = msdu_ext_desc;
694 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
695 
696 	return tx_desc;
697 failure:
698 	dp_tx_desc_release(tx_desc, desc_pool_id);
699 	return NULL;
700 }
701 
702 /**
703  * dp_tx_prepare_raw() - Prepare RAW packet TX
704  * @vdev: DP vdev handle
705  * @nbuf: buffer pointer
706  * @seg_info: Pointer to Segment info Descriptor to be prepared
707  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
708  *     descriptor
709  *
710  * Return: nbuf on success, NULL on failure
711  */
712 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
713 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
714 {
715 	qdf_nbuf_t curr_nbuf = NULL;
716 	uint16_t total_len = 0;
717 	int32_t i;
718 
719 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
720 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
721 
722 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
723 
724 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
725 	if ((qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
726 			&& (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) {
727 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
728 	}
729 
730 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
731 				QDF_DMA_TO_DEVICE)) {
732 		qdf_print("dma map error\n");
733 		DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
734 		qdf_nbuf_free(nbuf);
735 		return NULL;
736 	}
737 
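	/*
	 * Descriptive note: each nbuf in the chain below becomes one fragment
	 * entry of a single segment, so the whole raw-mode chain is later
	 * described by one MSDU extension descriptor.
	 */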
738 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
739 				curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
740 		seg_info->frags[i].paddr_lo =
741 			qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
742 		seg_info->frags[i].paddr_hi = 0x0;
743 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
744 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
745 		total_len += qdf_nbuf_len(curr_nbuf);
746 	}
747 
748 	seg_info->frag_cnt = i;
749 	seg_info->total_len = total_len;
750 	seg_info->next = NULL;
751 
752 	sg_info->curr_seg = seg_info;
753 
754 	msdu_info->frm_type = dp_tx_frm_raw;
755 	msdu_info->num_seg = 1;
756 
757 	return nbuf;
758 }
759 
760 /**
761  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
762  * @soc: DP Soc Handle
763  * @vdev: DP vdev handle
764  * @tx_desc: Tx Descriptor Handle
765  * @tid: TID from HLOS for overriding default DSCP-TID mapping
766  * @fw_metadata: Metadata to send to Target Firmware along with frame
767  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
768  *
769  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
770  *  from software Tx descriptor
771  *
772  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES if the TCL ring is full
773  */
774 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
775 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
776 				   uint16_t fw_metadata, uint8_t ring_id)
777 {
778 	uint8_t type;
779 	uint16_t length;
780 	void *hal_tx_desc, *hal_tx_desc_cached;
781 	qdf_dma_addr_t dma_addr;
782 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
783 
784 	/* Return Buffer Manager ID */
785 	uint8_t bm_id = ring_id;
786 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
787 
788 	hal_tx_desc_cached = (void *) cached_desc;
789 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
790 
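	/*
	 * Descriptive note: scattered frames (DP_TX_DESC_FLAG_FRAG) point the
	 * TCL descriptor at the MSDU extension descriptor, while simple
	 * linear frames point it straight at the DMA-mapped nbuf, with
	 * pkt_offset skipping any pad/metadata pushed in front of the payload.
	 */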
791 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
792 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
793 		type = HAL_TX_BUF_TYPE_EXT_DESC;
794 		dma_addr = tx_desc->msdu_ext_desc->paddr;
795 	} else {
796 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
797 		type = HAL_TX_BUF_TYPE_BUFFER;
798 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
799 	}
800 
801 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
802 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
803 			dma_addr , bm_id, tx_desc->id, type);
804 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
805 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
806 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
807 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
808 			vdev->dscp_tid_map_id);
809 
810 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
811 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u\n",
812 			__func__, length, type, (uint64_t)dma_addr,
813 			tx_desc->pkt_offset, tx_desc->id);
814 
815 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
816 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
817 
818 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
819 			vdev->hal_desc_addr_search_flags);
820 
821 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
822 		|| qdf_nbuf_is_tso(tx_desc->nbuf))  {
823 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
824 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
825 	}
826 
827 	if (tid != HTT_TX_EXT_TID_INVALID)
828 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
829 
830 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
831 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
832 
833 
834 	/* Sync cached descriptor with HW */
835 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
836 
837 	if (!hal_tx_desc) {
838 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
839 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
840 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
841 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
842 		return QDF_STATUS_E_RESOURCES;
843 	}
844 
845 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
846 
847 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
848 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
849 
850 	/*
851 	 * If one packet is enqueued in HW, PM usage count needs to be
852 	 * incremented by one to prevent future runtime suspend. This
853 	 * should be tied with the success of enqueuing. It will be
854 	 * decremented after the packet has been sent.
855 	 */
856 	hif_pm_runtime_get_noresume(soc->hif_handle);
857 
858 	return QDF_STATUS_SUCCESS;
859 }
860 
861 /**
862  * dp_tx_classify_tid() - Obtain TID to be used for this frame
863  * @vdev: DP vdev handle
864  * @nbuf: skb
865  *
866  * Extract the DSCP or PCP information from frame and map into TID value.
867  * Software based TID classification is required when more than 2 DSCP-TID
868  * mapping tables are needed.
869  * Hardware supports 2 DSCP-TID mapping tables
870  *
871  * Return: void
872  */
873 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
874 		struct dp_tx_msdu_info_s *msdu_info)
875 {
876 	uint8_t tos = 0, dscp_tid_override = 0;
877 	uint8_t *hdr_ptr, *L3datap;
878 	uint8_t is_mcast = 0;
879 	struct ether_header *eh = NULL;
880 	qdf_ethervlan_header_t *evh = NULL;
881 	uint16_t   ether_type;
882 	qdf_llc_t *llcHdr;
883 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
884 
885 	/* for mesh packets don't do any classification */
886 	if (qdf_unlikely(vdev->mesh_vdev))
887 		return;
888 
889 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
890 		eh = (struct ether_header *) nbuf->data;
891 		hdr_ptr = eh->ether_dhost;
892 		L3datap = hdr_ptr + sizeof(struct ether_header);
893 	} else {
894 		qdf_dot3_qosframe_t *qos_wh =
895 			(qdf_dot3_qosframe_t *) nbuf->data;
896 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
897 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
898 		return;
899 	}
900 
901 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
902 	ether_type = eh->ether_type;
903 
904 	/*
905 	 * Check if packet is dot3 or eth2 type.
906 	 */
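	/*
	 * Descriptive note: for LLC/SNAP (802.3) frames the real EtherType
	 * sits after the LLC header, and an optional 802.1Q tag pushes both
	 * the EtherType and the start of the L3 header further out; each
	 * branch below adjusts ether_type and L3datap accordingly.
	 */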
907 	if (IS_LLC_PRESENT(ether_type)) {
908 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
909 				sizeof(*llcHdr));
910 
911 		if (ether_type == htons(ETHERTYPE_8021Q)) {
912 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
913 				sizeof(*llcHdr);
914 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
915 					+ sizeof(*llcHdr) +
916 					sizeof(qdf_net_vlanhdr_t));
917 		} else {
918 			L3datap = hdr_ptr + sizeof(struct ether_header) +
919 				sizeof(*llcHdr);
920 		}
921 
922 	} else {
923 		if (ether_type == htons(ETHERTYPE_8021Q)) {
924 			evh = (qdf_ethervlan_header_t *) eh;
925 			ether_type = evh->ether_type;
926 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
927 		}
928 	}
929 
930 	/*
931 	 * Find priority from IP TOS DSCP field
932 	 */
933 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
934 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
935 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
936 			/* Only for unicast frames */
937 			if (!is_mcast) {
938 				/* send it on VO queue */
939 				msdu_info->tid = DP_VO_TID;
940 			}
941 		} else {
942 			/*
943 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
944 			 * from TOS byte.
945 			 */
946 			tos = ip->ip_tos;
947 			dscp_tid_override = 1;
948 
949 		}
950 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
951 		/* TODO
952 		 * use flowlabel
953 		 * IGMP/MLD cases to be handled in phase 2
954 		 */
955 		unsigned long ver_pri_flowlabel;
956 		unsigned long pri;
957 		ver_pri_flowlabel = *(unsigned long *) L3datap;
958 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
959 			DP_IPV6_PRIORITY_SHIFT;
960 		tos = pri;
961 		dscp_tid_override = 1;
962 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
963 		msdu_info->tid = DP_VO_TID;
964 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
965 		/* Only for unicast frames */
966 		if (!is_mcast) {
967 			/* send ucast arp on VO queue */
968 			msdu_info->tid = DP_VO_TID;
969 		}
970 	}
971 
972 	/*
973 	 * Assign all MCAST packets to BE
974 	 */
975 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
976 		if (is_mcast) {
977 			tos = 0;
978 			dscp_tid_override = 1;
979 		}
980 	}
981 
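	/*
	 * Illustrative sketch (assumes DP_IP_DSCP_SHIFT == 2 and
	 * DP_IP_DSCP_MASK == 0x3f): an IPv4 TOS byte of 0xb8 yields DSCP 46
	 * (EF), so the TID would be taken from
	 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
	 */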
982 	if (dscp_tid_override == 1) {
983 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
984 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
985 	}
986 	return;
987 }
988 
989 #ifdef CONVERGED_TDLS_ENABLE
990 /**
991  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
992  * @tx_desc: TX descriptor
993  *
994  * Return: None
995  */
996 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
997 {
998 	if (tx_desc->vdev) {
999 		if (tx_desc->vdev->is_tdls_frame)
1000 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1001 		tx_desc->vdev->is_tdls_frame = false;
1002 	}
1003 }
1004 
1005 /**
1006  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1007  * @tx_desc: TX descriptor
1008  * @vdev: datapath vdev handle
1009  *
1010  * Return: None
1011  */
1012 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1013 				  struct dp_vdev *vdev)
1014 {
1015 	struct hal_tx_completion_status ts = {0};
1016 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1017 
1018 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1019 	if (vdev->tx_non_std_data_callback.func) {
1020 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1021 		vdev->tx_non_std_data_callback.func(
1022 				vdev->tx_non_std_data_callback.ctxt,
1023 				nbuf, ts.status);
1024 		return;
1025 	}
1026 }
1027 #endif
1028 
1029 /**
1030  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1031  * @vdev: DP vdev handle
1032  * @nbuf: skb
1033  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1034  * @tx_q: Tx queue to be used for this Tx frame
1035  * @peer_id: peer_id of the peer in case of NAWDS frames
1036  *
1037  * Return: NULL on success,
1038  *         nbuf when it fails to send
1039  */
1040 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1041 		uint8_t tid, struct dp_tx_queue *tx_q,
1042 		uint32_t *meta_data, uint16_t peer_id)
1043 {
1044 	struct dp_pdev *pdev = vdev->pdev;
1045 	struct dp_soc *soc = pdev->soc;
1046 	struct dp_tx_desc_s *tx_desc;
1047 	QDF_STATUS status;
1048 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1049 	uint16_t htt_tcl_metadata = 0;
1050 
1051 	HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);
1052 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1053 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, meta_data);
1054 	if (!tx_desc) {
1055 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1056 			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1057 			  __func__, vdev, tx_q->desc_pool_id);
1058 		return nbuf;
1059 	}
1060 
1061 	dp_tx_update_tdls_flags(tx_desc);
1062 
1063 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1064 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1065 				"%s %d : HAL RING Access Failed -- %pK\n",
1066 				__func__, __LINE__, hal_srng);
1067 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1068 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1069 		goto fail_return;
1070 	}
1071 
1072 	if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1073 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1074 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1075 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1076 				peer_id);
1077 	} else
1078 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1079 
1080 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1081 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1082 			htt_tcl_metadata, tx_q->ring_id);
1083 
1084 	if (status != QDF_STATUS_SUCCESS) {
1085 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1086 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1087 			  __func__, tx_desc, tx_q->ring_id);
1088 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1089 		goto fail_return;
1090 	}
1091 
1092 	nbuf = NULL;
1093 
1094 fail_return:
1095 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1096 		hal_srng_access_end(soc->hal_soc, hal_srng);
1097 		hif_pm_runtime_put(soc->hif_handle);
1098 	} else {
1099 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1100 	}
1101 
1102 	return nbuf;
1103 }
1104 
1105 /**
1106  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1107  * @vdev: DP vdev handle
1108  * @nbuf: skb
1109  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1110  *
1111  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1112  *
1113  * Return: NULL on success,
1114  *         nbuf when it fails to send
1115  */
1116 #if QDF_LOCK_STATS
1117 static noinline
1118 #else
1119 static
1120 #endif
1121 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1122 				    struct dp_tx_msdu_info_s *msdu_info)
1123 {
1124 	uint8_t i;
1125 	struct dp_pdev *pdev = vdev->pdev;
1126 	struct dp_soc *soc = pdev->soc;
1127 	struct dp_tx_desc_s *tx_desc;
1128 	QDF_STATUS status;
1129 
1130 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1131 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1132 
1133 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1134 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1135 				"%s %d : HAL RING Access Failed -- %pK\n",
1136 				__func__, __LINE__, hal_srng);
1137 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1138 		return nbuf;
1139 	}
1140 
1141 	if (msdu_info->frm_type == dp_tx_frm_me)
1142 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1143 
1144 	i = 0;
1145 	/* Print statement to track i and num_seg */
1146 	/*
1147 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1148 	 * descriptors using information in msdu_info
1149 	 */
1150 	while (i < msdu_info->num_seg) {
1151 		/*
1152 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1153 		 * descriptor
1154 		 */
1155 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1156 				tx_q->desc_pool_id);
1157 
1158 		if (!tx_desc) {
1159 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1160 				  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1161 				  __func__, vdev, tx_q->desc_pool_id);
1162 
1163 			if (msdu_info->frm_type == dp_tx_frm_me) {
1164 				dp_tx_me_free_buf(pdev,
1165 					(void *)(msdu_info->u.sg_info
1166 						.curr_seg->frags[0].vaddr));
1167 			}
1168 			goto done;
1169 		}
1170 
1171 		if (msdu_info->frm_type == dp_tx_frm_me) {
1172 			tx_desc->me_buffer =
1173 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1174 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1175 		}
1176 
1177 		/*
1178 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1179 		 */
1180 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1181 			vdev->htt_tcl_metadata, tx_q->ring_id);
1182 
1183 		if (status != QDF_STATUS_SUCCESS) {
1184 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1185 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1186 				  __func__, tx_desc, tx_q->ring_id);
1187 
1188 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1189 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1190 
1191 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1192 			goto done;
1193 		}
1194 
1195 		/*
1196 		 * TODO
1197 		 * if tso_info structure can be modified to have curr_seg
1198 		 * as first element, following 2 blocks of code (for TSO and SG)
1199 		 * can be combined into 1
1200 		 */
1201 
1202 		/*
1203 		 * For frames with multiple segments (TSO, ME), jump to next
1204 		 * segment.
1205 		 */
1206 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1207 			if (msdu_info->u.tso_info.curr_seg->next) {
1208 				msdu_info->u.tso_info.curr_seg =
1209 					msdu_info->u.tso_info.curr_seg->next;
1210 
1211 				/*
1212 				 * If this is a jumbo nbuf, then increment the number of
1213 				 * nbuf users for each additional segment of the msdu.
1214 				 * This will ensure that the skb is freed only after
1215 				 * receiving tx completion for all segments of an nbuf
1216 				 */
1217 				qdf_nbuf_inc_users(nbuf);
1218 
1219 				/* Check with MCL if this is needed */
1220 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1221 			}
1222 		}
1223 
1224 		/*
1225 		 * For Multicast-Unicast converted packets,
1226 		 * each converted frame (for a client) is represented as
1227 		 * 1 segment
1228 		 */
1229 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1230 				(msdu_info->frm_type == dp_tx_frm_me)) {
1231 			if (msdu_info->u.sg_info.curr_seg->next) {
1232 				msdu_info->u.sg_info.curr_seg =
1233 					msdu_info->u.sg_info.curr_seg->next;
1234 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1235 			}
1236 		}
1237 		i++;
1238 	}
1239 
1240 	nbuf = NULL;
1241 
1242 done:
1243 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1244 		hal_srng_access_end(soc->hal_soc, hal_srng);
1245 		hif_pm_runtime_put(soc->hif_handle);
1246 	} else {
1247 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1248 	}
1249 
1250 	return nbuf;
1251 }
1252 
1253 /**
1254  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1255  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
1256  * @vdev: DP vdev handle
1257  * @nbuf: skb
1258  * @seg_info: Pointer to Segment info Descriptor to be prepared
1259  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1260  *
1261  * Return: nbuf on success,
1262  *         NULL on failure
1263  */
1264 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1265 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1266 {
1267 	uint32_t cur_frag, nr_frags;
1268 	qdf_dma_addr_t paddr;
1269 	struct dp_tx_sg_info_s *sg_info;
1270 
1271 	sg_info = &msdu_info->u.sg_info;
1272 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1273 
1274 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1275 				QDF_DMA_TO_DEVICE)) {
1276 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1277 				"dma map error\n");
1278 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1279 
1280 		qdf_nbuf_free(nbuf);
1281 		return NULL;
1282 	}
1283 
1284 	seg_info->frags[0].paddr_lo = qdf_nbuf_get_frag_paddr(nbuf, 0);
1285 	seg_info->frags[0].paddr_hi = 0;
1286 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1287 	seg_info->frags[0].vaddr = (void *) nbuf;
1288 
1289 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1290 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1291 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1292 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1293 					"frag dma map error\n");
1294 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1295 			qdf_nbuf_free(nbuf);
1296 			return NULL;
1297 		}
1298 
1299 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1300 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1301 		seg_info->frags[cur_frag + 1].paddr_hi =
1302 			((uint64_t) paddr) >> 32;
1303 		seg_info->frags[cur_frag + 1].len =
1304 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1305 	}
1306 
1307 	seg_info->frag_cnt = (cur_frag + 1);
1308 	seg_info->total_len = qdf_nbuf_len(nbuf);
1309 	seg_info->next = NULL;
1310 
1311 	sg_info->curr_seg = seg_info;
1312 
1313 	msdu_info->frm_type = dp_tx_frm_sg;
1314 	msdu_info->num_seg = 1;
1315 
1316 	return nbuf;
1317 }
1318 
1319 #ifdef MESH_MODE_SUPPORT
1320 
1321 /**
1322  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1323  *				     and prepare msdu_info for mesh frames.
1324  * @vdev: DP vdev handle
1325  * @nbuf: skb
1326  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1327  *
1328  * Return: NULL on failure,
1329  *         nbuf when extracted successfully
1330  */
1331 static
1332 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1333 				struct dp_tx_msdu_info_s *msdu_info)
1334 {
1335 	struct meta_hdr_s *mhdr;
1336 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1337 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1338 
1339 	nbuf = qdf_nbuf_unshare(nbuf);
1340 	if (nbuf == NULL) {
1341 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1342 				"qdf_nbuf_unshare failed\n");
1343 		return nbuf;
1344 	}
1345 
1346 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1347 
1348 	qdf_mem_set(meta_data, 0, sizeof(struct htt_tx_msdu_desc_ext2_t));
1349 
1350 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1351 		meta_data->power = mhdr->power;
1352 
1353 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1354 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1355 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1356 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1357 
1358 		meta_data->dyn_bw = 1;
1359 
1360 		meta_data->valid_pwr = 1;
1361 		meta_data->valid_mcs_mask = 1;
1362 		meta_data->valid_nss_mask = 1;
1363 		meta_data->valid_preamble_type  = 1;
1364 		meta_data->valid_retries = 1;
1365 		meta_data->valid_bw_info = 1;
1366 	}
1367 
1368 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1369 		meta_data->encrypt_type = 0;
1370 		meta_data->valid_encrypt_type = 1;
1371 	}
1372 
1373 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1374 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1375 	else
1376 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1377 
1378 	meta_data->valid_key_flags = 1;
1379 	meta_data->key_flags = (mhdr->keyix & 0x3);
1380 
1381 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1383 				"qdf_nbuf_pull_head failed\n");
1384 		qdf_nbuf_free(nbuf);
1385 		return NULL;
1386 	}
1387 
1388 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1389 			"%s , Meta hdr %0x %0x %0x %0x %0x\n",
1390 			__func__, msdu_info->meta_data[0],
1391 			msdu_info->meta_data[1],
1392 			msdu_info->meta_data[2],
1393 			msdu_info->meta_data[3],
1394 			msdu_info->meta_data[4]);
1395 
1396 	return nbuf;
1397 }
1398 #else
1399 static
1400 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1401 				struct dp_tx_msdu_info_s *msdu_info)
1402 {
1403 	return nbuf;
1404 }
1405 
1406 #endif
1407 
1408 /**
1409  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1410  * @vdev: dp_vdev handle
1411  * @nbuf: skb
1412  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1413  * @tx_q: Tx queue to be used for this Tx frame
1414  * @meta_data: Meta data for mesh frames
1415  * @peer_id: peer_id of the peer in case of NAWDS frames
1416  *
1417  * Return: NULL on success, nbuf on failure
1418  */
1419 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1420 		uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data,
1421 		uint32_t peer_id)
1422 {
1423 	struct dp_peer *peer = NULL;
1424 	qdf_nbuf_t nbuf_copy;
1425 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1426 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1427 				(peer->nawds_enabled || peer->bss_peer)) {
1428 			nbuf_copy = qdf_nbuf_copy(nbuf);
1429 			if (!nbuf_copy) {
1430 				QDF_TRACE(QDF_MODULE_ID_DP,
1431 						QDF_TRACE_LEVEL_ERROR,
1432 						"nbuf copy failed");
				break;
1433 			}
1434 
1435 			peer_id = peer->peer_ids[0];
1436 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
1437 					tx_q, meta_data, peer_id);
1438 			if (nbuf_copy != NULL) {
1439 				qdf_nbuf_free(nbuf);
1440 				return nbuf_copy;
1441 			}
1442 		}
1443 	}
1444 	if (peer_id == HTT_INVALID_PEER)
1445 		return nbuf;
1446 
1447 	qdf_nbuf_free(nbuf);
1448 	return NULL;
1449 }
1450 
1451 /**
1452  * dp_tx_send() - Transmit a frame on a given VAP
1453  * @vap_dev: DP vdev handle
1454  * @nbuf: skb
1455  *
1456  * Entry point for Core Tx layer (DP_TX) invoked from
1457  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
1458  * cases
1459  *
1460  * Return: NULL on success,
1461  *         nbuf when it fails to send
1462  */
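/*
 * Illustrative caller sketch (hypothetical, not taken from this driver):
 *
 *	nbuf = dp_tx_send(dp_vdev_handle, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	// not consumed by DP: drop or requeue
 */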
1463 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1464 {
1465 	struct ether_header *eh = NULL;
1466 	struct dp_tx_msdu_info_s msdu_info;
1467 	struct dp_tx_seg_info_s seg_info;
1468 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1469 	uint16_t peer_id = HTT_INVALID_PEER;
1470 	qdf_nbuf_t nbuf_mesh = NULL;
1471 
1472 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1473 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1474 
1475 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1476 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1477 			"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
1478 			__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
1479 			nbuf->data[3], nbuf->data[4], nbuf->data[5]);
1480 	/*
1481 	 * Set Default Host TID value to invalid TID
1482 	 * (TID override disabled)
1483 	 */
1484 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1485 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1486 
1487 	if (qdf_unlikely(vdev->mesh_vdev)) {
1488 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1489 								&msdu_info);
1490 		if (nbuf_mesh == NULL) {
1491 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1492 					"Extracting mesh metadata failed\n");
1493 			return nbuf;
1494 		}
1495 		nbuf = nbuf_mesh;
1496 	}
1497 
1498 	/*
1499 	 * Get HW Queue to use for this frame.
1500 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1501 	 * dedicated for data and 1 for command.
1502 	 * "queue_id" maps to one hardware ring.
1503 	 *  With each ring, we also associate a unique Tx descriptor pool
1504 	 *  to minimize lock contention for these resources.
1505 	 */
1506 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1507 
1508 	/*
1509 	 * TCL H/W supports 2 DSCP-TID mapping tables.
1510 	 *  Table 1 - Default DSCP-TID mapping table
1511 	 *  Table 2 - 1 DSCP-TID override table
1512 	 *
1513 	 * If we need a different DSCP-TID mapping for this vap,
1514 	 * call tid_classify to extract DSCP/ToS from frame and
1515 	 * map to a TID and store in msdu_info. This is later used
1516 	 * to fill in TCL Input descriptor (per-packet TID override).
1517 	 */
1518 	if (vdev->dscp_tid_map_id > 1)
1519 		dp_tx_classify_tid(vdev, nbuf, &msdu_info);
1520 
1521 	/* Reset the control block */
1522 	qdf_nbuf_reset_ctxt(nbuf);
1523 
1524 	/*
1525 	 * Classify the frame and call corresponding
1526 	 * "prepare" function which extracts the segment (TSO)
1527 	 * and fragmentation information (for TSO , SG, ME, or Raw)
1528 	 * into MSDU_INFO structure which is later used to fill
1529 	 * SW and HW descriptors.
1530 	 */
1531 	if (qdf_nbuf_is_tso(nbuf)) {
1532 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1533 			  "%s TSO frame %pK\n", __func__, vdev);
1534 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
1535 				qdf_nbuf_len(nbuf));
1536 
1537 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
1538 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1539 					"%s tso_prepare fail vdev_id:%d\n",
1540 					__func__, vdev->vdev_id);
1541 			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
1542 			return nbuf;
1543 		}
1544 
1545 		goto send_multiple;
1546 	}
1547 
1548 	/* SG */
1549 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1550 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
1551 
1552 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1553 			 "%s non-TSO SG frame %pK\n", __func__, vdev);
1554 
1555 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
1556 				qdf_nbuf_len(nbuf));
1557 
1558 		goto send_multiple;
1559 	}
1560 
1561 #ifdef ATH_SUPPORT_IQUE
1562 	/* Mcast to Ucast Conversion*/
1563 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1564 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1565 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1566 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1567 				  "%s Mcast frm for ME %pK\n", __func__, vdev);
1568 
1569 			DP_STATS_INC_PKT(vdev,
1570 					tx_i.mcast_en.mcast_pkt, 1,
1571 					qdf_nbuf_len(nbuf));
1572 			if (dp_tx_prepare_send_me(vdev, nbuf)) {
1573 				qdf_nbuf_free(nbuf);
1574 				return NULL;
1575 			}
1576 			return nbuf;
1577 		}
1578 	}
1579 #endif
1580 
1581 	/* RAW */
1582 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
1583 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
1584 		if (nbuf == NULL)
1585 			return NULL;
1586 
1587 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1588 			  "%s Raw frame %pK\n", __func__, vdev);
1589 
1590 		goto send_multiple;
1591 
1592 	}
1593 
1594 	if (vdev->nawds_enabled) {
1595 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1596 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1597 			nbuf = dp_tx_prepare_nawds(vdev, nbuf, msdu_info.tid,
1598 					&msdu_info.tx_queue,
1599 					msdu_info.meta_data, peer_id);
1600 			return nbuf;
1601 		}
1602 	}
1603 
1604 	/*  Single linear frame */
1605 	/*
1606 	 * If nbuf is a simple linear frame, use send_single function to
1607 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1608 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1609 	 */
1610 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
1611 			&msdu_info.tx_queue, msdu_info.meta_data, peer_id);
1612 
1613 	return nbuf;
1614 
1615 send_multiple:
1616 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
1617 
1618 	return nbuf;
1619 }
1620 
1621 /**
1622  * dp_tx_reinject_handler() - Tx Reinject Handler
1623  * @tx_desc: software descriptor head pointer
1624  * @status : Tx completion status from HTT descriptor
1625  *
1626  * This function reinjects frames back to Target.
1627  * Todo - Host queue needs to be added
1628  *
1629  * Return: none
1630  */
1631 static
1632 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1633 {
1634 	struct dp_vdev *vdev;
1635 	struct dp_peer *peer = NULL;
1636 	uint32_t peer_id = HTT_INVALID_PEER;
1637 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1638 	qdf_nbuf_t nbuf_copy = NULL;
1639 	struct dp_tx_msdu_info_s msdu_info;
1640 
1641 	vdev = tx_desc->vdev;
1642 
1643 	qdf_assert(vdev);
1644 
1645 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1646 
1647 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1648 
1649 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1650 			"%s Tx reinject path\n", __func__);
1651 
1652 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
1653 			qdf_nbuf_len(tx_desc->nbuf));
1654 
1655 	if (!vdev->osif_proxy_arp) {
1656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1657 				"function pointer to proxy arp not present\n");
1658 		return;
1659 	}
1660 
1661 	if (qdf_unlikely(vdev->mesh_vdev)) {
1662 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
1663 	} else {
1664 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1665 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1666 					(peer->bss_peer || peer->nawds_enabled)
1667 					&& !(vdev->osif_proxy_arp(
1668 							vdev->osif_vdev,
1669 							nbuf))) {
1670 				nbuf_copy = qdf_nbuf_copy(nbuf);
1671 
1672 				if (!nbuf_copy) {
1673 					QDF_TRACE(QDF_MODULE_ID_DP,
1674 							QDF_TRACE_LEVEL_ERROR,
1675 							FL("nbuf copy failed"));
1676 					break;
1677 				}
1678 
1679 				if (peer->nawds_enabled)
1680 					peer_id = peer->peer_ids[0];
1681 				else
1682 					peer_id = HTT_INVALID_PEER;
1683 
1684 				nbuf_copy = dp_tx_send_msdu_single(vdev,
1685 						nbuf_copy, msdu_info.tid,
1686 						&msdu_info.tx_queue,
1687 						msdu_info.meta_data, peer_id);
1688 
1689 				if (nbuf_copy) {
1690 					QDF_TRACE(QDF_MODULE_ID_DP,
1691 							QDF_TRACE_LEVEL_ERROR,
1692 							FL("pkt send failed"));
1693 					qdf_nbuf_free(nbuf_copy);
1694 				}
1695 			}
1696 		}
1697 	}
1698 
1699 	qdf_nbuf_free(nbuf);
1700 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1701 }
1702 
1703 /**
1704  * dp_tx_inspect_handler() - Tx Inspect Handler
1705  * @tx_desc: software descriptor head pointer
1706  * @status : Tx completion status from HTT descriptor
1707  *
1708  * Handles Tx frames sent back to Host for inspection
1709  * (ProxyARP)
1710  *
1711  * Return: none
1712  */
1713 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1714 {
1715 
1716 	struct dp_soc *soc;
1717 	struct dp_pdev *pdev = tx_desc->pdev;
1718 
1719 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1720 			"%s Tx inspect path\n",
1721 			__func__);
1722 
1723 	qdf_assert(pdev);
1724 
1725 	soc = pdev->soc;
1726 
1727 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
1728 			qdf_nbuf_len(tx_desc->nbuf));
1729 
1730 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
1731 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1732 }
1733 
1734 /**
1735  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
1736  * @soc: Soc handle
1737  * @desc: software Tx descriptor to be processed
1738  *
1739  * Return: none
1740  */
1741 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
1742 		struct dp_tx_desc_s *desc)
1743 {
1744 	struct dp_vdev *vdev = desc->vdev;
1745 	qdf_nbuf_t nbuf = desc->nbuf;
1746 
1747 	/* If it is TDLS mgmt, don't unmap or free the frame */
1748 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
1749 		return dp_non_std_tx_comp_free_buff(desc, vdev);
1750 
1751 	/* 0 : MSDU buffer, 1 : MLE */
1752 	if (desc->msdu_ext_desc) {
1753 		/* TSO free */
1754 		if (hal_tx_ext_desc_get_tso_enable(
1755 					desc->msdu_ext_desc->vaddr)) {
1756 			/* If the remaining number of segments is 0,
1757 			 * the actual TSO buffer can be unmapped and freed */
1758 			if (!DP_DESC_NUM_FRAG(desc)) {
1759 				qdf_nbuf_unmap(soc->osdev, nbuf,
1760 						QDF_DMA_TO_DEVICE);
1761 				qdf_nbuf_free(nbuf);
1762 				return;
1763 			}
1764 		}
1765 	}
1766 
1767 	if (desc->flags & DP_TX_DESC_FLAG_ME)
1768 		dp_tx_me_free_buf(desc->pdev, desc->me_buffer);
1769 
1770 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1771 
1772 	if (!vdev->mesh_vdev) {
1773 		qdf_nbuf_free(nbuf);
1774 	} else {
1775 		vdev->osif_tx_free_ext((nbuf));
1776 	}
1777 }
1778 
1779 /**
1780  * dp_tx_mec_handler() - Tx  MEC Notify Handler
1781  * @vdev: pointer to dp vdev handle
1782  * @status : Tx completion status from HTT descriptor
1783  *
1784  * Handles MEC notify event sent from fw to Host
1785  *
1786  * Return: none
1787  */
1788 #ifdef FEATURE_WDS
1789 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
1790 {
1791 
1792 	struct dp_soc *soc;
1793 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
1794 	struct dp_peer *peer;
1795 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
1796 
1797 	soc = vdev->pdev->soc;
1798 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1799 	peer = TAILQ_FIRST(&vdev->peer_list);
1800 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1801 
1802 	if (!peer) {
1803 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1804 				FL("peer is NULL"));
1805 		return;
1806 	}
1807 
1808 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1809 			"%s Tx MEC Handler\n",
1810 			__func__);
1811 
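	/*
	 * The MAC address reported in the MEC notify event is carried in the
	 * HTT status words starting at byte (DP_MAC_ADDR_LEN - 2) and is
	 * copied out here in reverse byte order.
	 */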
1812 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
1813 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
1814 					status[(DP_MAC_ADDR_LEN - 2) + i];
1815 
1816 	if (!dp_peer_add_ast(soc, peer, mac_addr, 2)) {
1817 		soc->cdp_soc.ol_ops->peer_add_wds_entry(
1818 				vdev->pdev->osif_pdev,
1819 				mac_addr,
1820 				vdev->mac_addr.raw,
1821 				flags);
1822 	}
1823 }
1824 #else
1825 static void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
1826 {
1827 }
1828 #endif
1829 
1830 /**
1831  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
1832  * @tx_desc: software descriptor head pointer
1833  * @status : Tx completion status from HTT descriptor
1834  *
1835  * This function will process HTT Tx indication messages from Target
1836  *
1837  * Return: none
1838  */
1839 static
1840 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1841 {
1842 	uint8_t tx_status;
1843 	struct dp_pdev *pdev;
1844 	struct dp_vdev *vdev;
1845 	struct dp_soc *soc;
1846 	uint32_t *htt_status_word = (uint32_t *) status;
1847 
1848 	qdf_assert(tx_desc->pdev);
1849 
1850 	pdev = tx_desc->pdev;
1851 	vdev = tx_desc->vdev;
1852 	soc = pdev->soc;
1853 
1854 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
1855 
1856 	switch (tx_status) {
1857 	case HTT_TX_FW2WBM_TX_STATUS_OK:
1858 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
1859 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
1860 	{
1861 		dp_tx_comp_free_buf(soc, tx_desc);
1862 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1863 		break;
1864 	}
1865 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
1866 	{
1867 		dp_tx_reinject_handler(tx_desc, status);
1868 		break;
1869 	}
1870 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
1871 	{
1872 		dp_tx_inspect_handler(tx_desc, status);
1873 		break;
1874 	}
1875 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
1876 	{
1877 		dp_tx_mec_handler(vdev, status);
1878 		break;
1879 	}
1880 	default:
1881 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1882 				"%s Invalid HTT tx_status %d\n",
1883 				__func__, tx_status);
1884 		break;
1885 	}
1886 }
1887 
1888 #ifdef MESH_MODE_SUPPORT
1889 /**
1890  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
1891  *                                         in mesh meta header
1892  * @tx_desc: software descriptor head pointer
1893  * @ts: pointer to tx completion stats
1894  * Return: none
1895  */
1896 static
1897 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
1898 		struct hal_tx_completion_status *ts)
1899 {
1900 	struct meta_hdr_s *mhdr;
1901 	qdf_nbuf_t netbuf = tx_desc->nbuf;
1902 
1903 	if (!tx_desc->msdu_ext_desc) {
1904 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
1905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1906 				"netbuf %pK offset %d\n",
1907 				netbuf, tx_desc->pkt_offset);
1908 			return;
1909 		}
1910 	}
1911 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
1912 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1913 			"netbuf %pK offset %d\n", netbuf,
1914 			sizeof(struct meta_hdr_s));
1915 		return;
1916 	}
1917 
1918 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
1919 	mhdr->rssi = ts->ack_frame_rssi;
1920 	mhdr->channel = tx_desc->pdev->operating_channel;
1921 }
1922 
1923 #else
1924 static
1925 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
1926 		struct hal_tx_completion_status *ts)
1927 {
1928 }
1929 
1930 #endif
1931 
1932 /**
1933  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
1934  * @peer: Handle to DP peer
1935  * @ts: pointer to HAL Tx completion stats
1936  * @length: MSDU length
1937  *
1938  * Return: None
1939  */
1940 static void dp_tx_update_peer_stats(struct dp_peer *peer,
1941 		struct hal_tx_completion_status *ts, uint32_t length)
1942 {
1943 	struct dp_pdev *pdev = peer->vdev->pdev;
1944 	struct dp_soc *soc = pdev->soc;
1945 
1946 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
1947 
1948 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
1949 		return;
1950 
1951 	DP_STATS_INCC(peer, tx.tx_failed, 1,
1952 			!(ts->status == HAL_TX_TQM_RR_FRAME_ACKED));
1953 
1954 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
1955 			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
1956 
1957 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
1958 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
1959 
1960 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
1961 			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
1962 
1963 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
1964 			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
1965 
1966 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
1967 		return;
1968 
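	/*
	 * Per-preamble MCS accounting: for each pkt_type, an MCS at or above
	 * that preamble's maximum increments the MAX_MCS overflow bucket,
	 * while an MCS within range increments the bucket for the reported
	 * MCS.
	 */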
1969 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
1970 		((ts->mcs >= MAX_MCS_11A) && (ts->pkt_type == DOT11_A)));
1971 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
1972 		((ts->mcs <= MAX_MCS_11A) && (ts->pkt_type == DOT11_A)));
1973 
1974 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
1975 		((ts->mcs >= MAX_MCS_11B) && (ts->pkt_type == DOT11_B)));
1976 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
1977 		((ts->mcs <= MAX_MCS_11B) && (ts->pkt_type == DOT11_B)));
1978 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
1979 		((ts->mcs >= MAX_MCS_11A) && (ts->pkt_type == DOT11_N)));
1980 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
1981 		((ts->mcs <= MAX_MCS_11A) && (ts->pkt_type == DOT11_N)));
1982 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
1983 		((ts->mcs >= MAX_MCS_11AC) && (ts->pkt_type == DOT11_AC)));
1984 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
1985 		((ts->mcs <= MAX_MCS_11AC) && (ts->pkt_type == DOT11_AC)));
1986 
1987 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
1988 		((ts->mcs >= (MAX_MCS-1)) && (ts->pkt_type == DOT11_AX)));
1989 	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
1990 		((ts->mcs <= (MAX_MCS-1)) && (ts->pkt_type == DOT11_AX)));
1991 
1992 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
1993 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
1994 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
1995 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
1996 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
1997 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
1998 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
1999 	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2000 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2001 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2002 
2003 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2004 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
2005 				&peer->stats, ts->peer_id,
2006 				UPDATE_PEER_STATS);
2007 	}
2008 }
2009 
2010 /**
2011  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2012  * @tx_desc: software descriptor head pointer
2013  * @length: packet length
2014  *
2015  * Return: none
2016  */
2017 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2018 		uint32_t length)
2019 {
2020 	struct hal_tx_completion_status ts;
2021 	struct dp_soc *soc = NULL;
2022 	struct dp_vdev *vdev = tx_desc->vdev;
2023 	struct dp_peer *peer = NULL;
2024 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2025 
2026 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2027 				"-------------------- \n"
2028 				"Tx Completion Stats: \n"
2029 				"-------------------- \n"
2030 				"ack_frame_rssi = %d \n"
2031 				"first_msdu = %d \n"
2032 				"last_msdu = %d \n"
2033 				"msdu_part_of_amsdu = %d \n"
2034 				"rate_stats valid = %d \n"
2035 				"bw = %d \n"
2036 				"pkt_type = %d \n"
2037 				"stbc = %d \n"
2038 				"ldpc = %d \n"
2039 				"sgi = %d \n"
2040 				"mcs = %d \n"
2041 				"ofdma = %d \n"
2042 				"tones_in_ru = %d \n"
2043 				"tsf = %d \n"
2044 				"ppdu_id = %d \n"
2045 				"transmit_cnt = %d \n"
2046 				"tid = %d \n"
2047 				"peer_id = %d \n",
2048 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2049 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2050 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2051 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2052 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2053 				ts.peer_id);
2054 
2055 	if (!vdev) {
2056 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2057 				"invalid vdev");
2058 		goto out;
2059 	}
2060 
2061 	soc = vdev->pdev->soc;
2062 
2063 	/* Update SoC level stats */
2064 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2065 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2066 
2067 	/* Update per-packet stats */
2068 	if (qdf_unlikely(vdev->mesh_vdev))
2069 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2070 
2071 	/* Update peer level stats */
2072 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2073 	if (!peer) {
2074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2075 				"invalid peer");
2076 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2077 		goto out;
2078 	}
2079 
2080 	dp_tx_update_peer_stats(peer, &ts, length);
2081 
2082 out:
2083 	return;
2084 }
2085 
2086 /**
2087  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2088  * @soc: core txrx main context
2089  * @comp_head: software descriptor head pointer
2090  *
2091  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
2092  * and release the software descriptors after processing is complete
2093  *
2094  * Return: none
2095  */
2096 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2097 		struct dp_tx_desc_s *comp_head)
2098 {
2099 	struct dp_tx_desc_s *desc;
2100 	struct dp_tx_desc_s *next;
2101 	struct hal_tx_completion_status ts = {0};
2102 	uint32_t length;
2103 	struct dp_peer *peer;
2104 
2105 	DP_HIST_INIT();
2106 	desc = comp_head;
2107 
2108 	while (desc) {
2109 		hal_tx_comp_get_status(&desc->comp, &ts);
2110 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2111 		length = qdf_nbuf_len(desc->nbuf);
2112 
2113 		/* Process Tx status in descriptor */
2114 		if (soc->process_tx_status ||
2115 				(desc->vdev && desc->vdev->mesh_vdev))
2116 			dp_tx_comp_process_tx_status(desc, length);
2117 
2118 		dp_tx_comp_free_buf(soc, desc);
2119 
2120 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2121 
2122 		next = desc->next;
2123 		dp_tx_desc_release(desc, desc->pool_id);
2124 		desc = next;
2125 	}
2126 	DP_TX_HIST_STATS_PER_PDEV();
2127 }
2128 
2129 /**
2130  * dp_tx_comp_handler() - Tx completion handler
2131  * @soc: core txrx main context
2132  * @hal_srng: HAL SRNG handle of the Tx completion ring
2133  * @quota: No. of packets/descriptors that can be serviced in one loop
2134  *
2135  * This function will collect hardware release ring element contents and
2136  * handle descriptor contents. Based on contents, free packet or handle error
2137  * conditions
2138  *
2139  * Return: Number of Tx completions processed
2140  */
2141 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2142 {
2143 	void *tx_comp_hal_desc;
2144 	uint8_t buffer_src;
2145 	uint8_t pool_id;
2146 	uint32_t tx_desc_id;
2147 	struct dp_tx_desc_s *tx_desc = NULL;
2148 	struct dp_tx_desc_s *head_desc = NULL;
2149 	struct dp_tx_desc_s *tail_desc = NULL;
2150 	uint32_t num_processed;
2151 	uint32_t count;
2152 
2153 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2154 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2155 				"%s %d : HAL RING Access Failed -- %pK\n",
2156 				__func__, __LINE__, hal_srng);
2157 		return 0;
2158 	}
2159 
2160 	num_processed = 0;
2161 	count = 0;
2162 
2163 	/* Find head descriptor from completion ring */
2164 	while (qdf_likely(tx_comp_hal_desc =
2165 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2166 
2167 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2168 
2169 		/* If this buffer was not released by TQM or FW, then it is not a
2170 		 * Tx completion indication; assert */
2171 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2172 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2173 
2174 			QDF_TRACE(QDF_MODULE_ID_DP,
2175 					QDF_TRACE_LEVEL_FATAL,
2176 					"Tx comp release_src != TQM | FW");
2177 
2178 			qdf_assert_always(0);
2179 		}
2180 
2181 		/* Get descriptor id */
2182 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
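		/*
		 * The cookie packs the descriptor pool id, page index and
		 * offset within the page; each field is recovered with its
		 * DP_TX_DESC_ID_* mask and shift before the descriptor is
		 * looked back up.
		 */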
2183 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2184 			DP_TX_DESC_ID_POOL_OS;
2185 
2186 		/* Pool ID is out of limit. Error */
2187 		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
2188 					soc->wlan_cfg_ctx)) {
2189 			QDF_TRACE(QDF_MODULE_ID_DP,
2190 					QDF_TRACE_LEVEL_FATAL,
2191 					"Tx Comp pool id %d not valid",
2192 					pool_id);
2193 
2194 			qdf_assert_always(0);
2195 		}
2196 
2197 		/* Find Tx descriptor */
2198 		tx_desc = dp_tx_desc_find(soc, pool_id,
2199 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
2200 				DP_TX_DESC_ID_PAGE_OS,
2201 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
2202 				DP_TX_DESC_ID_OFFSET_OS);
2203 
2204 		/* Pool id is not matching. Error */
2205 		if (tx_desc && (tx_desc->pool_id != pool_id)) {
2206 			QDF_TRACE(QDF_MODULE_ID_DP,
2207 					QDF_TRACE_LEVEL_FATAL,
2208 					"Tx Comp pool id %d not matched %d",
2209 					pool_id, tx_desc->pool_id);
2210 
2211 			qdf_assert_always(0);
2212 		}
2213 
2214 		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
2215 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
2216 			QDF_TRACE(QDF_MODULE_ID_DP,
2217 					QDF_TRACE_LEVEL_FATAL,
2218 					"Txdesc invalid, flgs = %x,id = %d",
2219 					tx_desc->flags,	tx_desc_id);
2220 
2221 			qdf_assert_always(0);
2222 		}
2223 
2224 		/*
2225 		 * If the release source is FW, process the HTT status
2226 		 */
2227 		if (qdf_unlikely(buffer_src ==
2228 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2229 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
2230 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
2231 					htt_tx_status);
2232 			dp_tx_process_htt_completion(tx_desc,
2233 					htt_tx_status);
2234 		} else {
2235 
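			/*
			 * TQM-released descriptors are chained into a local
			 * head/tail list here and handed to
			 * dp_tx_comp_process_desc() in one batch once ring
			 * access has ended.
			 */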
2236 			/* First ring descriptor on the cycle */
2237 			if (!head_desc) {
2238 				head_desc = tx_desc;
2239 				tail_desc = tx_desc;
2240 			}
2241 
2242 			tail_desc->next = tx_desc;
2243 			tx_desc->next = NULL;
2244 			tail_desc = tx_desc;
2245 
2246 			/* Collect hw completion contents */
2247 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
2248 					&tx_desc->comp, soc->process_tx_status);
2249 
2250 		}
2251 
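		/*
		 * One unit of quota is charged for every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) descriptors reaped, so a
		 * single call may drain more ring entries than the raw quota.
		 */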
2252 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
2253 		/* Decrement PM usage count if the packet has been sent.*/
2254 		hif_pm_runtime_put(soc->hif_handle);
2255 
2256 		/*
2257 		 * Stop processing once the processed packet count reaches
2258 		 * the given quota
2259 		 */
2260 		if (num_processed >= quota)
2261 			break;
2262 
2263 		count++;
2264 	}
2265 
2266 	hal_srng_access_end(soc->hal_soc, hal_srng);
2267 
2268 	/* Process the reaped descriptors */
2269 	if (head_desc)
2270 		dp_tx_comp_process_desc(soc, head_desc);
2271 
2272 	return num_processed;
2273 }
2274 
2275 /**
2276  * dp_tx_non_std() - Allow the control-path SW to send data frames
2277  *
2278  * @vdev_handle: which vdev should transmit the tx data frames
2279  * @tx_spec: what non-standard handling to apply to the tx data frames
2280  * @msdu_list: NULL-terminated list of tx MSDUs
2281  *
2282  * Return: NULL on success,
2283  *         nbuf when it fails to send
2284  */
2285 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
2286 		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
2287 {
2288 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2289 
2290 	if (tx_spec & OL_TX_SPEC_NO_FREE)
2291 		vdev->is_tdls_frame = true;
2292 	return dp_tx_send(vdev_handle, msdu_list);
2293 }
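
/*
 * Usage sketch (illustrative, not taken from a caller in this file): a
 * control-path caller that must keep ownership of a TDLS management frame
 * could invoke
 *
 *	ret_nbuf = dp_tx_non_std(vdev_handle, OL_TX_SPEC_NO_FREE, msdu);
 *
 * OL_TX_SPEC_NO_FREE marks the vdev so the frame is treated as a TDLS frame
 * whose buffer is not freed on Tx completion; a non-NULL return means the
 * frame was not consumed and the caller still owns it.
 */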
2294 
2295 /**
2296  * dp_tx_vdev_attach() - attach vdev to dp tx
2297  * @vdev: virtual device instance
2298  *
2299  * Return: QDF_STATUS_SUCCESS: success
2300  *         QDF_STATUS_E_RESOURCES: Error return
2301  */
2302 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
2303 {
2304 	/*
2305 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
2306 	 */
2307 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
2308 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
2309 
2310 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
2311 			vdev->vdev_id);
2312 
2313 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
2314 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
2315 
2316 	/*
2317 	 * Set HTT Extension Valid bit to 0 by default
2318 	 */
2319 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
2320 
2321 	dp_tx_vdev_update_search_flags(vdev);
2322 
2323 	return QDF_STATUS_SUCCESS;
2324 }
2325 
2326 /**
2327  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
2328  * @vdev: virtual device instance
2329  *
2330  * Return: void
2331  *
2332  */
2333 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
2334 {
2335 	/*
2336 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
2337 	 * for TDLS link
2338 	 *
2339 	 * Enable AddrY (SA based search) only for non-WDS STA and
2340 	 * ProxySTA VAP modes.
2341 	 *
2342 	 * In all other VAP modes, only DA based search should be
2343 	 * enabled
2344 	 */
2345 	if (vdev->opmode == wlan_op_mode_sta &&
2346 	    vdev->tdls_link_connected)
2347 		vdev->hal_desc_addr_search_flags =
2348 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
2349 	else if ((vdev->opmode == wlan_op_mode_sta &&
2350 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
2351 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
2352 	else
2353 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
2354 }
2355 
2356 /**
2357  * dp_tx_vdev_detach() - detach vdev from dp tx
2358  * @vdev: virtual device instance
2359  *
2360  * Return: QDF_STATUS_SUCCESS: success
2361  *         QDF_STATUS_E_RESOURCES: Error return
2362  */
2363 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
2364 {
2365 	return QDF_STATUS_SUCCESS;
2366 }
2367 
2368 /**
2369  * dp_tx_pdev_attach() - attach pdev to dp tx
2370  * @pdev: physical device instance
2371  *
2372  * Return: QDF_STATUS_SUCCESS: success
2373  *         QDF_STATUS_E_RESOURCES: Error return
2374  */
2375 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
2376 {
2377 	struct dp_soc *soc = pdev->soc;
2378 
2379 	/* Initialize Flow control counters */
2380 	qdf_atomic_init(&pdev->num_tx_exception);
2381 	qdf_atomic_init(&pdev->num_tx_outstanding);
2382 
2383 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2384 		/* Initialize descriptors in TCL Ring */
2385 		hal_tx_init_data_ring(soc->hal_soc,
2386 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
2387 	}
2388 
2389 	return QDF_STATUS_SUCCESS;
2390 }
2391 
2392 /**
2393  * dp_tx_pdev_detach() - detach pdev from dp tx
2394  * @pdev: physical device instance
2395  *
2396  * Return: QDF_STATUS_SUCCESS: success
2397  *         QDF_STATUS_E_RESOURCES: Error return
2398  */
2399 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
2400 {
2401 	/* TODO: check whether any per-pdev Tx resources need to be freed here */
2402 	return QDF_STATUS_SUCCESS;
2403 }
2404 
2405 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2406 /* Pools will be allocated dynamically */
2407 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
2408 					int num_desc)
2409 {
2410 	uint8_t i;
2411 
2412 	for (i = 0; i < num_pool; i++) {
2413 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
2414 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
2415 	}
2416 
2417 	return 0;
2418 }
2419 
2420 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
2421 {
2422 	uint8_t i;
2423 
2424 	for (i = 0; i < num_pool; i++)
2425 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
2426 }
2427 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
2428 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
2429 					int num_desc)
2430 {
2431 	uint8_t i;
2432 
2433 	/* Allocate software Tx descriptor pools */
2434 	for (i = 0; i < num_pool; i++) {
2435 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
2436 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2437 					"%s Tx Desc Pool alloc %d failed %pK\n",
2438 					__func__, i, soc);
2439 			return ENOMEM;
2440 		}
2441 	}
2442 	return 0;
2443 }
2444 
2445 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
2446 {
2447 	uint8_t i;
2448 
2449 	for (i = 0; i < num_pool; i++) {
2450 		if (dp_tx_desc_pool_free(soc, i)) {
2451 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2452 				"%s Tx Desc Pool Free failed\n", __func__);
2453 		}
2454 	}
2455 }
2456 
2457 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
2458 
2459 /**
2460  * dp_tx_soc_detach() - detach soc from dp tx
2461  * @soc: core txrx main context
2462  *
2463  * This function detaches dp tx from the main device context and
2464  * frees the dp tx resources allocated during attach
2465  *
2466  * Return: QDF_STATUS_SUCCESS: success
2467  *         QDF_STATUS_E_RESOURCES: Error return
2468  */
2469 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
2470 {
2471 	uint8_t num_pool;
2472 	uint16_t num_desc;
2473 	uint16_t num_ext_desc;
2474 	uint8_t i;
2475 
2476 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2477 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2478 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2479 
2480 	dp_tx_flow_control_deinit(soc);
2481 	dp_tx_delete_static_pools(soc, num_pool);
2482 
2483 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2484 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
2485 			__func__, num_pool, num_desc);
2486 
2487 	for (i = 0; i < num_pool; i++) {
2488 		if (dp_tx_ext_desc_pool_free(soc, i)) {
2489 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2490 					"%s Tx Ext Desc Pool Free failed\n",
2491 					__func__);
2492 			return QDF_STATUS_E_RESOURCES;
2493 		}
2494 	}
2495 
2496 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2497 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
2498 			__func__, num_pool, num_ext_desc);
2499 
2500 	for (i = 0; i < num_pool; i++) {
2501 		dp_tx_tso_desc_pool_free(soc, i);
2502 	}
2503 
2504 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2505 			"%s TSO Desc Pool %d Free descs = %d\n",
2506 			__func__, num_pool, num_desc);
2507 
2508 
2509 	for (i = 0; i < num_pool; i++)
2510 		dp_tx_tso_num_seg_pool_free(soc, i);
2511 
2512 
2513 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2514 		"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
2515 		__func__, num_pool, num_desc);
2516 
2517 	return QDF_STATUS_SUCCESS;
2518 }
2519 
2520 /**
2521  * dp_tx_soc_attach() - attach soc to dp tx
2522  * @soc: core txrx main context
2523  *
2524  * This function attaches dp tx to the main device context and
2525  * allocates and initializes the dp tx resources
2526  *
2527  * Return: QDF_STATUS_SUCCESS: success
2528  *         QDF_STATUS_E_RESOURCES: Error return
2529  */
2530 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
2531 {
2532 	uint8_t i;
2533 	uint8_t num_pool;
2534 	uint32_t num_desc;
2535 	uint32_t num_ext_desc;
2536 
2537 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2538 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2539 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2540 
2541 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
2542 		goto fail;
2543 
2544 	dp_tx_flow_control_init(soc);
2545 
2546 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2547 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
2548 			__func__, num_pool, num_desc);
2549 
2550 	/* Allocate extension tx descriptor pools */
2551 	for (i = 0; i < num_pool; i++) {
2552 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
2553 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2554 				"MSDU Ext Desc Pool alloc %d failed %pK\n",
2555 				i, soc);
2556 
2557 			goto fail;
2558 		}
2559 	}
2560 
2561 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2562 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
2563 			__func__, num_pool, num_ext_desc);
2564 
2565 	for (i = 0; i < num_pool; i++) {
2566 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
2567 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2568 				"TSO Desc Pool alloc %d failed %pK\n",
2569 				i, soc);
2570 
2571 			goto fail;
2572 		}
2573 	}
2574 
2575 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2576 			"%s TSO Desc Alloc %d, descs = %d\n",
2577 			__func__, num_pool, num_desc);
2578 
2579 	for (i = 0; i < num_pool; i++) {
2580 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
2581 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2582 				"TSO Num of seg Pool alloc %d failed %pK\n",
2583 				i, soc);
2584 
2585 			goto fail;
2586 		}
2587 	}
2588 
2589 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2590 			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
2591 			__func__, num_pool, num_desc);
2592 
2593 	/* Initialize descriptors in TCL Rings */
2594 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2595 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2596 			hal_tx_init_data_ring(soc->hal_soc,
2597 					soc->tcl_data_ring[i].hal_srng);
2598 		}
2599 	}
2600 
2601 	/*
2602 	 * todo - Add a runtime config option to enable this.
2603 	 */
2604 	/*
2605 	 * Due to multiple issues on NPR EMU, enable it selectively
2606 	 * only for NPR EMU, should be removed, once NPR platforms
2607 	 * are stable.
2608 	 */
2609 	soc->process_tx_status = 0;
2610 
2611 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2612 			"%s HAL Tx init Success\n", __func__);
2613 
2614 	return QDF_STATUS_SUCCESS;
2615 
2616 fail:
2617 	/* Detach will take care of freeing only allocated resources */
2618 	dp_tx_soc_detach(soc);
2619 	return QDF_STATUS_E_RESOURCES;
2620 }
2621 
2622 /*
2623  * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
2624  * @pdev: pointer to DP PDEV structure
2625  * @seg_info_head: Pointer to the head of list
2626  *
2627  * return: void
2628  */
2629 static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
2630 		struct dp_tx_seg_info_s *seg_info_head)
2631 {
2632 	struct dp_tx_me_buf_t *mc_uc_buf;
2633 	struct dp_tx_seg_info_s *seg_info_new = NULL;
2634 	qdf_nbuf_t nbuf = NULL;
2635 	uint64_t phy_addr;
2636 
2637 	while (seg_info_head) {
2638 		nbuf = seg_info_head->nbuf;
2639 		mc_uc_buf = (struct dp_tx_me_buf_t *)
2640 			seg_info_head->frags[0].vaddr;
2641 		phy_addr = seg_info_head->frags[0].paddr_hi;
2642 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
2643 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
2644 				phy_addr,
2645 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
2646 		dp_tx_me_free_buf(pdev, mc_uc_buf);
2647 		qdf_nbuf_free(nbuf);
2648 		seg_info_new = seg_info_head;
2649 		seg_info_head = seg_info_head->next;
2650 		qdf_mem_free(seg_info_new);
2651 	}
2652 }
2653 
2654 /**
2655  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
2656  * @vdev_handle: DP VDEV handle
2657  * @nbuf: Multicast nbuf
2658  * @newmac: Table of the clients to which packets have to be sent
2659  * @new_mac_cnt: No of clients
2660  *
2661  * return: no of converted packets
2662  */
2663 uint16_t
2664 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
2665 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
2666 {
2667 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2668 	struct dp_pdev *pdev = vdev->pdev;
2669 	struct ether_header *eh;
2670 	uint8_t *data;
2671 	uint16_t len;
2672 
2673 	/* reference to frame dst addr */
2674 	uint8_t *dstmac;
2675 	/* copy of original frame src addr */
2676 	uint8_t srcmac[DP_MAC_ADDR_LEN];
2677 
2678 	/* local index into newmac */
2679 	uint8_t new_mac_idx = 0;
2680 	struct dp_tx_me_buf_t *mc_uc_buf;
2681 	qdf_nbuf_t  nbuf_clone;
2682 	struct dp_tx_msdu_info_s msdu_info;
2683 	struct dp_tx_seg_info_s *seg_info_head = NULL;
2684 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
2685 	struct dp_tx_seg_info_s *seg_info_new;
2686 	struct dp_tx_frag_info_s data_frag;
2687 	qdf_dma_addr_t paddr_data;
2688 	qdf_dma_addr_t paddr_mcbuf = 0;
2689 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
2690 	QDF_STATUS status;
2691 
2692 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2693 
2694 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2695 
2696 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2697 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
2698 
2699 	len = qdf_nbuf_len(nbuf);
2700 
2701 	data = qdf_nbuf_data(nbuf);
2702 
2703 	status = qdf_nbuf_map(vdev->osdev, nbuf,
2704 			QDF_DMA_TO_DEVICE);
2705 
2706 	if (status) {
2707 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2708 				"Mapping failure Error:%d", status);
2709 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
2710 		return 0;
2711 	}
2712 
2713 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
2714 
2715 	/*preparing data fragment*/
2716 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
2717 	data_frag.paddr_lo = (uint32_t)paddr_data;
2718 	data_frag.paddr_hi = ((uint64_t)paddr_data & 0xffffffff00000000) >> 32;
2719 	data_frag.len = len - DP_MAC_ADDR_LEN;
2720 
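	/*
	 * Each unicast copy is built from two fragments: frag[0] is the
	 * per-client destination MAC held in an ME buffer, frag[1] is the
	 * original frame past its DA, so the payload itself is shared across
	 * copies via nbuf clones.
	 */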
2721 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
2722 		dstmac = newmac[new_mac_idx];
2723 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2724 				"added mac addr (%pM)", dstmac);
2725 
2726 		/* Check for NULL Mac Address */
2727 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
2728 			continue;
2729 
2730 		/* frame to self mac. skip */
2731 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
2732 			continue;
2733 
2734 		/*
2735 		 * TODO: optimize to avoid malloc in per-packet path
2736 		 * For eg. seg_pool can be made part of vdev structure
2737 		 */
2738 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
2739 
2740 		if (!seg_info_new) {
2741 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2742 					"alloc failed");
2743 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
2744 			goto fail_seg_alloc;
2745 		}
2746 
2747 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
2748 		if (mc_uc_buf == NULL)
2749 			goto fail_buf_alloc;
2750 
2751 		/*
2752 		 * TODO: Check if we need to clone the nbuf
2753 		 * Or can we just use the reference for all cases
2754 		 */
2755 		if (new_mac_idx < (new_mac_cnt - 1)) {
2756 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
2757 			if (nbuf_clone == NULL) {
2758 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
2759 				goto fail_clone;
2760 			}
2761 		} else {
2762 			/*
2763 			 * Update the ref
2764 			 * to account for frame sent without cloning
2765 			 */
2766 			qdf_nbuf_ref(nbuf);
2767 			nbuf_clone = nbuf;
2768 		}
2769 
2770 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
2771 
2772 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
2773 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
2774 				&paddr_mcbuf);
2775 
2776 		if (status) {
2777 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2778 					"Mapping failure Error:%d", status);
2779 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
2780 			goto fail_map;
2781 		}
2782 
2783 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
2784 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
2785 		seg_info_new->frags[0].paddr_hi =
2786 			((u64)paddr_mcbuf & 0xffffffff00000000) >> 32;
2787 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
2788 
2789 		seg_info_new->frags[1] = data_frag;
2790 		seg_info_new->nbuf = nbuf_clone;
2791 		seg_info_new->frag_cnt = 2;
2792 		seg_info_new->total_len = len;
2793 
2794 		seg_info_new->next = NULL;
2795 
2796 		if (seg_info_head == NULL)
2797 			seg_info_head = seg_info_new;
2798 		else
2799 			seg_info_tail->next = seg_info_new;
2800 
2801 		seg_info_tail = seg_info_new;
2802 	}
2803 
2804 	if (!seg_info_head)
2805 		return 0;
2806 
2807 	msdu_info.u.sg_info.curr_seg = seg_info_head;
2808 	msdu_info.num_seg = new_mac_cnt;
2809 	msdu_info.frm_type = dp_tx_frm_me;
2810 
2811 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
2812 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2813 
2814 	while (seg_info_head->next) {
2815 		seg_info_new = seg_info_head;
2816 		seg_info_head = seg_info_head->next;
2817 		qdf_mem_free(seg_info_new);
2818 	}
2819 	qdf_mem_free(seg_info_head);
2820 
2821 	return new_mac_cnt;
2822 
2823 fail_map:
2824 	qdf_nbuf_free(nbuf_clone);
2825 
2826 fail_clone:
2827 	dp_tx_me_free_buf(pdev, mc_uc_buf);
2828 
2829 fail_buf_alloc:
2830 	qdf_mem_free(seg_info_new);
2831 
2832 fail_seg_alloc:
2833 	dp_tx_me_mem_free(pdev, seg_info_head);
2834 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2835 	return 0;
2836 }
2837