xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_net_types.h"
28 #include <wlan_cfg.h>
29 #ifdef MESH_MODE_SUPPORT
30 #include "if_meta_hdr.h"
31 #endif
32 
33 #ifdef TX_PER_PDEV_DESC_POOL
34 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
35 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
36 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
37 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
38 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
39 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
40 #else
41 	#ifdef TX_PER_VDEV_DESC_POOL
42 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
43 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
44 	#else
45 		#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
46 		#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
47 	#endif /* TX_PER_VDEV_DESC_POOL */
48 #endif /* TX_PER_PDEV_DESC_POOL */
49 
50 /* TODO Add support in TSO */
51 #define DP_DESC_NUM_FRAG(x) 0
52 
53 /* disable TQM_BYPASS */
54 #define TQM_BYPASS_WAR 0
55 
56 /* invalid peer id for reinject*/
57 #define DP_INVALID_PEER 0XFFFE
58 
59 /* Mapping between hal encrypt type and cdp_sec_type */
60 #define MAX_CDP_SEC_TYPE 12
61 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
62 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
63 					HAL_TX_ENCRYPT_TYPE_WEP_128,
64 					HAL_TX_ENCRYPT_TYPE_WEP_104,
65 					HAL_TX_ENCRYPT_TYPE_WEP_40,
66 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
67 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
68 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
69 					HAL_TX_ENCRYPT_TYPE_WAPI,
70 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
71 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
72 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
73 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
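
/*
 * Illustrative sketch (not part of the driver): sec_type_map is indexed
 * directly by the caller-supplied cdp_sec_type value, assuming the
 * cdp_sec_type enum ordering matches the table above, e.g.
 *
 *	enum cdp_sec_type sec_type = cdp_sec_type_tkip;
 *	uint8_t hal_encrypt_type = sec_type_map[sec_type];
 *	// expected: HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC (index 4 above)
 *
 * The actual lookup is performed in dp_tx_hw_enqueue() below.
 */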
74 
75 /**
76  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
77  * @vdev: DP Virtual device handle
78  * @nbuf: Buffer pointer
79  * @queue: queue ids container for nbuf
80  *
81  * The TX queue consists of two IDs: a software descriptor pool ID and a DMA
82  * ring ID. Depending on the Tx feature set and the hardware configuration,
83  * the ID combination can differ.
84  * For example -
85  * With XPS enabled, TX descriptor pools and DMA rings are assigned per CPU ID.
86  * With no XPS (lock based resource protection), descriptor pool IDs differ
87  * per vdev, while the DMA ring ID is the same as the single pdev ID.
88  *
89  * Return: None
90  */
91 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
92 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
93 {
94 	/* get flow id */
95 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
96 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
97 
98 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
99 			"%s, pool_id:%d ring_id: %d",
100 			__func__, queue->desc_pool_id, queue->ring_id);
101 
102 	return;
103 }
104 
105 #if defined(FEATURE_TSO)
106 /**
107  * dp_tx_tso_desc_release() - Release the tso segment
108  *                            after unmapping all the fragments
109  *
110  * @soc: soc handle
111  * @tx_desc: Tx software descriptor
112  */
113 static void dp_tx_tso_desc_release(struct dp_soc *soc,
114 		struct dp_tx_desc_s *tx_desc)
115 {
116 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
117 	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
118 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
119 			"%s %d TSO desc is NULL!",
120 			__func__, __LINE__);
121 		qdf_assert(0);
122 	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
123 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
124 			"%s %d TSO common info is NULL!",
125 			__func__, __LINE__);
126 		qdf_assert(0);
127 	} else {
128 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
129 			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;
130 
131 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
132 			tso_num_desc->num_seg.tso_cmn_num_seg--;
133 			qdf_nbuf_unmap_tso_segment(soc->osdev,
134 					tx_desc->tso_desc, false);
135 		} else {
136 			tso_num_desc->num_seg.tso_cmn_num_seg--;
137 			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
138 			qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					tx_desc->tso_desc, true);
140 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
141 					tx_desc->tso_num_desc);
142 			tx_desc->tso_num_desc = NULL;
143 		}
144 		dp_tx_tso_desc_free(soc,
145 				tx_desc->pool_id, tx_desc->tso_desc);
146 		tx_desc->tso_desc = NULL;
147 	}
148 }
149 #else
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 		struct dp_tx_desc_s *tx_desc)
152 {
153 	return;
154 }
155 #endif
156 /**
157  * dp_tx_desc_release() - Release Tx Descriptor
158  * @tx_desc : Tx Descriptor
159  * @desc_pool_id: Descriptor Pool ID
160  *
161  * Deallocate all resources attached to Tx descriptor and free the Tx
162  * descriptor.
163  *
164  * Return: None
165  */
166 static void
167 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
168 {
169 	struct dp_pdev *pdev = tx_desc->pdev;
170 	struct dp_soc *soc;
171 	uint8_t comp_status = 0;
172 
173 	qdf_assert(pdev);
174 
175 	soc = pdev->soc;
176 
177 	if (tx_desc->frm_type == dp_tx_frm_tso)
178 		dp_tx_tso_desc_release(soc, tx_desc);
179 
180 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
181 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
182 
183 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
184 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
185 
186 	qdf_atomic_dec(&pdev->num_tx_outstanding);
187 
188 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
189 		qdf_atomic_dec(&pdev->num_tx_exception);
190 
191 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
192 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
193 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
194 	else
195 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
196 
197 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
198 		"Tx Completion Release desc %d status %d outstanding %d",
199 		tx_desc->id, comp_status,
200 		qdf_atomic_read(&pdev->num_tx_outstanding));
201 
202 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
203 	return;
204 }
205 
206 /**
207  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
208  * @vdev: DP vdev Handle
209  * @nbuf: skb
210  *
211  * Prepares and fills HTT metadata in the frame pre-header for special frames
212  * that should be transmitted using varying transmit parameters.
213  * There are 2 VDEV modes that currently need this special metadata -
214  *  1) Mesh Mode
215  *  2) DSRC Mode
216  *
217  * Return: HTT metadata size
218  *
219  */
220 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
221 		uint32_t *meta_data)
222 {
223 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
224 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
225 
226 	uint8_t htt_desc_size;
227 
228 	/* Size rounded up to a multiple of 8 bytes */
229 	uint8_t htt_desc_size_aligned;
230 
231 	uint8_t *hdr = NULL;
232 
233 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
234 
235 	/*
236 	 * Metadata - HTT MSDU Extension header
237 	 */
238 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
239 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
240 
241 	if (vdev->mesh_vdev) {
242 
243 		/* Fill and add HTT metaheader */
244 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
245 		if (hdr == NULL) {
246 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
247 					"Error in filling HTT metadata\n");
248 
249 			return 0;
250 		}
251 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
252 
253 	} else if (vdev->opmode == wlan_op_mode_ocb) {
254 		/* Todo - Add support for DSRC */
255 	}
256 
257 	return htt_desc_size_aligned;
258 }
259 
260 /**
261  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
262  * @tso_seg: TSO segment to process
263  * @ext_desc: Pointer to MSDU extension descriptor
264  *
265  * Return: void
266  */
267 #if defined(FEATURE_TSO)
268 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
269 		void *ext_desc)
270 {
271 	uint8_t num_frag;
272 	uint32_t tso_flags;
273 
274 	/*
275 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
276 	 * tcp_flag_mask
277 	 *
278 	 * Checksum enable flags are set in TCL descriptor and not in Extension
279 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
280 	 */
281 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
282 
283 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
284 
285 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
286 		tso_seg->tso_flags.ip_len);
287 
288 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
289 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
290 
291 
292 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
293 		uint32_t lo = 0;
294 		uint32_t hi = 0;
295 
296 		qdf_dmaaddr_to_32s(
297 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
298 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
299 			tso_seg->tso_frags[num_frag].length);
300 	}
301 
302 	return;
303 }
304 #else
305 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
306 		void *ext_desc)
307 {
308 	return;
309 }
310 #endif
311 
312 #if defined(FEATURE_TSO)
313 /**
314  * dp_tx_free_tso_seg() - Loop through the tso segments
315  *                        allocated and free them
316  *
317  * @soc: soc handle
318  * @free_seg: list of tso segments
319  * @msdu_info: msdu descriptor
320  *
321  * Return - void
322  */
323 static void dp_tx_free_tso_seg(struct dp_soc *soc,
324 	struct qdf_tso_seg_elem_t *free_seg,
325 	struct dp_tx_msdu_info_s *msdu_info)
326 {
327 	struct qdf_tso_seg_elem_t *next_seg;
328 
329 	while (free_seg) {
330 		next_seg = free_seg->next;
331 		dp_tx_tso_desc_free(soc,
332 			msdu_info->tx_queue.desc_pool_id,
333 			free_seg);
334 		free_seg = next_seg;
335 	}
336 }
337 
338 /**
339  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
340  *                            allocated and free them
341  *
342  * @soc:  soc handle
343  * @free_seg: list of tso num segments
344  * @msdu_info: msdu descriptor
345  * Return - void
346  */
347 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
348 	struct qdf_tso_num_seg_elem_t *free_seg,
349 	struct dp_tx_msdu_info_s *msdu_info)
350 {
351 	struct qdf_tso_num_seg_elem_t *next_seg;
352 
353 	while (free_seg) {
354 		next_seg = free_seg->next;
355 		dp_tso_num_seg_free(soc,
356 			msdu_info->tx_queue.desc_pool_id,
357 			free_seg);
358 		free_seg = next_seg;
359 	}
360 }
361 
362 /**
363  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
364  * @vdev: virtual device handle
365  * @msdu: network buffer
366  * @msdu_info: meta data associated with the msdu
367  *
368  * Return: QDF_STATUS_SUCCESS success
369  */
370 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
371 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
372 {
373 	struct qdf_tso_seg_elem_t *tso_seg;
374 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
375 	struct dp_soc *soc = vdev->pdev->soc;
376 	struct qdf_tso_info_t *tso_info;
377 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
378 
379 	tso_info = &msdu_info->u.tso_info;
380 	tso_info->curr_seg = NULL;
381 	tso_info->tso_seg_list = NULL;
382 	tso_info->num_segs = num_seg;
383 	msdu_info->frm_type = dp_tx_frm_tso;
384 	tso_info->tso_num_seg_list = NULL;
385 
386 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
387 
388 	while (num_seg) {
389 		tso_seg = dp_tx_tso_desc_alloc(
390 				soc, msdu_info->tx_queue.desc_pool_id);
391 		if (tso_seg) {
392 			tso_seg->next = tso_info->tso_seg_list;
393 			tso_info->tso_seg_list = tso_seg;
394 			num_seg--;
395 		} else {
396 			struct qdf_tso_seg_elem_t *free_seg =
397 				tso_info->tso_seg_list;
398 
399 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
400 
401 			return QDF_STATUS_E_NOMEM;
402 		}
403 	}
404 
405 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
406 
407 	tso_num_seg = dp_tso_num_seg_alloc(soc,
408 			msdu_info->tx_queue.desc_pool_id);
409 
410 	if (tso_num_seg) {
411 		tso_num_seg->next = tso_info->tso_num_seg_list;
412 		tso_info->tso_num_seg_list = tso_num_seg;
413 	} else {
414 		/* Allocation of the num-seg descriptor failed; free the */
415 		/* tso segments that were already allocated above */
416 		struct qdf_tso_seg_elem_t *free_seg =
417 					tso_info->tso_seg_list;
418 
419 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
420 			__func__);
421 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
422 
423 		return QDF_STATUS_E_NOMEM;
424 	}
425 
426 	msdu_info->num_seg =
427 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
428 
429 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
430 			msdu_info->num_seg);
431 
432 	if (!(msdu_info->num_seg)) {
433 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
434 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
435 					msdu_info);
436 		return QDF_STATUS_E_INVAL;
437 	}
438 
439 	tso_info->curr_seg = tso_info->tso_seg_list;
440 
441 	return QDF_STATUS_SUCCESS;
442 }
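
/*
 * Illustrative example (not part of the driver): for a jumbo TCP frame whose
 * payload spans three MSS-sized chunks, qdf_nbuf_get_tso_num_seg() reports 3,
 * so the loop above links three qdf_tso_seg_elem_t entries onto
 * tso_info->tso_seg_list and one qdf_tso_num_seg_elem_t tracks the shared
 * state. qdf_nbuf_get_tso_info() then fills in the per-segment fragments,
 * and dp_tx_send_msdu_multiple() later walks tso_info->curr_seg to transmit
 * one MSDU per segment.
 */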
443 #else
444 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
445 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
446 {
447 	return QDF_STATUS_E_NOMEM;
448 }
449 #endif
450 
451 /**
452  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
453  * @vdev: DP Vdev handle
454  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
455  * @desc_pool_id: Descriptor Pool ID
456  *
457  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
458  */
459 static
460 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
461 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
462 {
463 	uint8_t i;
464 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
465 	struct dp_tx_seg_info_s *seg_info;
466 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
467 	struct dp_soc *soc = vdev->pdev->soc;
468 
469 	/* Allocate an extension descriptor */
470 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
471 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
472 
473 	if (!msdu_ext_desc) {
474 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
475 		return NULL;
476 	}
477 
478 	if (msdu_info->exception_fw &&
479 			qdf_unlikely(vdev->mesh_vdev)) {
480 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
481 				&msdu_info->meta_data[0],
482 				sizeof(struct htt_tx_msdu_desc_ext2_t));
483 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
484 		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
485 	} else
486 		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
487 
488 	switch (msdu_info->frm_type) {
489 	case dp_tx_frm_sg:
490 	case dp_tx_frm_me:
491 	case dp_tx_frm_raw:
492 		seg_info = msdu_info->u.sg_info.curr_seg;
493 		/* Update the buffer pointers in MSDU Extension Descriptor */
494 		for (i = 0; i < seg_info->frag_cnt; i++) {
495 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
496 				seg_info->frags[i].paddr_lo,
497 				seg_info->frags[i].paddr_hi,
498 				seg_info->frags[i].len);
499 		}
500 
501 		break;
502 
503 	case dp_tx_frm_tso:
504 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
505 				&cached_ext_desc[0]);
506 		break;
507 
508 
509 	default:
510 		break;
511 	}
512 
513 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
514 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
515 
516 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
517 			msdu_ext_desc->vaddr);
518 
519 	return msdu_ext_desc;
520 }
521 
522 /**
523  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
524  * @vdev: DP vdev handle
525  * @nbuf: skb
526  * @desc_pool_id: Descriptor pool ID
527  * @msdu_info: MSDU info which carries the metadata to the fw
528  * @tx_exc_metadata: Handle that holds exception path metadata
529  * Allocate and prepare Tx descriptor with msdu information.
530  *
531  * Return: Pointer to Tx Descriptor on success,
532  *         NULL on failure
533  */
534 static
535 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
536 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
537 		struct dp_tx_msdu_info_s *msdu_info,
538 		struct cdp_tx_exception_metadata *tx_exc_metadata)
539 {
540 	uint8_t align_pad;
541 	uint8_t is_exception = 0;
542 	uint8_t htt_hdr_size;
543 	struct ether_header *eh;
544 	struct dp_tx_desc_s *tx_desc;
545 	struct dp_pdev *pdev = vdev->pdev;
546 	struct dp_soc *soc = pdev->soc;
547 
548 	/* Allocate software Tx descriptor */
549 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
550 	if (qdf_unlikely(!tx_desc)) {
551 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
552 		return NULL;
553 	}
554 
555 	/* Flow control/Congestion Control counters */
556 	qdf_atomic_inc(&pdev->num_tx_outstanding);
557 
558 	/* Initialize the SW tx descriptor */
559 	tx_desc->nbuf = nbuf;
560 	tx_desc->frm_type = dp_tx_frm_std;
561 	tx_desc->tx_encap_type = (tx_exc_metadata ?
562 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
563 	tx_desc->vdev = vdev;
564 	tx_desc->pdev = pdev;
565 	tx_desc->msdu_ext_desc = NULL;
566 	tx_desc->pkt_offset = 0;
567 
568 	/*
569 	 * For special modes (vdev_type == ocb or mesh), data frames should be
570 	 * transmitted using varying transmit parameters (tx spec) which include
571 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
572 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
573 	 * These frames are sent as exception packets to firmware.
574 	 *
575 	 * HW requirement is that metadata should always point to a
576 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
577 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
578 	 *  to get 8-byte aligned start address along with align_pad added
579 	 *
580 	 *  |-----------------------------|
581 	 *  |                             |
582 	 *  |-----------------------------| <-----Buffer Pointer Address given
583 	 *  |                             |  ^    in HW descriptor (aligned)
584 	 *  |       HTT Metadata          |  |
585 	 *  |                             |  |
586 	 *  |                             |  | Packet Offset given in descriptor
587 	 *  |                             |  |
588 	 *  |-----------------------------|  |
589 	 *  |       Alignment Pad         |  v
590 	 *  |-----------------------------| <----- Actual buffer start address
591 	 *  |        SKB Data             |           (Unaligned)
592 	 *  |                             |
593 	 *  |                             |
594 	 *  |                             |
595 	 *  |                             |
596 	 *  |                             |
597 	 *  |-----------------------------|
598 	 */
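	/*
	 * Worked example (illustrative, assuming an aligned HTT metadata size
	 * of 40 bytes):
	 *   qdf_nbuf_data(nbuf) ends in 0x0b -> align_pad = 0x0b & 0x7 = 3
	 *   push 3 pad bytes                 -> data is 8-byte aligned
	 *   push 40 bytes of HTT metadata    -> data stays 8-byte aligned
	 *   pkt_offset = align_pad + htt_hdr_size = 3 + 40 = 43
	 * HW is given the aligned address and skips pkt_offset bytes to reach
	 * the original frame.
	 */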
599 	if (qdf_unlikely((msdu_info->exception_fw)) ||
600 				(vdev->opmode == wlan_op_mode_ocb)) {
601 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
602 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
603 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
604 					"qdf_nbuf_push_head failed\n");
605 			goto failure;
606 		}
607 
608 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
609 				msdu_info->meta_data);
610 		if (htt_hdr_size == 0)
611 			goto failure;
612 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
613 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
614 		is_exception = 1;
615 	}
616 
617 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
618 				qdf_nbuf_map(soc->osdev, nbuf,
619 					QDF_DMA_TO_DEVICE))) {
620 		/* Handle failure */
621 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
622 				"qdf_nbuf_map failed\n");
623 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
624 		goto failure;
625 	}
626 
627 	if (qdf_unlikely(vdev->nawds_enabled)) {
628 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
629 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
630 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
631 			is_exception = 1;
632 		}
633 	}
634 
635 #if !TQM_BYPASS_WAR
636 	if (is_exception || tx_exc_metadata)
637 #endif
638 	{
639 		/* Temporary WAR due to TQM VP issues */
640 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
641 		qdf_atomic_inc(&pdev->num_tx_exception);
642 	}
643 
644 	return tx_desc;
645 
646 failure:
647 	dp_tx_desc_release(tx_desc, desc_pool_id);
648 	return NULL;
649 }
650 
651 /**
652  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
653  * @vdev: DP vdev handle
654  * @nbuf: skb
655  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
656  * @desc_pool_id : Descriptor Pool ID
657  *
658  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
659  * information. For frames with fragments, allocate and prepare
660  * an MSDU extension descriptor
661  *
662  * Return: Pointer to Tx Descriptor on success,
663  *         NULL on failure
664  */
665 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
666 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
667 		uint8_t desc_pool_id)
668 {
669 	struct dp_tx_desc_s *tx_desc;
670 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
671 	struct dp_pdev *pdev = vdev->pdev;
672 	struct dp_soc *soc = pdev->soc;
673 
674 	/* Allocate software Tx descriptor */
675 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
676 	if (!tx_desc) {
677 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
678 		return NULL;
679 	}
680 
681 	/* Flow control/Congestion Control counters */
682 	qdf_atomic_inc(&pdev->num_tx_outstanding);
683 
684 	/* Initialize the SW tx descriptor */
685 	tx_desc->nbuf = nbuf;
686 	tx_desc->frm_type = msdu_info->frm_type;
687 	tx_desc->tx_encap_type = vdev->tx_encap_type;
688 	tx_desc->vdev = vdev;
689 	tx_desc->pdev = pdev;
690 	tx_desc->pkt_offset = 0;
691 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
692 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
693 
694 	/* Handle scattered frames - TSO/SG/ME */
695 	/* Allocate and prepare an extension descriptor for scattered frames */
696 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
697 	if (!msdu_ext_desc) {
698 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
699 				"%s Tx Extension Descriptor Alloc Fail\n",
700 				__func__);
701 		goto failure;
702 	}
703 
704 #if TQM_BYPASS_WAR
705 	/* Temporary WAR due to TQM VP issues */
706 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
707 	qdf_atomic_inc(&pdev->num_tx_exception);
708 #endif
709 	if (qdf_unlikely(msdu_info->exception_fw))
710 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
711 
712 	tx_desc->msdu_ext_desc = msdu_ext_desc;
713 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
714 
715 	return tx_desc;
716 failure:
717 	dp_tx_desc_release(tx_desc, desc_pool_id);
718 	return NULL;
719 }
720 
721 /**
722  * dp_tx_prepare_raw() - Prepare RAW packet TX
723  * @vdev: DP vdev handle
724  * @nbuf: buffer pointer
725  * @seg_info: Pointer to Segment info Descriptor to be prepared
726  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
727  *     descriptor
728  *
729  * Return: nbuf on success, NULL on failure
730  */
731 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
732 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
733 {
734 	qdf_nbuf_t curr_nbuf = NULL;
735 	uint16_t total_len = 0;
736 	qdf_dma_addr_t paddr;
737 	int32_t i;
738 	int32_t mapped_buf_num = 0;
739 
740 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
741 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
742 
743 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
744 
745 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
746 	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
747 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
748 
749 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
750 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
751 
752 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
753 					QDF_DMA_TO_DEVICE)) {
754 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
755 				"%s dma map error \n", __func__);
756 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
757 			mapped_buf_num = i;
758 			goto error;
759 		}
760 
761 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
762 		seg_info->frags[i].paddr_lo = paddr;
763 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
764 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
765 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
766 		total_len += qdf_nbuf_len(curr_nbuf);
767 	}
768 
769 	seg_info->frag_cnt = i;
770 	seg_info->total_len = total_len;
771 	seg_info->next = NULL;
772 
773 	sg_info->curr_seg = seg_info;
774 
775 	msdu_info->frm_type = dp_tx_frm_raw;
776 	msdu_info->num_seg = 1;
777 
778 	return nbuf;
779 
780 error:
781 	i = 0;
782 	while (nbuf) {
783 		curr_nbuf = nbuf;
784 		if (i < mapped_buf_num) {
785 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
786 			i++;
787 		}
788 		nbuf = qdf_nbuf_next(nbuf);
789 		qdf_nbuf_free(curr_nbuf);
790 	}
791 	return NULL;
792 
793 }
794 
795 /**
796  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
797  * @soc: DP Soc Handle
798  * @vdev: DP vdev handle
799  * @tx_desc: Tx Descriptor Handle
800  * @tid: TID from HLOS for overriding default DSCP-TID mapping
801  * @fw_metadata: Metadata to send to Target Firmware along with frame
802  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
803  * @tx_exc_metadata: Handle that holds exception path meta data
804  *
805  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
806  *  from software Tx descriptor
807  *
808  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
809  */
810 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
811 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
812 				   uint16_t fw_metadata, uint8_t ring_id,
813 				   struct cdp_tx_exception_metadata
814 					*tx_exc_metadata)
815 {
816 	uint8_t type;
817 	uint16_t length;
818 	void *hal_tx_desc, *hal_tx_desc_cached;
819 	qdf_dma_addr_t dma_addr;
820 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
821 
822 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
823 			tx_exc_metadata->sec_type : vdev->sec_type);
824 
825 	/* Return Buffer Manager ID */
826 	uint8_t bm_id = ring_id;
827 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
828 
829 	hal_tx_desc_cached = (void *) cached_desc;
830 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
831 
832 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
833 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
834 		type = HAL_TX_BUF_TYPE_EXT_DESC;
835 		dma_addr = tx_desc->msdu_ext_desc->paddr;
836 	} else {
837 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
838 		type = HAL_TX_BUF_TYPE_BUFFER;
839 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
840 	}
841 
842 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
843 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
844 			dma_addr , bm_id, tx_desc->id, type);
845 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
846 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
847 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
848 	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
849 					HAL_TX_DESC_DEFAULT_LMAC_ID);
850 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
851 			vdev->dscp_tid_map_id);
852 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
853 			sec_type_map[sec_type]);
854 
855 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
856 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
857 			__func__, length, type, (uint64_t)dma_addr,
858 			tx_desc->pkt_offset, tx_desc->id);
859 
860 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
861 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
862 
863 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
864 			vdev->hal_desc_addr_search_flags);
865 
866 	/* verify checksum offload configuration*/
867 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
868 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
869 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
870 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
871 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
872 	}
873 
874 	if (tid != HTT_TX_EXT_TID_INVALID)
875 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
876 
877 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
878 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
879 
880 
881 	/* Sync cached descriptor with HW */
882 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
883 
884 	if (!hal_tx_desc) {
885 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
886 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
887 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
888 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
889 		return QDF_STATUS_E_RESOURCES;
890 	}
891 
892 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
893 
894 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
895 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
896 
897 	/*
898 	 * If one packet is enqueued in HW, PM usage count needs to be
899 	 * incremented by one to prevent future runtime suspend. This
900 	 * should be tied with the success of enqueuing. It will be
901 	 * decremented after the packet has been sent.
902 	 */
903 	hif_pm_runtime_get_noresume(soc->hif_handle);
904 
905 	return QDF_STATUS_SUCCESS;
906 }
907 
908 
909 /**
910  * dp_cce_classify() - Classify the frame based on CCE rules
911  * @vdev: DP vdev handle
912  * @nbuf: skb
913  *
914  * Classify frames based on CCE rules
915  * Return: bool( true if classified,
916  *               else false)
917  */
918 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
919 {
920 	struct ether_header *eh = NULL;
921 	uint16_t   ether_type;
922 	qdf_llc_t *llcHdr;
923 	qdf_nbuf_t nbuf_clone = NULL;
924 	qdf_dot3_qosframe_t *qos_wh = NULL;
925 
926 	/* for mesh packets don't do any classification */
927 	if (qdf_unlikely(vdev->mesh_vdev))
928 		return false;
929 
930 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
931 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
932 		ether_type = eh->ether_type;
933 		llcHdr = (qdf_llc_t *)(nbuf->data +
934 					sizeof(struct ether_header));
935 	} else {
936 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
937 
938 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
939 			if (qdf_unlikely(
940 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
941 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
942 
943 				ether_type = *(uint16_t *)(nbuf->data
944 						+ QDF_IEEE80211_4ADDR_HDR_LEN
945 						+ sizeof(qdf_llc_t)
946 						- sizeof(ether_type));
947 				llcHdr = (qdf_llc_t *)(nbuf->data +
948 						QDF_IEEE80211_4ADDR_HDR_LEN);
949 			} else {
950 				ether_type = *(uint16_t *)(nbuf->data
951 						+ QDF_IEEE80211_3ADDR_HDR_LEN
952 						+ sizeof(qdf_llc_t)
953 						- sizeof(ether_type));
954 				llcHdr = (qdf_llc_t *)(nbuf->data +
955 					QDF_IEEE80211_3ADDR_HDR_LEN);
956 			}
957 
958 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
959 				&& (ether_type ==
960 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
961 
962 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
963 				return true;
964 			}
965 		}
966 
967 		return false;
968 	}
969 
970 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
971 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
972 				sizeof(*llcHdr));
973 		nbuf_clone = qdf_nbuf_clone(nbuf);
974 		if (qdf_unlikely(nbuf_clone)) {
975 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
976 
977 			if (ether_type == htons(ETHERTYPE_8021Q)) {
978 				qdf_nbuf_pull_head(nbuf_clone,
979 						sizeof(qdf_net_vlanhdr_t));
980 			}
981 		}
982 	} else {
983 		if (ether_type == htons(ETHERTYPE_8021Q)) {
984 			nbuf_clone = qdf_nbuf_clone(nbuf);
985 			if (qdf_unlikely(nbuf_clone)) {
986 				qdf_nbuf_pull_head(nbuf_clone,
987 					sizeof(qdf_net_vlanhdr_t));
988 			}
989 		}
990 	}
991 
992 	if (qdf_unlikely(nbuf_clone))
993 		nbuf = nbuf_clone;
994 
995 
996 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
997 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
998 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
999 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1000 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1001 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1002 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1003 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1004 		if (qdf_unlikely(nbuf_clone != NULL))
1005 			qdf_nbuf_free(nbuf_clone);
1006 		return true;
1007 	}
1008 
1009 	if (qdf_unlikely(nbuf_clone != NULL))
1010 		qdf_nbuf_free(nbuf_clone);
1011 
1012 	return false;
1013 }
1014 
1015 /**
1016  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1017  * @vdev: DP vdev handle
1018  * @nbuf: skb
1019  *
1020  * Extract the DSCP or PCP information from the frame and map it to a TID.
1021  * Software based TID classification is required when more than 2 DSCP-TID
1022  * mapping tables are needed.
1023  * Hardware supports 2 DSCP-TID mapping tables
1024  *
1025  * Return: void
1026  */
1027 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1028 		struct dp_tx_msdu_info_s *msdu_info)
1029 {
1030 	uint8_t tos = 0, dscp_tid_override = 0;
1031 	uint8_t *hdr_ptr, *L3datap;
1032 	uint8_t is_mcast = 0;
1033 	struct ether_header *eh = NULL;
1034 	qdf_ethervlan_header_t *evh = NULL;
1035 	uint16_t   ether_type;
1036 	qdf_llc_t *llcHdr;
1037 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1038 
1039 	/* for mesh packets don't do any classification */
1040 	if (qdf_unlikely(vdev->mesh_vdev))
1041 		return;
1042 
1043 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1044 		eh = (struct ether_header *) nbuf->data;
1045 		hdr_ptr = eh->ether_dhost;
1046 		L3datap = hdr_ptr + sizeof(struct ether_header);
1047 	} else {
1048 		qdf_dot3_qosframe_t *qos_wh =
1049 			(qdf_dot3_qosframe_t *) nbuf->data;
1050 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1051 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1052 		return;
1053 	}
1054 
1055 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1056 	ether_type = eh->ether_type;
1057 
1058 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1059 	/*
1060 	 * Check if packet is dot3 or eth2 type.
1061 	 */
1062 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1063 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1064 				sizeof(*llcHdr));
1065 
1066 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1067 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1068 				sizeof(*llcHdr);
1069 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1070 					+ sizeof(*llcHdr) +
1071 					sizeof(qdf_net_vlanhdr_t));
1072 		} else {
1073 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1074 				sizeof(*llcHdr);
1075 		}
1076 	} else {
1077 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1078 			evh = (qdf_ethervlan_header_t *) eh;
1079 			ether_type = evh->ether_type;
1080 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1081 		}
1082 	}
1083 
1084 	/*
1085 	 * Find priority from IP TOS DSCP field
1086 	 */
1087 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1088 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1089 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1090 			/* Only for unicast frames */
1091 			if (!is_mcast) {
1092 				/* send it on VO queue */
1093 				msdu_info->tid = DP_VO_TID;
1094 			}
1095 		} else {
1096 			/*
1097 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1098 			 * from TOS byte.
1099 			 */
1100 			tos = ip->ip_tos;
1101 			dscp_tid_override = 1;
1102 
1103 		}
1104 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1105 		/* TODO
1106 		 * use flowlabel
1107 		 * igmpmld cases to be handled in phase 2
1108 		 */
1109 		unsigned long ver_pri_flowlabel;
1110 		unsigned long pri;
1111 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1112 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1113 			DP_IPV6_PRIORITY_SHIFT;
1114 		tos = pri;
1115 		dscp_tid_override = 1;
1116 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1117 		msdu_info->tid = DP_VO_TID;
1118 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1119 		/* Only for unicast frames */
1120 		if (!is_mcast) {
1121 			/* send ucast arp on VO queue */
1122 			msdu_info->tid = DP_VO_TID;
1123 		}
1124 	}
1125 
1126 	/*
1127 	 * Assign all MCAST packets to BE
1128 	 */
1129 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1130 		if (is_mcast) {
1131 			tos = 0;
1132 			dscp_tid_override = 1;
1133 		}
1134 	}
1135 
1136 	if (dscp_tid_override == 1) {
1137 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1138 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1139 	}
1140 	return;
1141 }
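
/*
 * Illustrative example (not part of the driver), assuming the usual
 * DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f:
 *
 *	uint8_t tos = 0xb8;                 // IPv4 TOS for EF traffic
 *	uint8_t dscp = (tos >> 2) & 0x3f;   // dscp == 46
 *	// tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46]
 *
 * so with a typical default DSCP-TID map, EF traffic lands on a voice TID.
 */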
1142 
1143 #ifdef CONVERGED_TDLS_ENABLE
1144 /**
1145  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1146  * @tx_desc: TX descriptor
1147  *
1148  * Return: None
1149  */
1150 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1151 {
1152 	if (tx_desc->vdev) {
1153 		if (tx_desc->vdev->is_tdls_frame)
1154 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1155 		tx_desc->vdev->is_tdls_frame = false;
1156 	}
1157 }
1158 
1159 /**
1160  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1161  * @tx_desc: TX descriptor
1162  * @vdev: datapath vdev handle
1163  *
1164  * Return: None
1165  */
1166 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1167 				  struct dp_vdev *vdev)
1168 {
1169 	struct hal_tx_completion_status ts = {0};
1170 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1171 
1172 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1173 	if (vdev->tx_non_std_data_callback.func) {
1174 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1175 		vdev->tx_non_std_data_callback.func(
1176 				vdev->tx_non_std_data_callback.ctxt,
1177 				nbuf, ts.status);
1178 		return;
1179 	}
1180 }
1181 #endif
1182 
1183 /**
1184  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1185  * @vdev: DP vdev handle
1186  * @nbuf: skb
1187  * @msdu_info: MSDU info which carries the TID from HLOS for overriding the
1188  *             default DSCP-TID mapping, the metadata to the fw and the
1189  *             Tx queue to be used for this Tx frame
1190  * @peer_id: peer_id of the peer in case of NAWDS frames
1191  * @tx_exc_metadata: Handle that holds exception path metadata
1192  *
1193  * Return: NULL on success,
1194  *         nbuf when it fails to send
1195  */
1196 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1197 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1198 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1199 {
1200 	struct dp_pdev *pdev = vdev->pdev;
1201 	struct dp_soc *soc = pdev->soc;
1202 	struct dp_tx_desc_s *tx_desc;
1203 	QDF_STATUS status;
1204 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1205 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1206 	uint16_t htt_tcl_metadata = 0;
1207 	uint8_t tid = msdu_info->tid;
1208 
1209 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
1210 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1211 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1212 			msdu_info, tx_exc_metadata);
1213 	if (!tx_desc) {
1214 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1215 			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1216 			  __func__, vdev, tx_q->desc_pool_id);
1217 		return nbuf;
1218 	}
1219 
1220 	if (qdf_unlikely(soc->cce_disable)) {
1221 		if (dp_cce_classify(vdev, nbuf) == true) {
1222 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1223 			tid = DP_VO_TID;
1224 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1225 		}
1226 	}
1227 
1228 	dp_tx_update_tdls_flags(tx_desc);
1229 
1230 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1231 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1232 				"%s %d : HAL RING Access Failed -- %pK\n",
1233 				__func__, __LINE__, hal_srng);
1234 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1235 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1236 		goto fail_return;
1237 	}
1238 
1239 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1240 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1241 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1242 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1243 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1244 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1245 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1246 				peer_id);
1247 	} else
1248 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1249 
1250 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1251 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1252 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1253 
1254 	if (status != QDF_STATUS_SUCCESS) {
1255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1256 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1257 			  __func__, tx_desc, tx_q->ring_id);
1258 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1259 		goto fail_return;
1260 	}
1261 
1262 	nbuf = NULL;
1263 
1264 fail_return:
1265 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1266 		hal_srng_access_end(soc->hal_soc, hal_srng);
1267 		hif_pm_runtime_put(soc->hif_handle);
1268 	} else {
1269 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1270 	}
1271 
1272 	return nbuf;
1273 }
1274 
1275 /**
1276  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1277  * @vdev: DP vdev handle
1278  * @nbuf: skb
1279  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1280  *
1281  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1282  *
1283  * Return: NULL on success,
1284  *         nbuf when it fails to send
1285  */
1286 #if QDF_LOCK_STATS
1287 static noinline
1288 #else
1289 static
1290 #endif
1291 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1292 				    struct dp_tx_msdu_info_s *msdu_info)
1293 {
1294 	uint8_t i;
1295 	struct dp_pdev *pdev = vdev->pdev;
1296 	struct dp_soc *soc = pdev->soc;
1297 	struct dp_tx_desc_s *tx_desc;
1298 	bool is_cce_classified = false;
1299 	QDF_STATUS status;
1300 
1301 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1302 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1303 
1304 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1305 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1306 				"%s %d : HAL RING Access Failed -- %pK\n",
1307 				__func__, __LINE__, hal_srng);
1308 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1309 		return nbuf;
1310 	}
1311 
1312 	if (qdf_unlikely(soc->cce_disable)) {
1313 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1314 		if (is_cce_classified) {
1315 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1316 			msdu_info->tid = DP_VO_TID;
1317 		}
1318 	}
1319 
1320 	if (msdu_info->frm_type == dp_tx_frm_me)
1321 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1322 
1323 	i = 0;
1324 	/* Print statement to track i and num_seg */
1325 	/*
1326 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1327 	 * descriptors using information in msdu_info
1328 	 */
1329 	while (i < msdu_info->num_seg) {
1330 		/*
1331 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1332 		 * descriptor
1333 		 */
1334 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1335 				tx_q->desc_pool_id);
1336 
1337 		if (!tx_desc) {
1338 			if (msdu_info->frm_type == dp_tx_frm_me) {
1339 				dp_tx_me_free_buf(pdev,
1340 					(void *)(msdu_info->u.sg_info
1341 						.curr_seg->frags[0].vaddr));
1342 			}
1343 			goto done;
1344 		}
1345 
1346 		if (msdu_info->frm_type == dp_tx_frm_me) {
1347 			tx_desc->me_buffer =
1348 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1349 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1350 		}
1351 
1352 		if (is_cce_classified)
1353 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1354 
1355 		/*
1356 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1357 		 */
1358 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1359 			vdev->htt_tcl_metadata, tx_q->ring_id, NULL);
1360 
1361 		if (status != QDF_STATUS_SUCCESS) {
1362 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1363 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1364 				  __func__, tx_desc, tx_q->ring_id);
1365 
1366 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1367 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1368 
1369 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1370 			goto done;
1371 		}
1372 
1373 		/*
1374 		 * TODO
1375 		 * if tso_info structure can be modified to have curr_seg
1376 		 * as first element, following 2 blocks of code (for TSO and SG)
1377 		 * can be combined into 1
1378 		 */
1379 
1380 		/*
1381 		 * For frames with multiple segments (TSO, ME), jump to next
1382 		 * segment.
1383 		 */
1384 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1385 			if (msdu_info->u.tso_info.curr_seg->next) {
1386 				msdu_info->u.tso_info.curr_seg =
1387 					msdu_info->u.tso_info.curr_seg->next;
1388 
1389 				/*
1390 				 * If this is a jumbo nbuf, then increment the number of
1391 				 * nbuf users for each additional segment of the msdu.
1392 				 * This will ensure that the skb is freed only after
1393 				 * receiving tx completion for all segments of an nbuf
1394 				 */
1395 				qdf_nbuf_inc_users(nbuf);
1396 
1397 				/* Check with MCL if this is needed */
1398 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1399 			}
1400 		}
1401 
1402 		/*
1403 		 * For Multicast-Unicast converted packets,
1404 		 * each converted frame (for a client) is represented as
1405 		 * 1 segment
1406 		 */
1407 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1408 				(msdu_info->frm_type == dp_tx_frm_me)) {
1409 			if (msdu_info->u.sg_info.curr_seg->next) {
1410 				msdu_info->u.sg_info.curr_seg =
1411 					msdu_info->u.sg_info.curr_seg->next;
1412 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1413 			}
1414 		}
1415 		i++;
1416 	}
1417 
1418 	nbuf = NULL;
1419 
1420 done:
1421 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1422 		hal_srng_access_end(soc->hal_soc, hal_srng);
1423 		hif_pm_runtime_put(soc->hif_handle);
1424 	} else {
1425 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1426 	}
1427 
1428 	return nbuf;
1429 }
1430 
1431 /**
1432  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1433  *                     for SG frames
1434  * @vdev: DP vdev handle
1435  * @nbuf: skb
1436  * @seg_info: Pointer to Segment info Descriptor to be prepared
1437  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1438  *
1439  * Return: NULL on success,
1440  *         nbuf when it fails to send
1441  */
1442 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1443 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1444 {
1445 	uint32_t cur_frag, nr_frags;
1446 	qdf_dma_addr_t paddr;
1447 	struct dp_tx_sg_info_s *sg_info;
1448 
1449 	sg_info = &msdu_info->u.sg_info;
1450 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1451 
1452 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1453 				QDF_DMA_TO_DEVICE)) {
1454 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1455 				"dma map error\n");
1456 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1457 
1458 		qdf_nbuf_free(nbuf);
1459 		return NULL;
1460 	}
1461 
1462 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1463 	seg_info->frags[0].paddr_lo = paddr;
1464 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1465 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1466 	seg_info->frags[0].vaddr = (void *) nbuf;
1467 
1468 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1469 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1470 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1471 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1472 					"frag dma map error\n");
1473 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1474 			qdf_nbuf_free(nbuf);
1475 			return NULL;
1476 		}
1477 
1478 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1479 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1480 		seg_info->frags[cur_frag + 1].paddr_hi =
1481 			((uint64_t) paddr) >> 32;
1482 		seg_info->frags[cur_frag + 1].len =
1483 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1484 	}
1485 
1486 	seg_info->frag_cnt = (cur_frag + 1);
1487 	seg_info->total_len = qdf_nbuf_len(nbuf);
1488 	seg_info->next = NULL;
1489 
1490 	sg_info->curr_seg = seg_info;
1491 
1492 	msdu_info->frm_type = dp_tx_frm_sg;
1493 	msdu_info->num_seg = 1;
1494 
1495 	return nbuf;
1496 }
1497 
1498 #ifdef MESH_MODE_SUPPORT
1499 
1500 /**
1501  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
1502  *				and prepare msdu_info for mesh frames.
1503  * @vdev: DP vdev handle
1504  * @nbuf: skb
1505  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1506  *
1507  * Return: NULL on failure,
1508  *         nbuf when extracted successfully
1509  */
1510 static
1511 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1512 				struct dp_tx_msdu_info_s *msdu_info)
1513 {
1514 	struct meta_hdr_s *mhdr;
1515 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1516 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1517 
1518 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1519 
1520 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1521 		msdu_info->exception_fw = 0;
1522 		goto remove_meta_hdr;
1523 	}
1524 
1525 	msdu_info->exception_fw = 1;
1526 
1527 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1528 
1529 	meta_data->host_tx_desc_pool = 1;
1530 	meta_data->update_peer_cache = 1;
1531 	meta_data->learning_frame = 1;
1532 
1533 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1534 		meta_data->power = mhdr->power;
1535 
1536 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1537 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1538 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1539 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1540 
1541 		meta_data->dyn_bw = 1;
1542 
1543 		meta_data->valid_pwr = 1;
1544 		meta_data->valid_mcs_mask = 1;
1545 		meta_data->valid_nss_mask = 1;
1546 		meta_data->valid_preamble_type  = 1;
1547 		meta_data->valid_retries = 1;
1548 		meta_data->valid_bw_info = 1;
1549 	}
1550 
1551 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1552 		meta_data->encrypt_type = 0;
1553 		meta_data->valid_encrypt_type = 1;
1554 		meta_data->learning_frame = 0;
1555 	}
1556 
1557 	meta_data->valid_key_flags = 1;
1558 	meta_data->key_flags = (mhdr->keyix & 0x3);
1559 
1560 remove_meta_hdr:
1561 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1562 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1563 				"qdf_nbuf_pull_head failed\n");
1564 		qdf_nbuf_free(nbuf);
1565 		return NULL;
1566 	}
1567 
1568 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1569 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1570 	else
1571 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1572 
1573 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1574 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1575 			" tid %d to_fw %d\n",
1576 			__func__, msdu_info->meta_data[0],
1577 			msdu_info->meta_data[1],
1578 			msdu_info->meta_data[2],
1579 			msdu_info->meta_data[3],
1580 			msdu_info->meta_data[4],
1581 			msdu_info->meta_data[5],
1582 			msdu_info->tid, msdu_info->exception_fw);
1583 
1584 	return nbuf;
1585 }
1586 #else
1587 static
1588 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1589 				struct dp_tx_msdu_info_s *msdu_info)
1590 {
1591 	return nbuf;
1592 }
1593 
1594 #endif
1595 
1596 #ifdef DP_FEATURE_NAWDS_TX
1597 /**
1598  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1599  * @vdev: dp_vdev handle
1600  * @nbuf: skb
1601  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1602  *             descriptor; carries the TID from HLOS and the Tx queue to be
1603  *             used, and is reused for every copy sent to a NAWDS-enabled
1604  *             peer
1605  *
1606  * Return: NULL on success, nbuf on failure
1607  */
1608 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1609 		struct dp_tx_msdu_info_s *msdu_info)
1610 {
1611 	struct dp_peer *peer = NULL;
1612 	struct dp_soc *soc = vdev->pdev->soc;
1613 	struct dp_ast_entry *ast_entry = NULL;
1614 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1615 	uint16_t peer_id = HTT_INVALID_PEER;
1616 
1617 	struct dp_peer *sa_peer = NULL;
1618 	qdf_nbuf_t nbuf_copy;
1619 
1620 	qdf_spin_lock_bh(&(soc->ast_lock));
1621 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1622 
1623 	if (ast_entry)
1624 		sa_peer = ast_entry->peer;
1625 
1626 	qdf_spin_unlock_bh(&(soc->ast_lock));
1627 
1628 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1629 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1630 				(peer->nawds_enabled)) {
1631 			if (sa_peer == peer) {
1632 				QDF_TRACE(QDF_MODULE_ID_DP,
1633 						QDF_TRACE_LEVEL_DEBUG,
1634 						" %s: broadcast multicast packet",
1635 						 __func__);
1636 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1637 				continue;
1638 			}
1639 
1640 			nbuf_copy = qdf_nbuf_copy(nbuf);
1641 			if (!nbuf_copy) {
1642 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1643 						"nbuf copy failed");
1644 				continue;	/* do not pass a NULL nbuf down */
1645 			}
1646 
1647 			peer_id = peer->peer_ids[0];
1648 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1649 					msdu_info, peer_id, NULL);
1650 			if (nbuf_copy != NULL) {
1651 				qdf_nbuf_free(nbuf_copy);
1652 				continue;
1653 			}
1654 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1655 						1, qdf_nbuf_len(nbuf));
1656 		}
1657 	}
1658 	if (peer_id == HTT_INVALID_PEER)
1659 		return nbuf;
1660 
1661 	return NULL;
1662 }
1663 #endif
1664 
1665 /**
1666  * dp_check_exc_metadata() - Checks if parameters are valid
1667  * @tx_exc - holds all exception path parameters
1668  *
1669  * Returns true when all the parameters are valid else false
1670  *
1671  */
1672 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1673 {
1674 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1675 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1676 	    tx_exc->sec_type > cdp_num_sec_types) {
1677 		return false;
1678 	}
1679 
1680 	return true;
1681 }
1682 
1683 /**
1684  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1685  * @vap_dev: DP vdev handle
1686  * @nbuf: skb
1687  * @tx_exc_metadata: Handle that holds exception path meta data
1688  *
1689  * Entry point for Core Tx layer (DP_TX) invoked from
1690  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1691  *
1692  * Return: NULL on success,
1693  *         nbuf when it fails to send
1694  */
1695 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1696 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1697 {
1698 	struct ether_header *eh = NULL;
1699 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1700 	struct dp_tx_msdu_info_s msdu_info;
1701 
1702 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1703 
1704 	msdu_info.tid = tx_exc_metadata->tid;
1705 
1706 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1707 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1708 			"%s , skb %pM",
1709 			__func__, nbuf->data);
1710 
1711 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1712 
1713 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1714 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1715 			"Invalid parameters in exception path");
1716 		goto fail;
1717 	}
1718 
1719 	/* Basic sanity checks for unsupported packets */
1720 
1721 	/* MESH mode */
1722 	if (qdf_unlikely(vdev->mesh_vdev)) {
1723 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1724 			"Mesh mode is not supported in exception path");
1725 		goto fail;
1726 	}
1727 
1728 	/* TSO or SG */
1729 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1730 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1731 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1732 			  "TSO and SG are not supported in exception path");
1733 
1734 		goto fail;
1735 	}
1736 
1737 	/* RAW */
1738 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1739 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1740 			  "Raw frame is not supported in exception path");
1741 		goto fail;
1742 	}
1743 
1744 
1745 	/* Mcast enhancement*/
1746 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1747 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1748 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1749 					  "Ignoring mcast_enhancement_en; sending the mcast packet to the FW\n");
1750 		}
1751 	}
1752 
1753 	/*
1754 	 * Get HW Queue to use for this frame.
1755 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1756 	 * dedicated for data and 1 for command.
1757 	 * "queue_id" maps to one hardware ring.
1758 	 *  With each ring, we also associate a unique Tx descriptor pool
1759 	 *  to minimize lock contention for these resources.
1760 	 */
1761 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1762 
1763 	/* Reset the control block */
1764 	qdf_nbuf_reset_ctxt(nbuf);
1765 
1766 	/*  Single linear frame */
1767 	/*
1768 	 * If nbuf is a simple linear frame, use send_single function to
1769 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1770 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1771 	 */
1772 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1773 			tx_exc_metadata->peer_id, tx_exc_metadata);
1774 
1775 	return nbuf;
1776 
1777 fail:
1778 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1779 			"pkt send failed");
1780 	return nbuf;
1781 }
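/*
 * Editor's illustrative caller sketch (hedged, not part of the driver):
 * how an upper layer might hand a frame to the exception path and handle
 * the return value. osif_vdev_to_dp_vdev() is a hypothetical helper used
 * only for this sketch; the metadata values are assumptions.
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *	void *dp_vdev = osif_vdev_to_dp_vdev(osif_vdev);   // hypothetical
 *
 *	tx_exc.tid = HTT_INVALID_TID;
 *	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	tx_exc.sec_type = cdp_sec_type_none;
 *	tx_exc.peer_id = peer_id;                  // known target peer
 *
 *	nbuf = dp_tx_send_exception(dp_vdev, nbuf, &tx_exc);
 *	if (nbuf)                                  // non-NULL: not consumed
 *		qdf_nbuf_free(nbuf);
 */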
1782 
1783 /**
1784  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1785  * @vap_dev: DP vdev handle
1786  * @nbuf: skb
1787  *
1788  * Entry point for Core Tx layer (DP_TX) invoked from
1789  * hard_start_xmit in OSIF/HDD
1790  *
1791  * Return: NULL on success,
1792  *         nbuf when it fails to send
1793  */
1794 #ifdef MESH_MODE_SUPPORT
1795 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1796 {
1797 	struct meta_hdr_s *mhdr;
1798 	qdf_nbuf_t nbuf_mesh = NULL;
1799 	qdf_nbuf_t nbuf_clone = NULL;
1800 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1801 	uint8_t no_enc_frame = 0;
1802 
1803 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1804 	if (nbuf_mesh == NULL) {
1805 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1806 				"qdf_nbuf_unshare failed\n");
1807 		return nbuf;
1808 	}
1809 	nbuf = nbuf_mesh;
1810 
1811 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1812 
1813 	if ((vdev->sec_type != cdp_sec_type_none) &&
1814 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1815 		no_enc_frame = 1;
1816 
1817 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1818 		       !no_enc_frame) {
1819 		nbuf_clone = qdf_nbuf_clone(nbuf);
1820 		if (nbuf_clone == NULL) {
1821 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1822 				"qdf_nbuf_clone failed\n");
1823 			return nbuf;
1824 		}
1825 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1826 	}
1827 
1828 	if (nbuf_clone) {
1829 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1830 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1831 		} else
1832 			qdf_nbuf_free(nbuf_clone);
1833 	}
1834 
1835 	if (no_enc_frame)
1836 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1837 	else
1838 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1839 
1840 	nbuf = dp_tx_send(vap_dev, nbuf);
1841 	if ((nbuf == NULL) && no_enc_frame) {
1842 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1843 	}
1844 
1845 	return nbuf;
1846 }
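/*
 * Editor's illustrative sketch (hedged): the per-frame mesh meta header
 * the caller prepends before invoking dp_tx_send_mesh(). The flag usage
 * below is an assumption meant only to show how the two checks above
 * react to it.
 *
 *	struct meta_hdr_s *mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
 *
 *	mhdr->flags |= METAHDR_FLAG_INFO_UPDATED; // a clone is also sent to
 *	                                          // FW for per-packet tx info
 *	mhdr->flags |= METAHDR_FLAG_NOENCRYPT;    // on an encrypted vdev this
 *	                                          // marks a no-encrypt frame
 */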
1847 
1848 #else
1849 
1850 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1851 {
1852 	return dp_tx_send(vap_dev, nbuf);
1853 }
1854 
1855 #endif
1856 
1857 /**
1858  * dp_tx_send() - Transmit a frame on a given VAP
1859  * @vap_dev: DP vdev handle
1860  * @nbuf: skb
1861  *
1862  * Entry point for Core Tx layer (DP_TX) invoked from
1863  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1864  * cases
1865  *
1866  * Return: NULL on success,
1867  *         nbuf when it fails to send
1868  */
1869 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1870 {
1871 	struct ether_header *eh = NULL;
1872 	struct dp_tx_msdu_info_s msdu_info;
1873 	struct dp_tx_seg_info_s seg_info;
1874 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1875 	uint16_t peer_id = HTT_INVALID_PEER;
1876 	qdf_nbuf_t nbuf_mesh = NULL;
1877 
1878 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1879 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1880 
1881 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1882 
1883 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1884 			"%s , skb %pM",
1885 			__func__, nbuf->data);
1886 
1887 	/*
1888 	 * Set Default Host TID value to invalid TID
1889 	 * (TID override disabled)
1890 	 */
1891 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1892 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1893 
1894 	if (qdf_unlikely(vdev->mesh_vdev)) {
1895 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1896 								&msdu_info);
1897 		if (nbuf_mesh == NULL) {
1898 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1899 					"Extracting mesh metadata failed\n");
1900 			return nbuf;
1901 		}
1902 		nbuf = nbuf_mesh;
1903 	}
1904 
1905 	/*
1906 	 * Get HW Queue to use for this frame.
1907 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1908 	 * dedicated for data and 1 for command.
1909 	 * "queue_id" maps to one hardware ring.
1910 	 *  With each ring, we also associate a unique Tx descriptor pool
1911 	 *  to minimize lock contention for these resources.
1912 	 */
1913 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1914 
1915 	/*
1916 	 * TCL H/W supports 2 DSCP-TID mapping tables.
1917 	 *  Table 1 - Default DSCP-TID mapping table
1918 	 *  Table 2 - 1 DSCP-TID override table
1919 	 *
1920 	 * If we need a different DSCP-TID mapping for this vap,
1921 	 * call tid_classify to extract DSCP/ToS from frame and
1922 	 * map to a TID and store in msdu_info. This is later used
1923 	 * to fill in TCL Input descriptor (per-packet TID override).
1924 	 */
1925 	if (vdev->dscp_tid_map_id > 1)
1926 		dp_tx_classify_tid(vdev, nbuf, &msdu_info);
1927 
1928 	/* Reset the control block */
1929 	qdf_nbuf_reset_ctxt(nbuf);
1930 
1931 	/*
1932 	 * Classify the frame and call corresponding
1933 	 * "prepare" function which extracts the segment (TSO)
1934 	 * and fragmentation information (for TSO, SG, ME, or Raw)
1935 	 * into MSDU_INFO structure which is later used to fill
1936 	 * SW and HW descriptors.
1937 	 */
1938 	if (qdf_nbuf_is_tso(nbuf)) {
1939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1940 			  "%s TSO frame %pK\n", __func__, vdev);
1941 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
1942 				qdf_nbuf_len(nbuf));
1943 
1944 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
1945 			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
1946 			return nbuf;
1947 		}
1948 
1949 		goto send_multiple;
1950 	}
1951 
1952 	/* SG */
1953 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1954 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
1955 
1956 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1957 			 "%s non-TSO SG frame %pK\n", __func__, vdev);
1958 
1959 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
1960 				qdf_nbuf_len(nbuf));
1961 
1962 		goto send_multiple;
1963 	}
1964 
1965 #ifdef ATH_SUPPORT_IQUE
1966 	/* Mcast to Ucast Conversion*/
1967 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1968 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1969 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1970 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1971 				  "%s Mcast frm for ME %pK\n", __func__, vdev);
1972 
1973 			DP_STATS_INC_PKT(vdev,
1974 					tx_i.mcast_en.mcast_pkt, 1,
1975 					qdf_nbuf_len(nbuf));
1976 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
1977 					QDF_STATUS_SUCCESS) {
1978 				return NULL;
1979 			}
1980 		}
1981 	}
1982 #endif
1983 
1984 	/* RAW */
1985 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
1986 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
1987 		if (nbuf == NULL)
1988 			return NULL;
1989 
1990 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1991 			  "%s Raw frame %pK\n", __func__, vdev);
1992 
1993 		goto send_multiple;
1994 
1995 	}
1996 
1997 	/*  Single linear frame */
1998 	/*
1999 	 * If nbuf is a simple linear frame, use send_single function to
2000 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2001 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2002 	 */
2003 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2004 
2005 	return nbuf;
2006 
2007 send_multiple:
2008 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2009 
2010 	return nbuf;
2011 }
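/*
 * Editor's note (illustrative summary, hedged): classification order
 * applied by dp_tx_send() above, for quick reference.
 *
 *	mesh vdev       -> extract meta data, then continue as a normal frame
 *	TSO nbuf        -> dp_tx_prepare_tso()  -> dp_tx_send_msdu_multiple()
 *	non-linear nbuf -> dp_tx_prepare_sg()   -> dp_tx_send_msdu_multiple()
 *	mcast + ME      -> dp_tx_prepare_send_me() (frame consumed on success)
 *	raw encap       -> dp_tx_prepare_raw()  -> dp_tx_send_msdu_multiple()
 *	otherwise       -> dp_tx_send_msdu_single()
 */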
2012 
2013 /**
2014  * dp_tx_reinject_handler() - Tx Reinject Handler
2015  * @tx_desc: software descriptor head pointer
2016  * @status : Tx completion status from HTT descriptor
2017  *
2018  * This function reinjects frames back to Target.
2019  * Todo - Host queue needs to be added
2020  *
2021  * Return: none
2022  */
2023 static
2024 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2025 {
2026 	struct dp_vdev *vdev;
2027 	struct dp_peer *peer = NULL;
2028 	uint32_t peer_id = HTT_INVALID_PEER;
2029 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2030 	qdf_nbuf_t nbuf_copy = NULL;
2031 	struct dp_tx_msdu_info_s msdu_info;
2032 	struct dp_peer *sa_peer = NULL;
2033 	struct dp_ast_entry *ast_entry = NULL;
2034 	struct dp_soc *soc = NULL;
2035 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2036 #ifdef WDS_VENDOR_EXTENSION
2037 	int is_mcast = 0, is_ucast = 0;
2038 	int num_peers_3addr = 0;
2039 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2040 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2041 #endif
2042 
2043 	vdev = tx_desc->vdev;
2044 	soc = vdev->pdev->soc;
2045 
2046 	qdf_assert(vdev);
2047 
2048 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2049 
2050 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2051 
2052 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2053 			"%s Tx reinject path\n", __func__);
2054 
2055 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2056 			qdf_nbuf_len(tx_desc->nbuf));
2057 
2058 	qdf_spin_lock_bh(&(soc->ast_lock));
2059 
2060 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2061 
2062 	if (ast_entry)
2063 		sa_peer = ast_entry->peer;
2064 
2065 	qdf_spin_unlock_bh(&(soc->ast_lock));
2066 
2067 #ifdef WDS_VENDOR_EXTENSION
2068 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2069 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2070 	} else {
2071 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2072 	}
2073 	is_ucast = !is_mcast;
2074 
2075 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2076 		if (peer->bss_peer)
2077 			continue;
2078 
2079 		/* Detect wds peers that use 3-addr framing for mcast.
2080 		 * If there are any, the bss_peer is used to send the
2081 		 * mcast frame using 3-addr format. All wds enabled
2082 		 * peers that use 4-addr framing for mcast frames will
2083 		 * be duplicated and sent as 4-addr frames below.
2084 		 */
2085 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2086 			num_peers_3addr = 1;
2087 			break;
2088 		}
2089 	}
2090 #endif
2091 
2092 	if (qdf_unlikely(vdev->mesh_vdev)) {
2093 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2094 	} else {
2095 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2096 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2097 #ifdef WDS_VENDOR_EXTENSION
2098 			/*
2099 			 * . if 3-addr STA, then send on BSS Peer
2100 			 * . if Peer WDS enabled and accept 4-addr mcast,
2101 			 * send mcast on that peer only
2102 			 * . if Peer WDS enabled and accept 4-addr ucast,
2103 			 * send ucast on that peer only
2104 			 */
2105 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2106 			 (peer->wds_enabled &&
2107 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2108 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2109 #else
2110 			((peer->bss_peer &&
2111 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2112 				 peer->nawds_enabled)) {
2113 #endif
2114 				peer_id = DP_INVALID_PEER;
2115 
2116 				if (peer->nawds_enabled) {
2117 					peer_id = peer->peer_ids[0];
2118 					if (sa_peer == peer) {
2119 						QDF_TRACE(
2120 							QDF_MODULE_ID_DP,
2121 							QDF_TRACE_LEVEL_DEBUG,
2122 							" %s: multicast packet",
2123 							__func__);
2124 						DP_STATS_INC(peer,
2125 							tx.nawds_mcast_drop, 1);
2126 						continue;
2127 					}
2128 				}
2129 
2130 				nbuf_copy = qdf_nbuf_copy(nbuf);
2131 
2132 				if (!nbuf_copy) {
2133 					QDF_TRACE(QDF_MODULE_ID_DP,
2134 						QDF_TRACE_LEVEL_DEBUG,
2135 						FL("nbuf copy failed"));
2136 					break;
2137 				}
2138 
2139 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2140 						nbuf_copy,
2141 						&msdu_info,
2142 						peer_id,
2143 						NULL);
2144 
2145 				if (nbuf_copy) {
2146 					QDF_TRACE(QDF_MODULE_ID_DP,
2147 						QDF_TRACE_LEVEL_DEBUG,
2148 						FL("pkt send failed"));
2149 					qdf_nbuf_free(nbuf_copy);
2150 				} else {
2151 					if (peer_id != DP_INVALID_PEER)
2152 						DP_STATS_INC_PKT(peer,
2153 							tx.nawds_mcast,
2154 							1, qdf_nbuf_len(nbuf));
2155 				}
2156 			}
2157 		}
2158 	}
2159 
2160 	if (vdev->nawds_enabled) {
2161 		peer_id = DP_INVALID_PEER;
2162 
2163 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2164 					1, qdf_nbuf_len(nbuf));
2165 
2166 		nbuf = dp_tx_send_msdu_single(vdev,
2167 				nbuf,
2168 				&msdu_info,
2169 				peer_id, NULL);
2170 
2171 		if (nbuf) {
2172 			QDF_TRACE(QDF_MODULE_ID_DP,
2173 				QDF_TRACE_LEVEL_DEBUG,
2174 				FL("pkt send failed"));
2175 			qdf_nbuf_free(nbuf);
2176 		}
2177 	} else
2178 		qdf_nbuf_free(nbuf);
2179 
2180 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2181 }
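/*
 * Editor's illustrative example (WDS_VENDOR_EXTENSION build, hedged): how
 * the peer-selection rule above might play out for a reinjected mcast
 * frame with three associated peers. Peer names are hypothetical.
 *
 *	peerA: bss_peer, some 3-addr STA associated  -> gets the mcast copy
 *	peerB: wds_enabled, wds_tx_mcast_4addr = 1   -> gets a 4-addr copy
 *	peerC: wds_enabled, wds_tx_mcast_4addr = 0   -> skipped (covered by
 *	                                                the bss_peer copy)
 */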
2182 
2183 /**
2184  * dp_tx_inspect_handler() - Tx Inspect Handler
2185  * @tx_desc: software descriptor head pointer
2186  * @status : Tx completion status from HTT descriptor
2187  *
2188  * Handles Tx frames sent back to Host for inspection
2189  * (ProxyARP)
2190  *
2191  * Return: none
2192  */
2193 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2194 {
2195 
2196 	struct dp_soc *soc;
2197 	struct dp_pdev *pdev = tx_desc->pdev;
2198 
2199 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2200 			"%s Tx inspect path\n",
2201 			__func__);
2202 
2203 	qdf_assert(pdev);
2204 
2205 	soc = pdev->soc;
2206 
2207 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2208 			qdf_nbuf_len(tx_desc->nbuf));
2209 
2210 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2211 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2212 }
2213 
2214 #ifdef FEATURE_PERPKT_INFO
2215 /**
2216  * dp_get_completion_indication_for_stack() - send completion to stack
2217  * @soc :  dp_soc handle
2218  * @pdev:  dp_pdev handle
2219  * @peer_id: peer_id of the peer for which completion came
2220  * @ppdu_id: ppdu_id
2221  * @first_msdu: first msdu
2222  * @last_msdu: last msdu
2223  * @netbuf: Buffer pointer for free
2224  *
2225  * This function indicates whether the buffer needs to be sent to the
2226  * stack to be freed or not
2227  */
2228 QDF_STATUS
2229 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2230 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2231 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2232 {
2233 	struct tx_capture_hdr *ppdu_hdr;
2234 	struct dp_peer *peer = NULL;
2235 
2236 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2237 		return QDF_STATUS_E_NOSUPPORT;
2238 
2239 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2240 			dp_peer_find_by_id(soc, peer_id);
2241 
2242 	if (!peer) {
2243 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2244 				FL("Peer Invalid"));
2245 		return QDF_STATUS_E_INVAL;
2246 	}
2247 
2248 	if (pdev->mcopy_mode) {
2249 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2250 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2251 			return QDF_STATUS_E_INVAL;
2252 		}
2253 
2254 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2255 		pdev->m_copy_id.tx_peer_id = peer_id;
2256 	}
2257 
2258 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2259 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2260 				FL("No headroom"));
2261 		return QDF_STATUS_E_NOMEM;
2262 	}
2263 
2264 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2265 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2266 					IEEE80211_ADDR_LEN);
2267 	ppdu_hdr->ppdu_id = ppdu_id;
2268 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2269 			IEEE80211_ADDR_LEN);
2270 	ppdu_hdr->peer_id = peer_id;
2271 	ppdu_hdr->first_msdu = first_msdu;
2272 	ppdu_hdr->last_msdu = last_msdu;
2273 
2274 	return QDF_STATUS_SUCCESS;
2275 }
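/*
 * Editor's illustrative sketch (hedged): layout of the header pushed in
 * front of the MSDU above, as a consumer of the WDI_EVENT_TX_DATA event
 * might read it back. Field names follow the tx_capture_hdr usage above.
 *
 *	struct tx_capture_hdr *hdr =
 *		(struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
 *
 *	// hdr->ta       - transmitter (vdev) MAC address
 *	// hdr->ra       - receiver (peer) MAC address
 *	// hdr->ppdu_id  - PPDU this MSDU was sent in
 *	// hdr->peer_id  - peer id reported by the target
 *	// hdr->first_msdu / hdr->last_msdu - A-MSDU boundary flags
 */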
2276 
2277 
2278 /**
2279  * dp_send_completion_to_stack() - send completion to stack
2280  * @soc :  dp_soc handle
2281  * @pdev:  dp_pdev handle
2282  * @peer_id: peer_id of the peer for which completion came
2283  * @ppdu_id: ppdu_id
2284  * @netbuf: Buffer pointer for free
2285  *
2286  * This function is used to send a completion to the stack
2287  * so that the buffer can be freed
2288  */
2289 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2290 					uint16_t peer_id, uint32_t ppdu_id,
2291 					qdf_nbuf_t netbuf)
2292 {
2293 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2294 				netbuf, peer_id,
2295 				WDI_NO_VAL, pdev->pdev_id);
2296 }
2297 #else
2298 static QDF_STATUS
2299 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2300 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2301 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2302 {
2303 	return QDF_STATUS_E_NOSUPPORT;
2304 }
2305 
2306 static void
2307 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2308 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2309 {
2310 }
2311 #endif
2312 
2313 /**
2314  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2315  * @soc: Soc handle
2316  * @desc: software Tx descriptor to be processed
2317  *
2318  * Return: none
2319  */
2320 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2321 		struct dp_tx_desc_s *desc)
2322 {
2323 	struct dp_vdev *vdev = desc->vdev;
2324 	qdf_nbuf_t nbuf = desc->nbuf;
2325 
2326 	/* If it is TDLS mgmt, don't unmap or free the frame */
2327 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2328 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2329 
2330 	/* 0 : MSDU buffer, 1 : MLE */
2331 	if (desc->msdu_ext_desc) {
2332 		/* TSO free */
2333 		if (hal_tx_ext_desc_get_tso_enable(
2334 					desc->msdu_ext_desc->vaddr)) {
2335 			/* Unmap only when this is the last reference to the
2336 			 * nbuf (no other TSO segment still holds it) */
2337 			if (qdf_nbuf_get_users(nbuf) == 1)
2338 				__qdf_nbuf_unmap_single(soc->osdev,
2339 						nbuf,
2340 						QDF_DMA_TO_DEVICE);
2341 
2342 			qdf_nbuf_free(nbuf);
2343 			return;
2344 		}
2345 	}
2346 
2347 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2348 
2349 	if (qdf_likely(!vdev->mesh_vdev))
2350 		qdf_nbuf_free(nbuf);
2351 	else {
2352 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2353 			qdf_nbuf_free(nbuf);
2354 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2355 		} else
2356 			vdev->osif_tx_free_ext((nbuf));
2357 	}
2358 }
2359 
2360 /**
2361  * dp_tx_mec_handler() - Tx MEC Notify Handler
2362  * @vdev: pointer to dp vdev handle
2363  * @status : Tx completion status from HTT descriptor
2364  *
2365  * Handles MEC notify event sent from fw to Host
2366  *
2367  * Return: none
2368  */
2369 #ifdef FEATURE_WDS
2370 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2371 {
2372 
2373 	struct dp_soc *soc;
2374 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2375 	struct dp_peer *peer;
2376 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2377 
2378 	if (!vdev->wds_enabled)
2379 		return;
2380 
2381 	soc = vdev->pdev->soc;
2382 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2383 	peer = TAILQ_FIRST(&vdev->peer_list);
2384 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2385 
2386 	if (!peer) {
2387 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2388 				FL("peer is NULL"));
2389 		return;
2390 	}
2391 
2392 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2393 			"%s Tx MEC Handler\n",
2394 			__func__);
2395 
2396 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2397 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2398 					status[(DP_MAC_ADDR_LEN - 2) + i];
2399 
2400 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2401 		dp_peer_add_ast(soc,
2402 				peer,
2403 				mac_addr,
2404 				CDP_TXRX_AST_TYPE_MEC,
2405 				flags);
2406 }
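/*
 * Editor's worked example (hedged, assuming DP_MAC_ADDR_LEN == 6): the
 * loop above copies the MEC MAC address out of the HTT status bytes in
 * reverse order, i.e.
 *
 *	mac_addr[5] = status[4]
 *	mac_addr[4] = status[5]
 *	...
 *	mac_addr[0] = status[9]
 *
 * so status[4..9] carries the address byte-reversed relative to the
 * on-wire order stored in mac_addr[].
 */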
2407 #endif
2408 
2409 /**
2410  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2411  * @tx_desc: software descriptor head pointer
2412  * @status : Tx completion status from HTT descriptor
2413  *
2414  * This function will process HTT Tx indication messages from Target
2415  *
2416  * Return: none
2417  */
2418 static
2419 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2420 {
2421 	uint8_t tx_status;
2422 	struct dp_pdev *pdev;
2423 	struct dp_vdev *vdev;
2424 	struct dp_soc *soc;
2425 	uint32_t *htt_status_word = (uint32_t *) status;
2426 
2427 	qdf_assert(tx_desc->pdev);
2428 
2429 	pdev = tx_desc->pdev;
2430 	vdev = tx_desc->vdev;
2431 	soc = pdev->soc;
2432 
2433 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2434 
2435 	switch (tx_status) {
2436 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2437 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2438 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2439 	{
2440 		dp_tx_comp_free_buf(soc, tx_desc);
2441 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2442 		break;
2443 	}
2444 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2445 	{
2446 		dp_tx_reinject_handler(tx_desc, status);
2447 		break;
2448 	}
2449 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2450 	{
2451 		dp_tx_inspect_handler(tx_desc, status);
2452 		break;
2453 	}
2454 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2455 	{
2456 		dp_tx_mec_handler(vdev, status);
2457 		break;
2458 	}
2459 	default:
2460 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2461 				"%s Invalid HTT tx_status %d\n",
2462 				__func__, tx_status);
2463 		break;
2464 	}
2465 }
2466 
2467 #ifdef MESH_MODE_SUPPORT
2468 /**
2469  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2470  *                                         in mesh meta header
2471  * @tx_desc: software descriptor head pointer
2472  * @ts: pointer to tx completion stats
2473  * Return: none
2474  */
2475 static
2476 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2477 		struct hal_tx_completion_status *ts)
2478 {
2479 	struct meta_hdr_s *mhdr;
2480 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2481 
2482 	if (!tx_desc->msdu_ext_desc) {
2483 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2484 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2485 				"netbuf %pK offset %d\n",
2486 				netbuf, tx_desc->pkt_offset);
2487 			return;
2488 		}
2489 	}
2490 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2491 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2492 			"netbuf %pK offset %d\n", netbuf,
2493 			sizeof(struct meta_hdr_s));
2494 		return;
2495 	}
2496 
2497 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2498 	mhdr->rssi = ts->ack_frame_rssi;
2499 	mhdr->channel = tx_desc->pdev->operating_channel;
2500 }
2501 
2502 #else
2503 static
2504 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2505 		struct hal_tx_completion_status *ts)
2506 {
2507 }
2508 
2509 #endif
2510 
2511 /**
2512  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2513  * @peer: Handle to DP peer
2514  * @ts: pointer to HAL Tx completion stats
2515  * @length: MSDU length
2516  *
2517  * Return: None
2518  */
2519 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2520 		struct hal_tx_completion_status *ts, uint32_t length)
2521 {
2522 	struct dp_pdev *pdev = peer->vdev->pdev;
2523 	struct dp_soc *soc = pdev->soc;
2524 	uint8_t mcs, pkt_type;
2525 
2526 	mcs = ts->mcs;
2527 	pkt_type = ts->pkt_type;
2528 
2529 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2530 		return;
2531 
2532 	if (peer->bss_peer) {
2533 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2534 		DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2535 	} else {
2536 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
2537 			DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2538 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2539 		}
2540 	}
2541 
2542 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2543 			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2544 
2545 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2546 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2547 
2548 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2549 			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2550 
2551 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2552 			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2553 
2554 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2555 			(ts->status == HAL_TX_TQM_RR_FW_REASON1));
2556 
2557 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2558 			(ts->status == HAL_TX_TQM_RR_FW_REASON2));
2559 
2560 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2561 			(ts->status == HAL_TX_TQM_RR_FW_REASON3));
2562 
2563 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2564 		return;
2565 
2566 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2567 
2568 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2569 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2570 
2571 	if (!(soc->process_tx_status))
2572 		return;
2573 
2574 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2575 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2576 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2577 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2578 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2579 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2580 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2581 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2582 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2583 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2584 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2585 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2586 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2587 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2588 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2589 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2590 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2591 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2592 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2593 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2594 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2595 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2596 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2597 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2598 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2599 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2600 	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2601 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2602 
2603 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2604 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
2605 				&peer->stats, ts->peer_id,
2606 				UPDATE_PEER_STATS);
2607 	}
2608 }
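/*
 * Editor's worked example (hedged): how the paired DP_STATS_INCC calls
 * above bucket the MCS. For an 11ac (DOT11_AC) completion:
 *
 *	mcs = 7                 -> tx.pkt_type[DOT11_AC].mcs_count[7]
 *	mcs >= MAX_MCS_11AC     -> tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS - 1]
 *	                           (the out-of-range/overflow bucket)
 *
 * Exactly one condition in each pair is true, so every completion lands
 * in a single bucket.
 */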
2609 
2610 /**
2611  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2612  * @tx_desc: software descriptor head pointer
2613  * @length: packet length
2614  *
2615  * Return: none
2616  */
2617 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2618 		uint32_t length)
2619 {
2620 	struct hal_tx_completion_status ts;
2621 	struct dp_soc *soc = NULL;
2622 	struct dp_vdev *vdev = tx_desc->vdev;
2623 	struct dp_peer *peer = NULL;
2624 	struct ether_header *eh =
2625 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2626 
2627 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2628 
2629 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2630 				"-------------------- \n"
2631 				"Tx Completion Stats: \n"
2632 				"-------------------- \n"
2633 				"ack_frame_rssi = %d \n"
2634 				"first_msdu = %d \n"
2635 				"last_msdu = %d \n"
2636 				"msdu_part_of_amsdu = %d \n"
2637 				"rate_stats valid = %d \n"
2638 				"bw = %d \n"
2639 				"pkt_type = %d \n"
2640 				"stbc = %d \n"
2641 				"ldpc = %d \n"
2642 				"sgi = %d \n"
2643 				"mcs = %d \n"
2644 				"ofdma = %d \n"
2645 				"tones_in_ru = %d \n"
2646 				"tsf = %d \n"
2647 				"ppdu_id = %d \n"
2648 				"transmit_cnt = %d \n"
2649 				"tid = %d \n"
2650 				"peer_id = %d \n",
2651 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2652 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2653 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2654 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2655 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2656 				ts.peer_id);
2657 
2658 	if (!vdev) {
2659 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2660 				"invalid vdev");
2661 		goto out;
2662 	}
2663 
2664 	soc = vdev->pdev->soc;
2665 
2666 	/* Update SoC level stats */
2667 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2668 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2669 
2670 	/* Update per-packet stats */
2671 	if (qdf_unlikely(vdev->mesh_vdev) &&
2672 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2673 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2674 
2675 	/* Update peer level stats */
2676 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2677 	if (!peer) {
2678 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2679 				"invalid peer");
2680 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2681 		goto out;
2682 	}
2683 
2684 	if (qdf_likely(peer->vdev->tx_encap_type ==
2685 				htt_cmn_pkt_type_ethernet)) {
2686 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2687 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2688 	}
2689 
2690 	dp_tx_update_peer_stats(peer, &ts, length);
2691 
2692 out:
2693 	return;
2694 }
2695 
2696 /**
2697  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2698  * @soc: core txrx main context
2699  * @comp_head: software descriptor head pointer
2700  *
2701  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2702  * and release the software descriptors after processing is complete
2703  *
2704  * Return: none
2705  */
2706 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2707 		struct dp_tx_desc_s *comp_head)
2708 {
2709 	struct dp_tx_desc_s *desc;
2710 	struct dp_tx_desc_s *next;
2711 	struct hal_tx_completion_status ts = {0};
2712 	uint32_t length;
2713 	struct dp_peer *peer;
2714 
2715 	DP_HIST_INIT();
2716 	desc = comp_head;
2717 
2718 	while (desc) {
2719 		hal_tx_comp_get_status(&desc->comp, &ts);
2720 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2721 		length = qdf_nbuf_len(desc->nbuf);
2722 
2723 		dp_tx_comp_process_tx_status(desc, length);
2724 
2725 		/*currently m_copy/tx_capture is not supported for scatter gather packets*/
2726 		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2727 					desc->pdev, ts.peer_id, ts.ppdu_id,
2728 					ts.first_msdu, ts.last_msdu,
2729 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
2730 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2731 						QDF_DMA_TO_DEVICE);
2732 
2733 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2734 				ts.ppdu_id, desc->nbuf);
2735 		} else {
2736 			dp_tx_comp_free_buf(soc, desc);
2737 		}
2738 
2739 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2740 
2741 		next = desc->next;
2742 		dp_tx_desc_release(desc, desc->pool_id);
2743 		desc = next;
2744 	}
2745 	DP_TX_HIST_STATS_PER_PDEV();
2746 }
2747 
2748 /**
2749  * dp_tx_comp_handler() - Tx completion handler
2750  * @soc: core txrx main context
2751  * @ring_id: completion ring id
2752  * @quota: No. of packets/descriptors that can be serviced in one loop
2753  *
2754  * This function will collect hardware release ring element contents and
2755  * handle descriptor contents. Based on contents, free packet or handle error
2756  * conditions
2757  *
2758  * Return: none
2759  */
2760 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2761 {
2762 	void *tx_comp_hal_desc;
2763 	uint8_t buffer_src;
2764 	uint8_t pool_id;
2765 	uint32_t tx_desc_id;
2766 	struct dp_tx_desc_s *tx_desc = NULL;
2767 	struct dp_tx_desc_s *head_desc = NULL;
2768 	struct dp_tx_desc_s *tail_desc = NULL;
2769 	uint32_t num_processed;
2770 	uint32_t count;
2771 
2772 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2773 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2774 				"%s %d : HAL RING Access Failed -- %pK\n",
2775 				__func__, __LINE__, hal_srng);
2776 		return 0;
2777 	}
2778 
2779 	num_processed = 0;
2780 	count = 0;
2781 
2782 	/* Find head descriptor from completion ring */
2783 	while (qdf_likely(tx_comp_hal_desc =
2784 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2785 
2786 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2787 
2788 		/* If this buffer was not released by TQM or FW, then it is
2789 		 * not a Tx completion indication; assert */
2790 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2791 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2792 
2793 			QDF_TRACE(QDF_MODULE_ID_DP,
2794 					QDF_TRACE_LEVEL_FATAL,
2795 					"Tx comp release_src != TQM | FW");
2796 
2797 			qdf_assert_always(0);
2798 		}
2799 
2800 		/* Get descriptor id */
2801 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
2802 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2803 			DP_TX_DESC_ID_POOL_OS;
2804 
2805 		/* Pool ID is out of limit. Error */
2806 		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
2807 					soc->wlan_cfg_ctx)) {
2808 			QDF_TRACE(QDF_MODULE_ID_DP,
2809 					QDF_TRACE_LEVEL_FATAL,
2810 					"Tx Comp pool id %d not valid",
2811 					pool_id);
2812 
2813 			qdf_assert_always(0);
2814 		}
2815 
2816 		/* Find Tx descriptor */
2817 		tx_desc = dp_tx_desc_find(soc, pool_id,
2818 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
2819 				DP_TX_DESC_ID_PAGE_OS,
2820 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
2821 				DP_TX_DESC_ID_OFFSET_OS);
2822 
2823 		/*
2824 		 * If the release source is FW, process the HTT status
2825 		 */
2826 		if (qdf_unlikely(buffer_src ==
2827 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2828 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
2829 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
2830 					htt_tx_status);
2831 			dp_tx_process_htt_completion(tx_desc,
2832 					htt_tx_status);
2833 		} else {
2834 			/* Pool id is not matching. Error */
2835 			if (tx_desc->pool_id != pool_id) {
2836 				QDF_TRACE(QDF_MODULE_ID_DP,
2837 					QDF_TRACE_LEVEL_FATAL,
2838 					"Tx Comp pool id %d not matched %d",
2839 					pool_id, tx_desc->pool_id);
2840 
2841 				qdf_assert_always(0);
2842 			}
2843 
2844 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
2845 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
2846 				QDF_TRACE(QDF_MODULE_ID_DP,
2847 					QDF_TRACE_LEVEL_FATAL,
2848 					"Txdesc invalid, flgs = %x,id = %d",
2849 					tx_desc->flags,	tx_desc_id);
2850 				qdf_assert_always(0);
2851 			}
2852 
2853 			/* First ring descriptor on the cycle */
2854 			if (!head_desc) {
2855 				head_desc = tx_desc;
2856 				tail_desc = tx_desc;
2857 			}
2858 
2859 			tail_desc->next = tx_desc;
2860 			tx_desc->next = NULL;
2861 			tail_desc = tx_desc;
2862 
2863 			/* Collect hw completion contents */
2864 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
2865 					&tx_desc->comp, 1);
2866 
2867 		}
2868 
2869 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
2870 		/* Decrement PM usage count if the packet has been sent.*/
2871 		hif_pm_runtime_put(soc->hif_handle);
2872 
2873 		/*
2874 		 * Stop processing once the processed packet count
2875 		 * reaches the given quota
2876 		 */
2877 		if ((num_processed >= quota))
2878 			break;
2879 
2880 		count++;
2881 	}
2882 
2883 	hal_srng_access_end(soc->hal_soc, hal_srng);
2884 
2885 	/* Process the reaped descriptors */
2886 	if (head_desc)
2887 		dp_tx_comp_process_desc(soc, head_desc);
2888 
2889 	return num_processed;
2890 }
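/*
 * Editor's illustrative sketch: the cookie decomposition used above. The
 * exact mask/shift values come from dp_tx_desc.h; the point is that one
 * 32-bit descriptor id encodes the three coordinates needed to locate
 * the software descriptor.
 *
 *	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK)   >> DP_TX_DESC_ID_POOL_OS;
 *	page    = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK)   >> DP_TX_DESC_ID_PAGE_OS;
 *	offset  = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS;
 *
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page, offset);
 */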
2891 
2892 #ifdef CONVERGED_TDLS_ENABLE
2893 /**
2894  * dp_tx_non_std() - Allow the control-path SW to send data frames
2895  *
2896  * @data_vdev - which vdev should transmit the tx data frames
2897  * @vdev_handle: which vdev should transmit the tx data frames
2898  * @tx_spec: what non-standard handling to apply to the tx data frames
2899  * @msdu_list: NULL-terminated list of tx MSDUs
2900  * Return: NULL on success,
2901  *         nbuf when it fails to send
2902  */
2903 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
2904 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
2905 {
2906 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2907 
2908 	if (tx_spec & OL_TX_SPEC_NO_FREE)
2909 		vdev->is_tdls_frame = true;
2910 	return dp_tx_send(vdev_handle, msdu_list);
2911 }
2912 #endif
2913 
2914 /**
2915  * dp_tx_vdev_attach() - attach vdev to dp tx
2916  * @vdev: virtual device instance
2917  *
2918  * Return: QDF_STATUS_SUCCESS: success
2919  *         QDF_STATUS_E_RESOURCES: Error return
2920  */
2921 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
2922 {
2923 	/*
2924 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
2925 	 */
2926 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
2927 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
2928 
2929 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
2930 			vdev->vdev_id);
2931 
2932 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
2933 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
2934 
2935 	/*
2936 	 * Set HTT Extension Valid bit to 0 by default
2937 	 */
2938 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
2939 
2940 	dp_tx_vdev_update_search_flags(vdev);
2941 
2942 	return QDF_STATUS_SUCCESS;
2943 }
2944 
2945 /**
2946  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
2947  * @vdev: virtual device instance
2948  *
2949  * Return: void
2950  *
2951  */
2952 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
2953 {
2954 	/*
2955 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
2956 	 * for TDLS link
2957 	 *
2958 	 * Enable AddrY (SA based search) only for non-WDS STA and
2959 	 * ProxySTA VAP modes.
2960 	 *
2961 	 * In all other VAP modes, only DA based search should be
2962 	 * enabled
2963 	 */
2964 	if (vdev->opmode == wlan_op_mode_sta &&
2965 	    vdev->tdls_link_connected)
2966 		vdev->hal_desc_addr_search_flags =
2967 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
2968 	else if ((vdev->opmode == wlan_op_mode_sta &&
2969 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
2970 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
2971 	else
2972 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
2973 }
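/*
 * Editor's summary of the selection above (illustrative):
 *
 *	STA with TDLS link connected   -> ADDRX_EN | ADDRY_EN
 *	STA, non-WDS or ProxySTA vdev  -> ADDRY_EN  (SA based search only)
 *	any other opmode               -> ADDRX_EN  (DA based search only)
 */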
2974 
2975 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2976 static void dp_tx_desc_flush(struct dp_vdev *vdev)
2977 {
2978 }
2979 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
2980 
2981 /**
2982  * dp_tx_desc_flush() - release resources associated with tx_desc
2983  * @vdev: virtual device instance
2984  *
2985  * This function will free all outstanding Tx buffers,
2986  * including ME buffers for which either the free on
2987  * completion did not happen or the completion was
2988  * never received.
2989  */
2990 static void dp_tx_desc_flush(struct dp_vdev *vdev)
2991 {
2992 	uint8_t i, num_pool;
2993 	uint32_t j;
2994 	uint32_t num_desc;
2995 	struct dp_soc *soc = vdev->pdev->soc;
2996 	struct dp_tx_desc_s *tx_desc = NULL;
2997 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
2998 
2999 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3000 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3001 
3002 	for (i = 0; i < num_pool; i++) {
3003 		for (j = 0; j < num_desc; j++) {
3004 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3005 			if (tx_desc_pool &&
3006 				tx_desc_pool->desc_pages.cacheable_pages) {
3007 				tx_desc = dp_tx_desc_find(soc, i,
3008 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3009 					DP_TX_DESC_ID_PAGE_OS,
3010 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3011 					DP_TX_DESC_ID_OFFSET_OS);
3012 
3013 				if (tx_desc && (tx_desc->vdev == vdev) &&
3014 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3015 					dp_tx_comp_free_buf(soc, tx_desc);
3016 					dp_tx_desc_release(tx_desc, i);
3017 				}
3018 			}
3019 		}
3020 	}
3021 }
3022 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3023 
3024 /**
3025  * dp_tx_vdev_detach() - detach vdev from dp tx
3026  * @vdev: virtual device instance
3027  *
3028  * Return: QDF_STATUS_SUCCESS: success
3029  *         QDF_STATUS_E_RESOURCES: Error return
3030  */
3031 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3032 {
3033 	dp_tx_desc_flush(vdev);
3034 	return QDF_STATUS_SUCCESS;
3035 }
3036 
3037 /**
3038  * dp_tx_pdev_attach() - attach pdev to dp tx
3039  * @pdev: physical device instance
3040  *
3041  * Return: QDF_STATUS_SUCCESS: success
3042  *         QDF_STATUS_E_RESOURCES: Error return
3043  */
3044 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3045 {
3046 	struct dp_soc *soc = pdev->soc;
3047 
3048 	/* Initialize Flow control counters */
3049 	qdf_atomic_init(&pdev->num_tx_exception);
3050 	qdf_atomic_init(&pdev->num_tx_outstanding);
3051 
3052 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3053 		/* Initialize descriptors in TCL Ring */
3054 		hal_tx_init_data_ring(soc->hal_soc,
3055 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3056 	}
3057 
3058 	return QDF_STATUS_SUCCESS;
3059 }
3060 
3061 /**
3062  * dp_tx_pdev_detach() - detach pdev from dp tx
3063  * @pdev: physical device instance
3064  *
3065  * Return: QDF_STATUS_SUCCESS: success
3066  *         QDF_STATUS_E_RESOURCES: Error return
3067  */
3068 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3069 {
3070 	dp_tx_me_exit(pdev);
3071 	return QDF_STATUS_SUCCESS;
3072 }
3073 
3074 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3075 /* Pools will be allocated dynamically */
3076 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3077 					int num_desc)
3078 {
3079 	uint8_t i;
3080 
3081 	for (i = 0; i < num_pool; i++) {
3082 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3083 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
3089 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3090 {
3091 	uint8_t i;
3092 
3093 	for (i = 0; i < num_pool; i++)
3094 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3095 }
3096 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3097 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3098 					int num_desc)
3099 {
3100 	uint8_t i;
3101 
3102 	/* Allocate software Tx descriptor pools */
3103 	for (i = 0; i < num_pool; i++) {
3104 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3105 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3106 					"%s Tx Desc Pool alloc %d failed %pK\n",
3107 					__func__, i, soc);
3108 			return ENOMEM;
3109 		}
3110 	}
3111 	return 0;
3112 }
3113 
3114 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3115 {
3116 	uint8_t i;
3117 
3118 	for (i = 0; i < num_pool; i++) {
3119 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3120 		if (dp_tx_desc_pool_free(soc, i)) {
3121 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3122 				"%s Tx Desc Pool Free failed\n", __func__);
3123 		}
3124 	}
3125 }
3126 
3127 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3128 
3129 /**
3130  * dp_tx_soc_detach() - detach soc from dp tx
3131  * @soc: core txrx main context
3132  *
3133  * This function will detach dp tx from the main device context
3134  * and free the dp tx resources
3135  *
3136  * Return: QDF_STATUS_SUCCESS: success
3137  *         QDF_STATUS_E_RESOURCES: Error return
3138  */
3139 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3140 {
3141 	uint8_t num_pool;
3142 	uint16_t num_desc;
3143 	uint16_t num_ext_desc;
3144 	uint8_t i;
3145 
3146 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3147 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3148 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3149 
3150 	dp_tx_flow_control_deinit(soc);
3151 	dp_tx_delete_static_pools(soc, num_pool);
3152 
3153 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3154 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
3155 			__func__, num_pool, num_desc);
3156 
3157 	for (i = 0; i < num_pool; i++) {
3158 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3159 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3160 					"%s Tx Ext Desc Pool Free failed\n",
3161 					__func__);
3162 			return QDF_STATUS_E_RESOURCES;
3163 		}
3164 	}
3165 
3166 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3167 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
3168 			__func__, num_pool, num_ext_desc);
3169 
3170 	for (i = 0; i < num_pool; i++) {
3171 		dp_tx_tso_desc_pool_free(soc, i);
3172 	}
3173 
3174 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3175 			"%s TSO Desc Pool %d Free descs = %d\n",
3176 			__func__, num_pool, num_desc);
3177 
3178 
3179 	for (i = 0; i < num_pool; i++)
3180 		dp_tx_tso_num_seg_pool_free(soc, i);
3181 
3182 
3183 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3184 		"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
3185 		__func__, num_pool, num_desc);
3186 
3187 	return QDF_STATUS_SUCCESS;
3188 }
3189 
3190 /**
3191  * dp_tx_soc_attach() - attach soc to dp tx
3192  * @soc: core txrx main context
3193  *
3194  * This function will attach dp tx to the main device context,
3195  * allocate dp tx resources and initialize them
3196  *
3197  * Return: QDF_STATUS_SUCCESS: success
3198  *         QDF_STATUS_E_RESOURCES: Error return
3199  */
3200 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3201 {
3202 	uint8_t i;
3203 	uint8_t num_pool;
3204 	uint32_t num_desc;
3205 	uint32_t num_ext_desc;
3206 
3207 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3208 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3209 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3210 
3211 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3212 		goto fail;
3213 
3214 	dp_tx_flow_control_init(soc);
3215 
3216 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3217 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
3218 			__func__, num_pool, num_desc);
3219 
3220 	/* Allocate extension tx descriptor pools */
3221 	for (i = 0; i < num_pool; i++) {
3222 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3223 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3224 				"MSDU Ext Desc Pool alloc %d failed %pK\n",
3225 				i, soc);
3226 
3227 			goto fail;
3228 		}
3229 	}
3230 
3231 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3232 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
3233 			__func__, num_pool, num_ext_desc);
3234 
3235 	for (i = 0; i < num_pool; i++) {
3236 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3237 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3238 				"TSO Desc Pool alloc %d failed %pK\n",
3239 				i, soc);
3240 
3241 			goto fail;
3242 		}
3243 	}
3244 
3245 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3246 			"%s TSO Desc Alloc %d, descs = %d\n",
3247 			__func__, num_pool, num_desc);
3248 
3249 	for (i = 0; i < num_pool; i++) {
3250 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3251 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3252 				"TSO Num of seg Pool alloc %d failed %pK\n",
3253 				i, soc);
3254 
3255 			goto fail;
3256 		}
3257 	}
3258 
3259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3260 			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
3261 			__func__, num_pool, num_desc);
3262 
3263 	/* Initialize descriptors in TCL Rings */
3264 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3265 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3266 			hal_tx_init_data_ring(soc->hal_soc,
3267 					soc->tcl_data_ring[i].hal_srng);
3268 		}
3269 	}
3270 
3271 	/*
3272 	 * todo - Add a runtime config option to enable this.
3273 	 */
3274 	/*
3275 	 * Due to multiple issues on NPR EMU, enable it selectively
3276 	 * only for NPR EMU, should be removed, once NPR platforms
3277 	 * are stable.
3278 	 */
3279 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3280 
3281 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3282 			"%s HAL Tx init Success\n", __func__);
3283 
3284 	return QDF_STATUS_SUCCESS;
3285 
3286 fail:
3287 	/* Detach will take care of freeing only allocated resources */
3288 	dp_tx_soc_detach(soc);
3289 	return QDF_STATUS_E_RESOURCES;
3290 }
3291 
3292 /*
3293  * dp_tx_me_mem_free(): Function to free memory allocated for mcast enhancement
3294  * @pdev: pointer to DP PDEV structure
3295  * @seg_info_head: Pointer to the head of list
3296  *
3297  * Return: void
3298  */
3299 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3300 		struct dp_tx_seg_info_s *seg_info_head)
3301 {
3302 	struct dp_tx_me_buf_t *mc_uc_buf;
3303 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3304 	qdf_nbuf_t nbuf = NULL;
3305 	uint64_t phy_addr;
3306 
3307 	while (seg_info_head) {
3308 		nbuf = seg_info_head->nbuf;
3309 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3310 			seg_info_head->frags[0].vaddr;
3311 		phy_addr = seg_info_head->frags[0].paddr_hi;
3312 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3313 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3314 				phy_addr,
3315 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3316 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3317 		qdf_nbuf_free(nbuf);
3318 		seg_info_new = seg_info_head;
3319 		seg_info_head = seg_info_head->next;
3320 		qdf_mem_free(seg_info_new);
3321 	}
3322 }
3323 
3324 /**
3325  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3326  * @vdev: DP VDEV handle
3327  * @nbuf: Multicast nbuf
3328  * @newmac: Table of the clients to which packets have to be sent
3329  * @new_mac_cnt: No of clients
3330  *
3331  * return: no of converted packets
3332  */
3333 uint16_t
3334 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3335 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3336 {
3337 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3338 	struct dp_pdev *pdev = vdev->pdev;
3339 	struct ether_header *eh;
3340 	uint8_t *data;
3341 	uint16_t len;
3342 
3343 	/* reference to frame dst addr */
3344 	uint8_t *dstmac;
3345 	/* copy of original frame src addr */
3346 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3347 
3348 	/* local index into newmac */
3349 	uint8_t new_mac_idx = 0;
3350 	struct dp_tx_me_buf_t *mc_uc_buf;
3351 	qdf_nbuf_t  nbuf_clone;
3352 	struct dp_tx_msdu_info_s msdu_info;
3353 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3354 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3355 	struct dp_tx_seg_info_s *seg_info_new;
3356 	struct dp_tx_frag_info_s data_frag;
3357 	qdf_dma_addr_t paddr_data;
3358 	qdf_dma_addr_t paddr_mcbuf = 0;
3359 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3360 	QDF_STATUS status;
3361 
3362 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3363 
3364 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3365 
3366 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3367 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3368 
3369 	len = qdf_nbuf_len(nbuf);
3370 
3371 	data = qdf_nbuf_data(nbuf);
3372 
3373 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3374 			QDF_DMA_TO_DEVICE);
3375 
3376 	if (status) {
3377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3378 				"Mapping failure Error:%d", status);
3379 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3380 		qdf_nbuf_free(nbuf);
3381 		return 1;
3382 	}
3383 
3384 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3385 
3386 	/*preparing data fragment*/
3387 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3388 	data_frag.paddr_lo = (uint32_t)paddr_data;
3389 	data_frag.paddr_hi = (((uint64_t) paddr_data)  >> 32);
3390 	data_frag.len = len - DP_MAC_ADDR_LEN;
3391 
3392 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3393 		dstmac = newmac[new_mac_idx];
3394 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3395 				"added mac addr (%pM)", dstmac);
3396 
3397 		/* Check for NULL Mac Address */
3398 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3399 			continue;
3400 
3401 		/* frame to self mac. skip */
3402 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3403 			continue;
3404 
3405 		/*
3406 		 * TODO: optimize to avoid malloc in per-packet path
3407 		 * For eg. seg_pool can be made part of vdev structure
3408 		 */
3409 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3410 
3411 		if (!seg_info_new) {
3412 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3413 					"alloc failed");
3414 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3415 			goto fail_seg_alloc;
3416 		}
3417 
3418 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3419 		if (mc_uc_buf == NULL)
3420 			goto fail_buf_alloc;
3421 
3422 		/*
3423 		 * TODO: Check if we need to clone the nbuf
3424 		 * Or can we just use the reference for all cases
3425 		 */
3426 		if (new_mac_idx < (new_mac_cnt - 1)) {
3427 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3428 			if (nbuf_clone == NULL) {
3429 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3430 				goto fail_clone;
3431 			}
3432 		} else {
3433 			/*
3434 			 * Update the ref
3435 			 * to account for frame sent without cloning
3436 			 */
3437 			qdf_nbuf_ref(nbuf);
3438 			nbuf_clone = nbuf;
3439 		}
3440 
3441 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3442 
3443 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3444 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3445 				&paddr_mcbuf);
3446 
3447 		if (status) {
3448 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3449 					"Mapping failure Error:%d", status);
3450 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3451 			goto fail_map;
3452 		}
3453 
3454 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
3455 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3456 		seg_info_new->frags[0].paddr_hi =
3457 			((uint64_t) paddr_mcbuf >> 32);
3458 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3459 
3460 		seg_info_new->frags[1] = data_frag;
3461 		seg_info_new->nbuf = nbuf_clone;
3462 		seg_info_new->frag_cnt = 2;
3463 		seg_info_new->total_len = len;
3464 
3465 		seg_info_new->next = NULL;
3466 
3467 		if (seg_info_head == NULL)
3468 			seg_info_head = seg_info_new;
3469 		else
3470 			seg_info_tail->next = seg_info_new;
3471 
3472 		seg_info_tail = seg_info_new;
3473 	}
3474 
3475 	if (!seg_info_head) {
3476 		goto free_return;
3477 	}
3478 
3479 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3480 	msdu_info.num_seg = new_mac_cnt;
3481 	msdu_info.frm_type = dp_tx_frm_me;
3482 
3483 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3484 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3485 
3486 	while (seg_info_head->next) {
3487 		seg_info_new = seg_info_head;
3488 		seg_info_head = seg_info_head->next;
3489 		qdf_mem_free(seg_info_new);
3490 	}
3491 	qdf_mem_free(seg_info_head);
3492 
3493 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3494 	qdf_nbuf_free(nbuf);
3495 	return new_mac_cnt;
3496 
3497 fail_map:
3498 	qdf_nbuf_free(nbuf_clone);
3499 
3500 fail_clone:
3501 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3502 
3503 fail_buf_alloc:
3504 	qdf_mem_free(seg_info_new);
3505 
3506 fail_seg_alloc:
3507 	dp_tx_me_mem_free(pdev, seg_info_head);
3508 
3509 free_return:
3510 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3511 	qdf_nbuf_free(nbuf);
3512 	return 1;
3513 }
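/*
 * Editor's illustrative sketch (hedged, assuming DP_MAC_ADDR_LEN == 6 and
 * IEEE80211_ADDR_LEN == 6): the scatter-gather layout built above for
 * each converted unicast copy.
 *
 *	frag[0]: 6-byte ME buffer holding the new destination MAC
 *	         (vaddr = mc_uc_buf->data, len = DP_MAC_ADDR_LEN)
 *	frag[1]: original frame starting at the source MAC
 *	         (vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN,
 *	          len = frame length - DP_MAC_ADDR_LEN)
 *
 * Hardware therefore sees "new DA | original SA | type | payload" without
 * the host rewriting the original buffer for every client.
 */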
3514 
3515