xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
41 
42 /* invalid peer id for reinject */
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /* Mapping between HAL encrypt type and cdp_sec_type */
46 #define MAX_CDP_SEC_TYPE 12
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * The Tx packet queue has two IDs: the software descriptor pool id and the
68  * DMA ring id. Depending on the Tx feature set and hardware configuration,
69  * the queue id combination can differ. For example -
70  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per
71  * CPU id.
72  * With no XPS (lock based resource protection), descriptor pool ids are
73  * different for each vdev and the DMA ring id is the same as the single pdev id.
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
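	/*
	 * Pick the descriptor pool and TCL ring from the skb queue mapping
	 * (typically set per CPU when XPS is enabled), masked by
	 * DP_TX_QUEUE_MASK.
	 */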
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc - core txrx main context
112  * @tx_desc - Tx software descriptor
113  */
114 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
115 				    struct dp_tx_desc_s *tx_desc)
116 {
117 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
118 	if (qdf_unlikely(!tx_desc->tso_desc)) {
119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
120 			  "%s %d TSO desc is NULL!",
121 			  __func__, __LINE__);
122 		qdf_assert(0);
123 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
124 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
125 			  "%s %d TSO num desc is NULL!",
126 			  __func__, __LINE__);
127 		qdf_assert(0);
128 	} else {
129 		bool is_last_seg;
130 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
131 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
132 
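		/*
		 * tso_cmn_num_seg counts the outstanding TSO segments of this
		 * jumbo nbuf; the unmap is flagged as the last segment only
		 * when a single segment remains.
		 */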
133 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
134 			is_last_seg = false;
135 		else
136 			is_last_seg = true;
137 		tso_num_desc->num_seg.tso_cmn_num_seg--;
138 		qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					   tx_desc->tso_desc, is_last_seg);
140 	}
141 }
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  *
147  * @soc - soc device handle
148  * @tx_desc - Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list*/
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
183 				    struct dp_tx_desc_s *tx_desc)
184 
185 {
186 }
187 
188 static void dp_tx_tso_desc_release(struct dp_soc *soc,
189 				   struct dp_tx_desc_s *tx_desc)
190 {
191 }
192 #endif
193 /**
194  * dp_tx_desc_release() - Release Tx Descriptor
195  * @tx_desc : Tx Descriptor
196  * @desc_pool_id: Descriptor Pool ID
197  *
198  * Deallocate all resources attached to Tx descriptor and free the Tx
199  * descriptor.
200  *
201  * Return:
202  */
203 static void
204 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
205 {
206 	struct dp_pdev *pdev = tx_desc->pdev;
207 	struct dp_soc *soc;
208 	uint8_t comp_status = 0;
209 
210 	qdf_assert(pdev);
211 
212 	soc = pdev->soc;
213 
214 	if (tx_desc->frm_type == dp_tx_frm_tso)
215 		dp_tx_tso_desc_release(soc, tx_desc);
216 
217 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
218 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
219 
220 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
221 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
222 
223 	qdf_atomic_dec(&pdev->num_tx_outstanding);
224 
225 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
226 		qdf_atomic_dec(&pdev->num_tx_exception);
227 
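	/*
	 * Completions released by TQM carry a release reason; completions
	 * from any other source are treated as FW-routed.
	 */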
228 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
229 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
230 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
231 	else
232 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
233 
234 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
235 		"Tx Completion Release desc %d status %d outstanding %d",
236 		tx_desc->id, comp_status,
237 		qdf_atomic_read(&pdev->num_tx_outstanding));
238 
239 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
240 	return;
241 }
242 
243 /**
244  * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
245  * @vdev: DP vdev Handle
246  * @nbuf: skb
247  *
248  * Prepares and fills HTT metadata in the frame pre-header for special frames
249  * that should be transmitted using varying transmit parameters.
250  * There are 2 VDEV modes that currently need this special metadata -
251  *  1) Mesh Mode
252  *  2) DSRC Mode
253  *
254  * Return: HTT metadata size
255  *
256  */
257 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
258 		uint32_t *meta_data)
259 {
260 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
261 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
262 
263 	uint8_t htt_desc_size;
264 
265 	/* Size rounded off to a multiple of 8 bytes */
266 	uint8_t htt_desc_size_aligned;
267 
268 	uint8_t *hdr = NULL;
269 
270 	/*
271 	 * Metadata - HTT MSDU Extension header
272 	 */
273 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
274 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
275 
276 	if (vdev->mesh_vdev) {
277 
278 		/* Fill and add HTT metaheader */
279 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
280 		if (hdr == NULL) {
281 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
282 					"Error in filling HTT metadata");
283 
284 			return 0;
285 		}
286 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
287 
288 	} else if (vdev->opmode == wlan_op_mode_ocb) {
289 		/* Todo - Add support for DSRC */
290 	}
291 
292 	return htt_desc_size_aligned;
293 }
294 
295 /**
296  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
297  * @tso_seg: TSO segment to process
298  * @ext_desc: Pointer to MSDU extension descriptor
299  *
300  * Return: void
301  */
302 #if defined(FEATURE_TSO)
303 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
304 		void *ext_desc)
305 {
306 	uint8_t num_frag;
307 	uint32_t tso_flags;
308 
309 	/*
310 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
311 	 * tcp_flag_mask
312 	 *
313 	 * Checksum enable flags are set in TCL descriptor and not in Extension
314 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
315 	 */
316 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
317 
318 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
319 
320 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
321 		tso_seg->tso_flags.ip_len);
322 
323 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
324 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
325 
326 
327 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
328 		uint32_t lo = 0;
329 		uint32_t hi = 0;
330 
331 		qdf_dmaaddr_to_32s(
332 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
333 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
334 			tso_seg->tso_frags[num_frag].length);
335 	}
336 
337 	return;
338 }
339 #else
340 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
341 		void *ext_desc)
342 {
343 	return;
344 }
345 #endif
346 
347 #if defined(FEATURE_TSO)
348 /**
349  * dp_tx_free_tso_seg() - Loop through the tso segments
350  *                        allocated and free them
351  *
352  * @soc: soc handle
353  * @free_seg: list of tso segments
354  * @msdu_info: msdu descriptor
355  *
356  * Return - void
357  */
358 static void dp_tx_free_tso_seg(struct dp_soc *soc,
359 	struct qdf_tso_seg_elem_t *free_seg,
360 	struct dp_tx_msdu_info_s *msdu_info)
361 {
362 	struct qdf_tso_seg_elem_t *next_seg;
363 
364 	while (free_seg) {
365 		next_seg = free_seg->next;
366 		dp_tx_tso_desc_free(soc,
367 			msdu_info->tx_queue.desc_pool_id,
368 			free_seg);
369 		free_seg = next_seg;
370 	}
371 }
372 
373 /**
374  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
375  *                            allocated and free them
376  *
377  * @soc:  soc handle
378  * @free_seg: list of tso segments
379  * @msdu_info: msdu descriptor
380  * Return - void
381  */
382 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
383 	struct qdf_tso_num_seg_elem_t *free_seg,
384 	struct dp_tx_msdu_info_s *msdu_info)
385 {
386 	struct qdf_tso_num_seg_elem_t *next_seg;
387 
388 	while (free_seg) {
389 		next_seg = free_seg->next;
390 		dp_tso_num_seg_free(soc,
391 			msdu_info->tx_queue.desc_pool_id,
392 			free_seg);
393 		free_seg = next_seg;
394 	}
395 }
396 
397 /**
398  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
399  * @vdev: virtual device handle
400  * @msdu: network buffer
401  * @msdu_info: meta data associated with the msdu
402  *
403  * Return: QDF_STATUS_SUCCESS success
404  */
405 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
406 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
407 {
408 	struct qdf_tso_seg_elem_t *tso_seg;
409 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
410 	struct dp_soc *soc = vdev->pdev->soc;
411 	struct qdf_tso_info_t *tso_info;
412 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
413 
414 	tso_info = &msdu_info->u.tso_info;
415 	tso_info->curr_seg = NULL;
416 	tso_info->tso_seg_list = NULL;
417 	tso_info->num_segs = num_seg;
418 	msdu_info->frm_type = dp_tx_frm_tso;
419 	tso_info->tso_num_seg_list = NULL;
420 
421 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
422 
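	/*
	 * Pre-allocate one TSO segment element per segment; on allocation
	 * failure, return everything allocated so far to the free list.
	 */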
423 	while (num_seg) {
424 		tso_seg = dp_tx_tso_desc_alloc(
425 				soc, msdu_info->tx_queue.desc_pool_id);
426 		if (tso_seg) {
427 			tso_seg->next = tso_info->tso_seg_list;
428 			tso_info->tso_seg_list = tso_seg;
429 			num_seg--;
430 		} else {
431 			struct qdf_tso_seg_elem_t *free_seg =
432 				tso_info->tso_seg_list;
433 
434 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
435 
436 			return QDF_STATUS_E_NOMEM;
437 		}
438 	}
439 
440 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
441 
442 	tso_num_seg = dp_tso_num_seg_alloc(soc,
443 			msdu_info->tx_queue.desc_pool_id);
444 
445 	if (tso_num_seg) {
446 		tso_num_seg->next = tso_info->tso_num_seg_list;
447 		tso_info->tso_num_seg_list = tso_num_seg;
448 	} else {
449 		/* Bug: free tso_num_seg and tso_seg */
450 		/* Free the already allocated num of segments */
451 		struct qdf_tso_seg_elem_t *free_seg =
452 					tso_info->tso_seg_list;
453 
454 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
455 			__func__);
456 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
457 
458 		return QDF_STATUS_E_NOMEM;
459 	}
460 
461 	msdu_info->num_seg =
462 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
463 
464 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
465 			msdu_info->num_seg);
466 
467 	if (!(msdu_info->num_seg)) {
468 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
469 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
470 					msdu_info);
471 		return QDF_STATUS_E_INVAL;
472 	}
473 
474 	tso_info->curr_seg = tso_info->tso_seg_list;
475 
476 	return QDF_STATUS_SUCCESS;
477 }
478 #else
479 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
480 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
481 {
482 	return QDF_STATUS_E_NOMEM;
483 }
484 #endif
485 
486 /**
487  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
488  * @vdev: DP Vdev handle
489  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
490  * @desc_pool_id: Descriptor Pool ID
491  *
492  * Return:
493  */
494 static
495 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
496 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
497 {
498 	uint8_t i;
499 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
500 	struct dp_tx_seg_info_s *seg_info;
501 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
502 	struct dp_soc *soc = vdev->pdev->soc;
503 
504 	/* Allocate an extension descriptor */
505 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
506 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
507 
508 	if (!msdu_ext_desc) {
509 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
510 		return NULL;
511 	}
512 
513 	if (msdu_info->exception_fw &&
514 			qdf_unlikely(vdev->mesh_vdev)) {
515 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
516 				&msdu_info->meta_data[0],
517 				sizeof(struct htt_tx_msdu_desc_ext2_t));
518 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
519 	}
520 
521 	switch (msdu_info->frm_type) {
522 	case dp_tx_frm_sg:
523 	case dp_tx_frm_me:
524 	case dp_tx_frm_raw:
525 		seg_info = msdu_info->u.sg_info.curr_seg;
526 		/* Update the buffer pointers in MSDU Extension Descriptor */
527 		for (i = 0; i < seg_info->frag_cnt; i++) {
528 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
529 				seg_info->frags[i].paddr_lo,
530 				seg_info->frags[i].paddr_hi,
531 				seg_info->frags[i].len);
532 		}
533 
534 		break;
535 
536 	case dp_tx_frm_tso:
537 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
538 				&cached_ext_desc[0]);
539 		break;
540 
541 
542 	default:
543 		break;
544 	}
545 
546 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
547 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
548 
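	/* Copy the locally prepared descriptor into the DMA-visible MSDU
	 * extension descriptor memory that the hardware reads.
	 */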
549 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
550 			msdu_ext_desc->vaddr);
551 
552 	return msdu_ext_desc;
553 }
554 
555 /**
556  * dp_tx_trace_pkt() - Trace TX packet at DP layer
557  *
558  * @skb: skb to be traced
559  * @msdu_id: msdu_id of the packet
560  * @vdev_id: vdev_id of the packet
561  *
562  * Return: None
563  */
564 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
565 			    uint8_t vdev_id)
566 {
567 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
568 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
569 	DPTRACE(qdf_dp_trace_ptr(skb,
570 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
571 				 QDF_TRACE_DEFAULT_PDEV_ID,
572 				 qdf_nbuf_data_addr(skb),
573 				 sizeof(qdf_nbuf_data(skb)),
574 				 msdu_id, vdev_id));
575 
576 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
577 
578 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
579 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
580 				      msdu_id, QDF_TX));
581 }
582 
583 /**
584  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
585  * @vdev: DP vdev handle
586  * @nbuf: skb
587  * @desc_pool_id: Descriptor pool ID
588  * @msdu_info: MSDU info to be setup in the Tx descriptor
589  * @tx_exc_metadata: Handle that holds exception path metadata
590  * Allocate and prepare Tx descriptor with msdu information.
591  *
592  * Return: Pointer to Tx Descriptor on success,
593  *         NULL on failure
594  */
595 static
596 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
597 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
598 		struct dp_tx_msdu_info_s *msdu_info,
599 		struct cdp_tx_exception_metadata *tx_exc_metadata)
600 {
601 	uint8_t align_pad;
602 	uint8_t is_exception = 0;
603 	uint8_t htt_hdr_size;
604 	struct ether_header *eh;
605 	struct dp_tx_desc_s *tx_desc;
606 	struct dp_pdev *pdev = vdev->pdev;
607 	struct dp_soc *soc = pdev->soc;
608 
609 	/* Allocate software Tx descriptor */
610 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
611 	if (qdf_unlikely(!tx_desc)) {
612 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
613 		return NULL;
614 	}
615 
616 	/* Flow control/Congestion Control counters */
617 	qdf_atomic_inc(&pdev->num_tx_outstanding);
618 
619 	/* Initialize the SW tx descriptor */
620 	tx_desc->nbuf = nbuf;
621 	tx_desc->frm_type = dp_tx_frm_std;
622 	tx_desc->tx_encap_type = (tx_exc_metadata ?
623 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
624 	tx_desc->vdev = vdev;
625 	tx_desc->pdev = pdev;
626 	tx_desc->msdu_ext_desc = NULL;
627 	tx_desc->pkt_offset = 0;
628 
629 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
630 
631 	/*
632 	 * For special modes (vdev_type == ocb or mesh), data frames should be
633 	 * transmitted using varying transmit parameters (tx spec) which include
634 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
635 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
636 	 * These frames are sent as exception packets to firmware.
637 	 *
638 	 * HW requirement is that metadata should always point to a
639 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
640 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
641 	 *  to get 8-byte aligned start address along with align_pad added
642 	 *
643 	 *  |-----------------------------|
644 	 *  |                             |
645 	 *  |-----------------------------| <-----Buffer Pointer Address given
646 	 *  |                             |  ^    in HW descriptor (aligned)
647 	 *  |       HTT Metadata          |  |
648 	 *  |                             |  |
649 	 *  |                             |  | Packet Offset given in descriptor
650 	 *  |                             |  |
651 	 *  |-----------------------------|  |
652 	 *  |       Alignment Pad         |  v
653 	 *  |-----------------------------| <----- Actual buffer start address
654 	 *  |        SKB Data             |           (Unaligned)
655 	 *  |                             |
656 	 *  |                             |
657 	 *  |                             |
658 	 *  |                             |
659 	 *  |                             |
660 	 *  |-----------------------------|
661 	 */
662 	if (qdf_unlikely((msdu_info->exception_fw)) ||
663 				(vdev->opmode == wlan_op_mode_ocb)) {
664 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
665 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
666 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
667 					"qdf_nbuf_push_head failed");
668 			goto failure;
669 		}
670 
671 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
672 				msdu_info->meta_data);
673 		if (htt_hdr_size == 0)
674 			goto failure;
675 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
676 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
677 		is_exception = 1;
678 	}
679 
680 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
681 				qdf_nbuf_map(soc->osdev, nbuf,
682 					QDF_DMA_TO_DEVICE))) {
683 		/* Handle failure */
684 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
685 				"qdf_nbuf_map failed");
686 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
687 		goto failure;
688 	}
689 
690 	if (qdf_unlikely(vdev->nawds_enabled)) {
691 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
692 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
693 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
694 			is_exception = 1;
695 		}
696 	}
697 
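	/*
	 * When TQM_BYPASS_WAR is enabled the condition below is compiled out
	 * and every frame is routed to FW; otherwise only exception frames are.
	 */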
698 #if !TQM_BYPASS_WAR
699 	if (is_exception || tx_exc_metadata)
700 #endif
701 	{
702 		/* Temporary WAR due to TQM VP issues */
703 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
704 		qdf_atomic_inc(&pdev->num_tx_exception);
705 	}
706 
707 	return tx_desc;
708 
709 failure:
710 	dp_tx_desc_release(tx_desc, desc_pool_id);
711 	return NULL;
712 }
713 
714 /**
715  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
716  * @vdev: DP vdev handle
717  * @nbuf: skb
718  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
719  * @desc_pool_id : Descriptor Pool ID
720  *
721  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
722  * information. For frames with fragments, allocate and prepare
723  * an MSDU extension descriptor
724  *
725  * Return: Pointer to Tx Descriptor on success,
726  *         NULL on failure
727  */
728 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
729 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
730 		uint8_t desc_pool_id)
731 {
732 	struct dp_tx_desc_s *tx_desc;
733 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
734 	struct dp_pdev *pdev = vdev->pdev;
735 	struct dp_soc *soc = pdev->soc;
736 
737 	/* Allocate software Tx descriptor */
738 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
739 	if (!tx_desc) {
740 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
741 		return NULL;
742 	}
743 
744 	/* Flow control/Congestion Control counters */
745 	qdf_atomic_inc(&pdev->num_tx_outstanding);
746 
747 	/* Initialize the SW tx descriptor */
748 	tx_desc->nbuf = nbuf;
749 	tx_desc->frm_type = msdu_info->frm_type;
750 	tx_desc->tx_encap_type = vdev->tx_encap_type;
751 	tx_desc->vdev = vdev;
752 	tx_desc->pdev = pdev;
753 	tx_desc->pkt_offset = 0;
754 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
755 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
756 
757 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
758 
759 	/* Handle scattered frames - TSO/SG/ME */
760 	/* Allocate and prepare an extension descriptor for scattered frames */
761 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
762 	if (!msdu_ext_desc) {
763 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
764 				"%s Tx Extension Descriptor Alloc Fail",
765 				__func__);
766 		goto failure;
767 	}
768 
769 #if TQM_BYPASS_WAR
770 	/* Temporary WAR due to TQM VP issues */
771 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
772 	qdf_atomic_inc(&pdev->num_tx_exception);
773 #endif
774 	if (qdf_unlikely(msdu_info->exception_fw))
775 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
776 
777 	tx_desc->msdu_ext_desc = msdu_ext_desc;
778 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
779 
780 	return tx_desc;
781 failure:
782 	dp_tx_desc_release(tx_desc, desc_pool_id);
783 	return NULL;
784 }
785 
786 /**
787  * dp_tx_prepare_raw() - Prepare RAW packet TX
788  * @vdev: DP vdev handle
789  * @nbuf: buffer pointer
790  * @seg_info: Pointer to Segment info Descriptor to be prepared
791  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
792  *     descriptor
793  *
794  * Return:
795  */
796 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
797 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
798 {
799 	qdf_nbuf_t curr_nbuf = NULL;
800 	uint16_t total_len = 0;
801 	qdf_dma_addr_t paddr;
802 	int32_t i;
803 	int32_t mapped_buf_num = 0;
804 
805 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
806 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
807 
808 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
809 
810 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
811 	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
812 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
813 
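	/* Walk the chained nbufs of the raw frame, DMA-map each one and
	 * record it as a fragment of a single segment.
	 */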
814 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
815 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
816 
817 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
818 					QDF_DMA_TO_DEVICE)) {
819 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
820 				"%s dma map error ", __func__);
821 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
822 			mapped_buf_num = i;
823 			goto error;
824 		}
825 
826 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
827 		seg_info->frags[i].paddr_lo = paddr;
828 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
829 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
830 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
831 		total_len += qdf_nbuf_len(curr_nbuf);
832 	}
833 
834 	seg_info->frag_cnt = i;
835 	seg_info->total_len = total_len;
836 	seg_info->next = NULL;
837 
838 	sg_info->curr_seg = seg_info;
839 
840 	msdu_info->frm_type = dp_tx_frm_raw;
841 	msdu_info->num_seg = 1;
842 
843 	return nbuf;
844 
845 error:
846 	i = 0;
847 	while (nbuf) {
848 		curr_nbuf = nbuf;
849 		if (i < mapped_buf_num) {
850 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
851 			i++;
852 		}
853 		nbuf = qdf_nbuf_next(nbuf);
854 		qdf_nbuf_free(curr_nbuf);
855 	}
856 	return NULL;
857 
858 }
859 
860 /**
861  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
862  * @soc: DP Soc Handle
863  * @vdev: DP vdev handle
864  * @tx_desc: Tx Descriptor Handle
865  * @tid: TID from HLOS for overriding default DSCP-TID mapping
866  * @fw_metadata: Metadata to send to Target Firmware along with frame
867  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
868  * @tx_exc_metadata: Handle that holds exception path meta data
869  *
870  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
871  *  from software Tx descriptor
872  *
873  * Return:
874  */
875 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
876 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
877 				   uint16_t fw_metadata, uint8_t ring_id,
878 				   struct cdp_tx_exception_metadata
879 					*tx_exc_metadata)
880 {
881 	uint8_t type;
882 	uint16_t length;
883 	void *hal_tx_desc, *hal_tx_desc_cached;
884 	qdf_dma_addr_t dma_addr;
885 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
886 
887 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
888 			tx_exc_metadata->sec_type : vdev->sec_type);
889 
890 	/* Return Buffer Manager ID */
891 	uint8_t bm_id = ring_id;
892 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
893 
894 	hal_tx_desc_cached = (void *) cached_desc;
895 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
896 
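	/* Fragmented frames (TSO/SG/ME/RAW) hand HW the MSDU extension
	 * descriptor; single-buffer frames hand it the mapped nbuf directly.
	 */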
897 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
898 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
899 		type = HAL_TX_BUF_TYPE_EXT_DESC;
900 		dma_addr = tx_desc->msdu_ext_desc->paddr;
901 	} else {
902 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
903 		type = HAL_TX_BUF_TYPE_BUFFER;
904 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
905 	}
906 
907 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
908 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
909 			dma_addr, bm_id, tx_desc->id, type);
910 
911 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
912 		return QDF_STATUS_E_RESOURCES;
913 
914 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
915 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
916 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
917 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
918 				HAL_TX_DESC_DEFAULT_LMAC_ID);
919 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
920 					  vdev->dscp_tid_map_id);
921 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
922 			sec_type_map[sec_type]);
923 
924 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
925 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
926 			__func__, length, type, (uint64_t)dma_addr,
927 			tx_desc->pkt_offset, tx_desc->id);
928 
929 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
930 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
931 
932 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
933 			vdev->hal_desc_addr_search_flags);
934 
935 	/* verify checksum offload configuration */
936 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
937 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
938 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
939 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
940 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
941 	}
942 
943 	if (tid != HTT_TX_EXT_TID_INVALID)
944 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
945 
946 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
947 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
948 
949 
950 	/* Sync cached descriptor with HW */
951 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
952 
953 	if (!hal_tx_desc) {
954 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
955 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
956 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
957 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
958 		return QDF_STATUS_E_RESOURCES;
959 	}
960 
961 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
962 
963 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
964 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
965 
966 	/*
967 	 * If one packet is enqueued in HW, PM usage count needs to be
968 	 * incremented by one to prevent future runtime suspend. This
969 	 * should be tied with the success of enqueuing. It will be
970 	 * decremented after the packet has been sent.
971 	 */
972 	hif_pm_runtime_get_noresume(soc->hif_handle);
973 
974 	return QDF_STATUS_SUCCESS;
975 }
976 
977 
978 /**
979  * dp_cce_classify() - Classify the frame based on CCE rules
980  * @vdev: DP vdev handle
981  * @nbuf: skb
982  *
983  * Classify frames based on CCE rules
984  * Return: bool (true if classified,
985  *               else false)
986  */
987 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
988 {
989 	struct ether_header *eh = NULL;
990 	uint16_t   ether_type;
991 	qdf_llc_t *llcHdr;
992 	qdf_nbuf_t nbuf_clone = NULL;
993 	qdf_dot3_qosframe_t *qos_wh = NULL;
994 
995 	/* for mesh packets don't do any classification */
996 	if (qdf_unlikely(vdev->mesh_vdev))
997 		return false;
998 
999 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1000 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1001 		ether_type = eh->ether_type;
1002 		llcHdr = (qdf_llc_t *)(nbuf->data +
1003 					sizeof(struct ether_header));
1004 	} else {
1005 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1006 		/* For encrypted packets don't do any classification */
1007 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1008 			return false;
1009 
1010 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1011 			if (qdf_unlikely(
1012 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1013 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1014 
1015 				ether_type = *(uint16_t *)(nbuf->data
1016 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1017 						+ sizeof(qdf_llc_t)
1018 						- sizeof(ether_type));
1019 				llcHdr = (qdf_llc_t *)(nbuf->data +
1020 						QDF_IEEE80211_4ADDR_HDR_LEN);
1021 			} else {
1022 				ether_type = *(uint16_t *)(nbuf->data
1023 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1024 						+ sizeof(qdf_llc_t)
1025 						- sizeof(ether_type));
1026 				llcHdr = (qdf_llc_t *)(nbuf->data +
1027 					QDF_IEEE80211_3ADDR_HDR_LEN);
1028 			}
1029 
1030 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1031 				&& (ether_type ==
1032 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1033 
1034 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1035 				return true;
1036 			}
1037 		}
1038 
1039 		return false;
1040 	}
1041 
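	/*
	 * For LLC/SNAP or VLAN encapsulated frames, work on a clone with the
	 * extra headers pulled off so the protocol checks below operate on a
	 * plain Ethernet/IP frame.
	 */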
1042 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1043 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1044 				sizeof(*llcHdr));
1045 		nbuf_clone = qdf_nbuf_clone(nbuf);
1046 		if (qdf_unlikely(nbuf_clone)) {
1047 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1048 
1049 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1050 				qdf_nbuf_pull_head(nbuf_clone,
1051 						sizeof(qdf_net_vlanhdr_t));
1052 			}
1053 		}
1054 	} else {
1055 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1056 			nbuf_clone = qdf_nbuf_clone(nbuf);
1057 			if (qdf_unlikely(nbuf_clone)) {
1058 				qdf_nbuf_pull_head(nbuf_clone,
1059 					sizeof(qdf_net_vlanhdr_t));
1060 			}
1061 		}
1062 	}
1063 
1064 	if (qdf_unlikely(nbuf_clone))
1065 		nbuf = nbuf_clone;
1066 
1067 
1068 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1069 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1070 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1071 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1072 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1073 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1074 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1075 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1076 		if (qdf_unlikely(nbuf_clone != NULL))
1077 			qdf_nbuf_free(nbuf_clone);
1078 		return true;
1079 	}
1080 
1081 	if (qdf_unlikely(nbuf_clone != NULL))
1082 		qdf_nbuf_free(nbuf_clone);
1083 
1084 	return false;
1085 }
1086 
1087 /**
1088  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1089  * @vdev: DP vdev handle
1090  * @nbuf: skb
1091  *
1092  * Extract the DSCP or PCP information from frame and map into TID value.
1093  * Software based TID classification is required when more than 2 DSCP-TID
1094  * mapping tables are needed.
1095  * Hardware supports 2 DSCP-TID mapping tables
1096  *
1097  * Return: void
1098  */
1099 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1100 		struct dp_tx_msdu_info_s *msdu_info)
1101 {
1102 	uint8_t tos = 0, dscp_tid_override = 0;
1103 	uint8_t *hdr_ptr, *L3datap;
1104 	uint8_t is_mcast = 0;
1105 	struct ether_header *eh = NULL;
1106 	qdf_ethervlan_header_t *evh = NULL;
1107 	uint16_t   ether_type;
1108 	qdf_llc_t *llcHdr;
1109 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1110 
1111 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1112 
1113 	if (vdev->dscp_tid_map_id <= 1)
1114 		return;
1115 
1116 	/* for mesh packets don't do any classification */
1117 	if (qdf_unlikely(vdev->mesh_vdev))
1118 		return;
1119 
1120 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1121 		eh = (struct ether_header *) nbuf->data;
1122 		hdr_ptr = eh->ether_dhost;
1123 		L3datap = hdr_ptr + sizeof(struct ether_header);
1124 	} else {
1125 		qdf_dot3_qosframe_t *qos_wh =
1126 			(qdf_dot3_qosframe_t *) nbuf->data;
1127 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1128 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1129 		return;
1130 	}
1131 
1132 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1133 	ether_type = eh->ether_type;
1134 
1135 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1136 	/*
1137 	 * Check if packet is dot3 or eth2 type.
1138 	 */
1139 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1140 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1141 				sizeof(*llcHdr));
1142 
1143 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1144 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1145 				sizeof(*llcHdr);
1146 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1147 					+ sizeof(*llcHdr) +
1148 					sizeof(qdf_net_vlanhdr_t));
1149 		} else {
1150 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1151 				sizeof(*llcHdr);
1152 		}
1153 	} else {
1154 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1155 			evh = (qdf_ethervlan_header_t *) eh;
1156 			ether_type = evh->ether_type;
1157 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1158 		}
1159 	}
1160 
1161 	/*
1162 	 * Find priority from IP TOS DSCP field
1163 	 */
1164 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1165 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1166 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1167 			/* Only for unicast frames */
1168 			if (!is_mcast) {
1169 				/* send it on VO queue */
1170 				msdu_info->tid = DP_VO_TID;
1171 			}
1172 		} else {
1173 			/*
1174 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1175 			 * from TOS byte.
1176 			 */
1177 			tos = ip->ip_tos;
1178 			dscp_tid_override = 1;
1179 
1180 		}
1181 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1182 		/* TODO
1183 		 * use flowlabel
1184 		 *igmpmld cases to be handled in phase 2
1185 		 */
1186 		unsigned long ver_pri_flowlabel;
1187 		unsigned long pri;
1188 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1189 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1190 			DP_IPV6_PRIORITY_SHIFT;
1191 		tos = pri;
1192 		dscp_tid_override = 1;
1193 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1194 		msdu_info->tid = DP_VO_TID;
1195 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1196 		/* Only for unicast frames */
1197 		if (!is_mcast) {
1198 			/* send ucast arp on VO queue */
1199 			msdu_info->tid = DP_VO_TID;
1200 		}
1201 	}
1202 
1203 	/*
1204 	 * Assign all MCAST packets to BE
1205 	 */
1206 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1207 		if (is_mcast) {
1208 			tos = 0;
1209 			dscp_tid_override = 1;
1210 		}
1211 	}
1212 
1213 	if (dscp_tid_override == 1) {
1214 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1215 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1216 	}
1217 	return;
1218 }
1219 
1220 #ifdef CONVERGED_TDLS_ENABLE
1221 /**
1222  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1223  * @tx_desc: TX descriptor
1224  *
1225  * Return: None
1226  */
1227 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1228 {
1229 	if (tx_desc->vdev && tx_desc->vdev->is_tdls_frame) {
1230 		tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1231 		tx_desc->vdev->is_tdls_frame = false;
1232 	}
1234 }
1235 
1236 /**
1237  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1238  * @tx_desc: TX descriptor
1239  * @vdev: datapath vdev handle
1240  *
1241  * Return: None
1242  */
1243 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1244 				  struct dp_vdev *vdev)
1245 {
1246 	struct hal_tx_completion_status ts = {0};
1247 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1248 
1249 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1250 	if (vdev->tx_non_std_data_callback.func) {
1251 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1252 		vdev->tx_non_std_data_callback.func(
1253 				vdev->tx_non_std_data_callback.ctxt,
1254 				nbuf, ts.status);
1255 		return;
1256 	}
1257 }
1258 #endif
1259 
1260 /**
1261  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1262  * @vdev: DP vdev handle
1263  * @nbuf: skb
1264  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1265  * @meta_data: Metadata to the fw
1266  * @tx_q: Tx queue to be used for this Tx frame
1267  * @peer_id: peer_id of the peer in case of NAWDS frames
1268  * @tx_exc_metadata: Handle that holds exception path metadata
1269  *
1270  * Return: NULL on success,
1271  *         nbuf when it fails to send
1272  */
1273 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1274 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1275 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1276 {
1277 	struct dp_pdev *pdev = vdev->pdev;
1278 	struct dp_soc *soc = pdev->soc;
1279 	struct dp_tx_desc_s *tx_desc;
1280 	QDF_STATUS status;
1281 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1282 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1283 	uint16_t htt_tcl_metadata = 0;
1284 	uint8_t tid = msdu_info->tid;
1285 
1286 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1287 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1288 			msdu_info, tx_exc_metadata);
1289 	if (!tx_desc) {
1290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1291 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1292 			  __func__, vdev, tx_q->desc_pool_id);
1293 		return nbuf;
1294 	}
1295 
1296 	if (qdf_unlikely(soc->cce_disable)) {
1297 		if (dp_cce_classify(vdev, nbuf) == true) {
1298 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1299 			tid = DP_VO_TID;
1300 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1301 		}
1302 	}
1303 
1304 	dp_tx_update_tdls_flags(tx_desc);
1305 
1306 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1307 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1308 				"%s %d : HAL RING Access Failed -- %pK",
1309 				__func__, __LINE__, hal_srng);
1310 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1311 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1312 		goto fail_return;
1313 	}
1314 
1315 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1316 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1317 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1318 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1319 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1320 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1321 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1322 				peer_id);
1323 	} else
1324 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1325 
1326 
1327 	if (msdu_info->exception_fw) {
1328 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1329 	}
1330 
1331 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1332 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1333 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1334 
1335 	if (status != QDF_STATUS_SUCCESS) {
1336 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1337 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1338 			  __func__, tx_desc, tx_q->ring_id);
1339 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1340 		goto fail_return;
1341 	}
1342 
1343 	nbuf = NULL;
1344 
1345 fail_return:
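	/* If the runtime-PM get succeeds (target awake), flush ring access so
	 * the TCL head pointer is updated; otherwise only reap the ring.
	 */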
1346 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1347 		hal_srng_access_end(soc->hal_soc, hal_srng);
1348 		hif_pm_runtime_put(soc->hif_handle);
1349 	} else {
1350 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1351 	}
1352 
1353 	return nbuf;
1354 }
1355 
1356 /**
1357  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1358  * @vdev: DP vdev handle
1359  * @nbuf: skb
1360  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1361  *
1362  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1363  *
1364  * Return: NULL on success,
1365  *         nbuf when it fails to send
1366  */
1367 #if QDF_LOCK_STATS
1368 static noinline
1369 #else
1370 static
1371 #endif
1372 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1373 				    struct dp_tx_msdu_info_s *msdu_info)
1374 {
1375 	uint8_t i;
1376 	struct dp_pdev *pdev = vdev->pdev;
1377 	struct dp_soc *soc = pdev->soc;
1378 	struct dp_tx_desc_s *tx_desc;
1379 	bool is_cce_classified = false;
1380 	QDF_STATUS status;
1381 	uint16_t htt_tcl_metadata = 0;
1382 
1383 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1384 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1385 
1386 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1387 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1388 				"%s %d : HAL RING Access Failed -- %pK",
1389 				__func__, __LINE__, hal_srng);
1390 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1391 		return nbuf;
1392 	}
1393 
1394 	if (qdf_unlikely(soc->cce_disable)) {
1395 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1396 		if (is_cce_classified) {
1397 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1398 			msdu_info->tid = DP_VO_TID;
1399 		}
1400 	}
1401 
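	/* For ME (multicast-to-unicast) frames, transmit the per-client copies
	 * linked through sg_info rather than the original nbuf.
	 */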
1402 	if (msdu_info->frm_type == dp_tx_frm_me)
1403 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1404 
1405 	i = 0;
1406 	/* Print statement to track i and num_seg */
1407 	/*
1408 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1409 	 * descriptors using information in msdu_info
1410 	 */
1411 	while (i < msdu_info->num_seg) {
1412 		/*
1413 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1414 		 * descriptor
1415 		 */
1416 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1417 				tx_q->desc_pool_id);
1418 
1419 		if (!tx_desc) {
1420 			if (msdu_info->frm_type == dp_tx_frm_me) {
1421 				dp_tx_me_free_buf(pdev,
1422 					(void *)(msdu_info->u.sg_info
1423 						.curr_seg->frags[0].vaddr));
1424 			}
1425 			goto done;
1426 		}
1427 
1428 		if (msdu_info->frm_type == dp_tx_frm_me) {
1429 			tx_desc->me_buffer =
1430 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1431 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1432 		}
1433 
1434 		if (is_cce_classified)
1435 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1436 
1437 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1438 		if (msdu_info->exception_fw) {
1439 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1440 		}
1441 
1442 		/*
1443 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1444 		 */
1445 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1446 			htt_tcl_metadata, tx_q->ring_id, NULL);
1447 
1448 		if (status != QDF_STATUS_SUCCESS) {
1449 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1450 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1451 				  __func__, tx_desc, tx_q->ring_id);
1452 
1453 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1454 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1455 
1456 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1457 			goto done;
1458 		}
1459 
1460 		/*
1461 		 * TODO
1462 		 * if tso_info structure can be modified to have curr_seg
1463 		 * as first element, following 2 blocks of code (for TSO and SG)
1464 		 * can be combined into 1
1465 		 */
1466 
1467 		/*
1468 		 * For frames with multiple segments (TSO, ME), jump to next
1469 		 * segment.
1470 		 */
1471 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1472 			if (msdu_info->u.tso_info.curr_seg->next) {
1473 				msdu_info->u.tso_info.curr_seg =
1474 					msdu_info->u.tso_info.curr_seg->next;
1475 
1476 				/*
1477 				 * If this is a jumbo nbuf, then increment the number of
1478 				 * nbuf users for each additional segment of the msdu.
1479 				 * This will ensure that the skb is freed only after
1480 				 * receiving tx completion for all segments of an nbuf
1481 				 */
1482 				qdf_nbuf_inc_users(nbuf);
1483 
1484 				/* Check with MCL if this is needed */
1485 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1486 			}
1487 		}
1488 
1489 		/*
1490 		 * For Multicast-Unicast converted packets,
1491 		 * each converted frame (for a client) is represented as
1492 		 * 1 segment
1493 		 */
1494 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1495 				(msdu_info->frm_type == dp_tx_frm_me)) {
1496 			if (msdu_info->u.sg_info.curr_seg->next) {
1497 				msdu_info->u.sg_info.curr_seg =
1498 					msdu_info->u.sg_info.curr_seg->next;
1499 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1500 			}
1501 		}
1502 		i++;
1503 	}
1504 
1505 	nbuf = NULL;
1506 
1507 done:
1508 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1509 		hal_srng_access_end(soc->hal_soc, hal_srng);
1510 		hif_pm_runtime_put(soc->hif_handle);
1511 	} else {
1512 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1513 	}
1514 
1515 	return nbuf;
1516 }
1517 
1518 /**
1519  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
1520  *                     for SG frames
1521  * @vdev: DP vdev handle
1522  * @nbuf: skb
1523  * @seg_info: Pointer to Segment info Descriptor to be prepared
1524  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1525  *
1526  * Return: NULL on success,
1527  *         nbuf when it fails to send
1528  */
1529 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1530 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1531 {
1532 	uint32_t cur_frag, nr_frags;
1533 	qdf_dma_addr_t paddr;
1534 	struct dp_tx_sg_info_s *sg_info;
1535 
1536 	sg_info = &msdu_info->u.sg_info;
1537 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1538 
1539 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1540 				QDF_DMA_TO_DEVICE)) {
1541 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1542 				"dma map error");
1543 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1544 
1545 		qdf_nbuf_free(nbuf);
1546 		return NULL;
1547 	}
1548 
1549 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1550 	seg_info->frags[0].paddr_lo = paddr;
1551 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1552 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1553 	seg_info->frags[0].vaddr = (void *) nbuf;
1554 
1555 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1556 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1557 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1558 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1559 					"frag dma map error");
1560 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1561 			qdf_nbuf_free(nbuf);
1562 			return NULL;
1563 		}
1564 
1565 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1566 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1567 		seg_info->frags[cur_frag + 1].paddr_hi =
1568 			((uint64_t) paddr) >> 32;
1569 		seg_info->frags[cur_frag + 1].len =
1570 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1571 	}
1572 
1573 	seg_info->frag_cnt = (cur_frag + 1);
1574 	seg_info->total_len = qdf_nbuf_len(nbuf);
1575 	seg_info->next = NULL;
1576 
1577 	sg_info->curr_seg = seg_info;
1578 
1579 	msdu_info->frm_type = dp_tx_frm_sg;
1580 	msdu_info->num_seg = 1;
1581 
1582 	return nbuf;
1583 }
1584 
1585 #ifdef MESH_MODE_SUPPORT
1586 
1587 /**
1588  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1589  *				      and prepare msdu_info for mesh frames.
1590  * @vdev: DP vdev handle
1591  * @nbuf: skb
1592  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1593  *
1594  * Return: NULL on failure,
1595  *         nbuf when extracted successfully
1596  */
1597 static
1598 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1599 				struct dp_tx_msdu_info_s *msdu_info)
1600 {
1601 	struct meta_hdr_s *mhdr;
1602 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1603 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1604 
1605 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1606 
1607 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1608 		msdu_info->exception_fw = 0;
1609 		goto remove_meta_hdr;
1610 	}
1611 
1612 	msdu_info->exception_fw = 1;
1613 
1614 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1615 
1616 	meta_data->host_tx_desc_pool = 1;
1617 	meta_data->update_peer_cache = 1;
1618 	meta_data->learning_frame = 1;
1619 
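	/* When the meta header carries a fixed rate (auto-rate not set), copy
	 * the per-frame power/MCS/NSS/preamble/retry/BW overrides into the FW
	 * descriptor and mark each field as valid.
	 */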
1620 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1621 		meta_data->power = mhdr->power;
1622 
1623 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1624 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1625 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1626 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1627 
1628 		meta_data->dyn_bw = 1;
1629 
1630 		meta_data->valid_pwr = 1;
1631 		meta_data->valid_mcs_mask = 1;
1632 		meta_data->valid_nss_mask = 1;
1633 		meta_data->valid_preamble_type  = 1;
1634 		meta_data->valid_retries = 1;
1635 		meta_data->valid_bw_info = 1;
1636 	}
1637 
1638 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1639 		meta_data->encrypt_type = 0;
1640 		meta_data->valid_encrypt_type = 1;
1641 		meta_data->learning_frame = 0;
1642 	}
1643 
1644 	meta_data->valid_key_flags = 1;
1645 	meta_data->key_flags = (mhdr->keyix & 0x3);
1646 
1647 remove_meta_hdr:
1648 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1649 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1650 				"qdf_nbuf_pull_head failed");
1651 		qdf_nbuf_free(nbuf);
1652 		return NULL;
1653 	}
1654 
1655 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1656 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1657 	else
1658 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1659 
1660 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1661 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1662 			" tid %d to_fw %d",
1663 			__func__, msdu_info->meta_data[0],
1664 			msdu_info->meta_data[1],
1665 			msdu_info->meta_data[2],
1666 			msdu_info->meta_data[3],
1667 			msdu_info->meta_data[4],
1668 			msdu_info->meta_data[5],
1669 			msdu_info->tid, msdu_info->exception_fw);
1670 
1671 	return nbuf;
1672 }
1673 #else
1674 static
1675 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1676 				struct dp_tx_msdu_info_s *msdu_info)
1677 {
1678 	return nbuf;
1679 }
1680 
1681 #endif
1682 
1683 #ifdef DP_FEATURE_NAWDS_TX
1684 /**
1685  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1686  * @vdev: dp_vdev handle
1687  * @nbuf: skb
1688  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1689  * @tx_q: Tx queue to be used for this Tx frame
1690  * @meta_data: Meta data for mesh
1691  * @peer_id: peer_id of the peer in case of NAWDS frames
1692  *
1693  * Return: NULL on success, nbuf on failure
1694  */
1695 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1696 		struct dp_tx_msdu_info_s *msdu_info)
1697 {
1698 	struct dp_peer *peer = NULL;
1699 	struct dp_soc *soc = vdev->pdev->soc;
1700 	struct dp_ast_entry *ast_entry = NULL;
1701 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1702 	uint16_t peer_id = HTT_INVALID_PEER;
1703 
1704 	struct dp_peer *sa_peer = NULL;
1705 	qdf_nbuf_t nbuf_copy;
1706 
1707 	qdf_spin_lock_bh(&(soc->ast_lock));
1708 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1709 
1710 	if (ast_entry)
1711 		sa_peer = ast_entry->peer;
1712 
1713 	qdf_spin_unlock_bh(&(soc->ast_lock));
1714 
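	/* Replicate the frame to every NAWDS-enabled peer, skipping the peer
	 * the frame was received from to avoid looping it back.
	 */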
1715 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1716 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1717 				(peer->nawds_enabled)) {
1718 			if (sa_peer == peer) {
1719 				QDF_TRACE(QDF_MODULE_ID_DP,
1720 						QDF_TRACE_LEVEL_DEBUG,
1721 						" %s: broadcast multicast packet",
1722 						 __func__);
1723 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1724 				continue;
1725 			}
1726 
1727 			nbuf_copy = qdf_nbuf_copy(nbuf);
1728 			if (!nbuf_copy) {
1729 				QDF_TRACE(QDF_MODULE_ID_DP,
1730 					  QDF_TRACE_LEVEL_ERROR, "nbuf copy failed");
1731 				continue;
1732 			}
1733 
1734 			peer_id = peer->peer_ids[0];
1735 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1736 					msdu_info, peer_id, NULL);
1737 			if (nbuf_copy != NULL) {
1738 				qdf_nbuf_free(nbuf_copy);
1739 				continue;
1740 			}
1741 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1742 						1, qdf_nbuf_len(nbuf));
1743 		}
1744 	}
1745 	if (peer_id == HTT_INVALID_PEER)
1746 		return nbuf;
1747 
1748 	return NULL;
1749 }
1750 #endif
1751 
1752 /**
1753  * dp_check_exc_metadata() - Checks if parameters are valid
1754  * @tx_exc - holds all exception path parameters
1755  *
1756  * Returns true when all the parameters are valid else false
1757  *
1758  */
1759 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1760 {
1761 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1762 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1763 	    tx_exc->sec_type > cdp_num_sec_types) {
1764 		return false;
1765 	}
1766 
1767 	return true;
1768 }
1769 
1770 /**
1771  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1772  * @vap_dev: DP vdev handle
1773  * @nbuf: skb
1774  * @tx_exc_metadata: Handle that holds exception path meta data
1775  *
1776  * Entry point for Core Tx layer (DP_TX) invoked from
1777  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1778  *
1779  * Return: NULL on success,
1780  *         nbuf when it fails to send
1781  */
1782 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1783 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1784 {
1785 	struct ether_header *eh = NULL;
1786 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1787 	struct dp_tx_msdu_info_s msdu_info;
1788 
1789 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1790 
1791 	msdu_info.tid = tx_exc_metadata->tid;
1792 
1793 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1794 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1795 			"%s , skb %pM",
1796 			__func__, nbuf->data);
1797 
1798 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1799 
1800 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1801 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1802 			"Invalid parameters in exception path");
1803 		goto fail;
1804 	}
1805 
1806 	/* Basic sanity checks for unsupported packets */
1807 
1808 	/* MESH mode */
1809 	if (qdf_unlikely(vdev->mesh_vdev)) {
1810 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1811 			"Mesh mode is not supported in exception path");
1812 		goto fail;
1813 	}
1814 
1815 	/* TSO or SG */
1816 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1817 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1818 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1819 			  "TSO and SG are not supported in exception path");
1820 
1821 		goto fail;
1822 	}
1823 
1824 	/* RAW */
1825 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1826 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1827 			  "Raw frame is not supported in exception path");
1828 		goto fail;
1829 	}
1830 
1831 
1832 	/* Mcast enhancement*/
1833 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1834 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1835 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1836 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1837 		}
1838 	}
1839 
1840 	/*
1841 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1843 	 * dedicated for data and 1 for command.
1844 	 * "queue_id" maps to one hardware ring.
1845 	 *  With each ring, we also associate a unique Tx descriptor pool
1846 	 *  to minimize lock contention for these resources.
1847 	 */
1848 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1849 
1850 	/* Reset the control block */
1851 	qdf_nbuf_reset_ctxt(nbuf);
1852 
1853 	/*  Single linear frame */
1854 	/*
1855 	 * If nbuf is a simple linear frame, use send_single function to
1856 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1857 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1858 	 */
1859 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1860 			tx_exc_metadata->peer_id, tx_exc_metadata);
1861 
1862 	return nbuf;
1863 
1864 fail:
1865 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1866 			"pkt send failed");
1867 	return nbuf;
1868 }
1869 
1870 /**
1871  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1872  * @vap_dev: DP vdev handle
1873  * @nbuf: skb
1874  *
1875  * Entry point for Core Tx layer (DP_TX) invoked from
1876  * hard_start_xmit in OSIF/HDD
1877  *
1878  * Return: NULL on success,
1879  *         nbuf when it fails to send
1880  */
1881 #ifdef MESH_MODE_SUPPORT
1882 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1883 {
1884 	struct meta_hdr_s *mhdr;
1885 	qdf_nbuf_t nbuf_mesh = NULL;
1886 	qdf_nbuf_t nbuf_clone = NULL;
1887 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1888 	uint8_t no_enc_frame = 0;
1889 
1890 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1891 	if (nbuf_mesh == NULL) {
1892 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1893 				"qdf_nbuf_unshare failed");
1894 		return nbuf;
1895 	}
1896 	nbuf = nbuf_mesh;
1897 
1898 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1899 
1900 	if ((vdev->sec_type != cdp_sec_type_none) &&
1901 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1902 		no_enc_frame = 1;
1903 
1904 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1905 		       !no_enc_frame) {
1906 		nbuf_clone = qdf_nbuf_clone(nbuf);
1907 		if (nbuf_clone == NULL) {
1908 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1909 				"qdf_nbuf_clone failed");
1910 			return nbuf;
1911 		}
1912 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1913 	}
1914 
1915 	if (nbuf_clone) {
1916 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1917 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1918 		} else {
1919 			qdf_nbuf_free(nbuf_clone);
1920 		}
1921 	}
1922 
1923 	if (no_enc_frame)
1924 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1925 	else
1926 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1927 
1928 	nbuf = dp_tx_send(vap_dev, nbuf);
1929 	if ((nbuf == NULL) && no_enc_frame) {
1930 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1931 	}
1932 
1933 	return nbuf;
1934 }
1935 
1936 #else
1937 
1938 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1939 {
1940 	return dp_tx_send(vap_dev, nbuf);
1941 }
1942 
1943 #endif
1944 
1945 /**
1946  * dp_tx_send() - Transmit a frame on a given VAP
1947  * @vap_dev: DP vdev handle
1948  * @nbuf: skb
1949  *
1950  * Entry point for Core Tx layer (DP_TX) invoked from
1951  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1952  * cases
1953  *
1954  * Return: NULL on success,
1955  *         nbuf when it fails to send
1956  */
1957 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1958 {
1959 	struct ether_header *eh = NULL;
1960 	struct dp_tx_msdu_info_s msdu_info;
1961 	struct dp_tx_seg_info_s seg_info;
1962 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1963 	uint16_t peer_id = HTT_INVALID_PEER;
1964 	qdf_nbuf_t nbuf_mesh = NULL;
1965 
1966 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1967 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1968 
1969 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1970 
1971 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1972 			"%s , skb %pM",
1973 			__func__, nbuf->data);
1974 
1975 	/*
1976 	 * Set Default Host TID value to invalid TID
1977 	 * (TID override disabled)
1978 	 */
1979 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1980 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1981 
1982 	if (qdf_unlikely(vdev->mesh_vdev)) {
1983 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1984 								&msdu_info);
1985 		if (nbuf_mesh == NULL) {
1986 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1987 					"Extracting mesh metadata failed");
1988 			return nbuf;
1989 		}
1990 		nbuf = nbuf_mesh;
1991 	}
1992 
1993 	/*
1994 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1996 	 * dedicated for data and 1 for command.
1997 	 * "queue_id" maps to one hardware ring.
1998 	 *  With each ring, we also associate a unique Tx descriptor pool
1999 	 *  to minimize lock contention for these resources.
2000 	 */
2001 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2002 
2003 	/*
2004 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2005 	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - DSCP-TID override table
2007 	 *
2008 	 * If we need a different DSCP-TID mapping for this vap,
2009 	 * call tid_classify to extract DSCP/ToS from frame and
2010 	 * map to a TID and store in msdu_info. This is later used
2011 	 * to fill in TCL Input descriptor (per-packet TID override).
2012 	 */
2013 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2014 
2015 	/* Reset the control block */
2016 	qdf_nbuf_reset_ctxt(nbuf);
2017 
2018 	/*
2019 	 * Classify the frame and call corresponding
2020 	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
2022 	 * into MSDU_INFO structure which is later used to fill
2023 	 * SW and HW descriptors.
2024 	 */
2025 	if (qdf_nbuf_is_tso(nbuf)) {
2026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2027 			  "%s TSO frame %pK", __func__, vdev);
2028 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2029 				qdf_nbuf_len(nbuf));
2030 
2031 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2032 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2033 					 qdf_nbuf_len(nbuf));
2034 			return nbuf;
2035 		}
2036 
2037 		goto send_multiple;
2038 	}
2039 
2040 	/* SG */
2041 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2042 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2043 
2044 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2045 			 "%s non-TSO SG frame %pK", __func__, vdev);
2046 
2047 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2048 				qdf_nbuf_len(nbuf));
2049 
2050 		goto send_multiple;
2051 	}
2052 
2053 #ifdef ATH_SUPPORT_IQUE
2054 	/* Mcast to Ucast Conversion*/
2055 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2056 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2057 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2058 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2059 				  "%s Mcast frm for ME %pK", __func__, vdev);
2060 
2061 			DP_STATS_INC_PKT(vdev,
2062 					tx_i.mcast_en.mcast_pkt, 1,
2063 					qdf_nbuf_len(nbuf));
2064 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2065 					QDF_STATUS_SUCCESS) {
2066 				return NULL;
2067 			}
2068 		}
2069 	}
2070 #endif
2071 
2072 	/* RAW */
2073 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2074 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2075 		if (nbuf == NULL)
2076 			return NULL;
2077 
2078 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2079 			  "%s Raw frame %pK", __func__, vdev);
2080 
2081 		goto send_multiple;
2082 
2083 	}
2084 
2085 	/*  Single linear frame */
2086 	/*
2087 	 * If nbuf is a simple linear frame, use send_single function to
2088 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2089 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2090 	 */
2091 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2092 
2093 	return nbuf;
2094 
2095 send_multiple:
2096 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2097 
2098 	return nbuf;
2099 }
2100 
2101 /**
2102  * dp_tx_reinject_handler() - Tx Reinject Handler
2103  * @tx_desc: software descriptor head pointer
2104  * @status : Tx completion status from HTT descriptor
2105  *
2106  * This function reinjects frames back to Target.
2107  * Todo - Host queue needs to be added
2108  *
2109  * Return: none
2110  */
2111 static
2112 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2113 {
2114 	struct dp_vdev *vdev;
2115 	struct dp_peer *peer = NULL;
2116 	uint32_t peer_id = HTT_INVALID_PEER;
2117 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2118 	qdf_nbuf_t nbuf_copy = NULL;
2119 	struct dp_tx_msdu_info_s msdu_info;
2120 	struct dp_peer *sa_peer = NULL;
2121 	struct dp_ast_entry *ast_entry = NULL;
2122 	struct dp_soc *soc = NULL;
2123 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2124 #ifdef WDS_VENDOR_EXTENSION
2125 	int is_mcast = 0, is_ucast = 0;
2126 	int num_peers_3addr = 0;
2127 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2128 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2129 #endif
2130 
2131 	vdev = tx_desc->vdev;
2132 	soc = vdev->pdev->soc;
2133 
2134 	qdf_assert(vdev);
2135 
2136 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2137 
2138 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2139 
2140 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2141 			"%s Tx reinject path", __func__);
2142 
2143 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2144 			qdf_nbuf_len(tx_desc->nbuf));
2145 
2146 	qdf_spin_lock_bh(&(soc->ast_lock));
2147 
2148 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2149 
2150 	if (ast_entry)
2151 		sa_peer = ast_entry->peer;
2152 
2153 	qdf_spin_unlock_bh(&(soc->ast_lock));
2154 
2155 #ifdef WDS_VENDOR_EXTENSION
2156 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2157 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2158 	} else {
2159 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2160 	}
2161 	is_ucast = !is_mcast;
2162 
2163 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2164 		if (peer->bss_peer)
2165 			continue;
2166 
		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
2173 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2174 			num_peers_3addr = 1;
2175 			break;
2176 		}
2177 	}
2178 #endif
2179 
2180 	if (qdf_unlikely(vdev->mesh_vdev)) {
2181 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2182 	} else {
2183 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2184 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2185 #ifdef WDS_VENDOR_EXTENSION
2186 			/*
			 * - if 3-addr STA, then send on BSS Peer
			 * - if Peer WDS enabled and it accepts 4-addr mcast,
			 *   send mcast on that peer only
			 * - if Peer WDS enabled and it accepts 4-addr ucast,
			 *   send ucast on that peer only
2192 			 */
2193 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2194 			 (peer->wds_enabled &&
2195 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2196 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2197 #else
2198 			((peer->bss_peer &&
2199 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2200 				 peer->nawds_enabled)) {
2201 #endif
2202 				peer_id = DP_INVALID_PEER;
2203 
2204 				if (peer->nawds_enabled) {
2205 					peer_id = peer->peer_ids[0];
2206 					if (sa_peer == peer) {
2207 						QDF_TRACE(
2208 							QDF_MODULE_ID_DP,
2209 							QDF_TRACE_LEVEL_DEBUG,
2210 							" %s: multicast packet",
2211 							__func__);
2212 						DP_STATS_INC(peer,
2213 							tx.nawds_mcast_drop, 1);
2214 						continue;
2215 					}
2216 				}
2217 
2218 				nbuf_copy = qdf_nbuf_copy(nbuf);
2219 
2220 				if (!nbuf_copy) {
2221 					QDF_TRACE(QDF_MODULE_ID_DP,
2222 						QDF_TRACE_LEVEL_DEBUG,
2223 						FL("nbuf copy failed"));
2224 					break;
2225 				}
2226 
2227 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2228 						nbuf_copy,
2229 						&msdu_info,
2230 						peer_id,
2231 						NULL);
2232 
2233 				if (nbuf_copy) {
2234 					QDF_TRACE(QDF_MODULE_ID_DP,
2235 						QDF_TRACE_LEVEL_DEBUG,
2236 						FL("pkt send failed"));
2237 					qdf_nbuf_free(nbuf_copy);
2238 				} else {
2239 					if (peer_id != DP_INVALID_PEER)
2240 						DP_STATS_INC_PKT(peer,
2241 							tx.nawds_mcast,
2242 							1, qdf_nbuf_len(nbuf));
2243 				}
2244 			}
2245 		}
2246 	}
2247 
2248 	if (vdev->nawds_enabled) {
2249 		peer_id = DP_INVALID_PEER;
2250 
2251 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2252 					1, qdf_nbuf_len(nbuf));
2253 
2254 		nbuf = dp_tx_send_msdu_single(vdev,
2255 				nbuf,
2256 				&msdu_info,
2257 				peer_id, NULL);
2258 
2259 		if (nbuf) {
2260 			QDF_TRACE(QDF_MODULE_ID_DP,
2261 				QDF_TRACE_LEVEL_DEBUG,
2262 				FL("pkt send failed"));
2263 			qdf_nbuf_free(nbuf);
2264 		}
2265 	} else
2266 		qdf_nbuf_free(nbuf);
2267 
2268 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2269 }
2270 
2271 /**
2272  * dp_tx_inspect_handler() - Tx Inspect Handler
2273  * @tx_desc: software descriptor head pointer
2274  * @status : Tx completion status from HTT descriptor
2275  *
2276  * Handles Tx frames sent back to Host for inspection
2277  * (ProxyARP)
2278  *
2279  * Return: none
2280  */
2281 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2282 {
2283 
2284 	struct dp_soc *soc;
2285 	struct dp_pdev *pdev = tx_desc->pdev;
2286 
2287 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2288 			"%s Tx inspect path",
2289 			__func__);
2290 
2291 	qdf_assert(pdev);
2292 
2293 	soc = pdev->soc;
2294 
2295 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2296 			qdf_nbuf_len(tx_desc->nbuf));
2297 
2298 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2299 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2300 }
2301 
2302 #ifdef FEATURE_PERPKT_INFO
2303 /**
2304  * dp_get_completion_indication_for_stack() - send completion to stack
2305  * @soc :  dp_soc handle
2306  * @pdev:  dp_pdev handle
2307  * @peer_id: peer_id of the peer for which completion came
2308  * @ppdu_id: ppdu_id
2309  * @first_msdu: first msdu
2310  * @last_msdu: last msdu
2311  * @netbuf: Buffer pointer for free
2312  *
 * This function is used to indicate whether the buffer needs to be
 * handed to the stack for freeing or not
 *
 * Return: QDF_STATUS_SUCCESS when the buffer should be sent to the stack,
 *         error code otherwise
 */
2316 QDF_STATUS
2317 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2318 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2319 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2320 {
2321 	struct tx_capture_hdr *ppdu_hdr;
2322 	struct dp_peer *peer = NULL;
2323 	struct ether_header *eh;
2324 
2325 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2326 		return QDF_STATUS_E_NOSUPPORT;
2327 
2328 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2329 			dp_peer_find_by_id(soc, peer_id);
2330 
2331 	if (!peer) {
2332 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2333 				FL("Peer Invalid"));
2334 		return QDF_STATUS_E_INVAL;
2335 	}
2336 
2337 	if (pdev->mcopy_mode) {
2338 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2339 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2340 			return QDF_STATUS_E_INVAL;
2341 		}
2342 
2343 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2344 		pdev->m_copy_id.tx_peer_id = peer_id;
2345 	}
2346 
2347 	eh = (struct ether_header *)qdf_nbuf_data(netbuf);
2348 
2349 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2350 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2351 				FL("No headroom"));
2352 		return QDF_STATUS_E_NOMEM;
2353 	}
2354 
2355 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2356 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2357 		     IEEE80211_ADDR_LEN);
2358 	if (peer->bss_peer) {
2359 		qdf_mem_copy(ppdu_hdr->ra, eh->ether_dhost, IEEE80211_ADDR_LEN);
2360 	} else {
2361 		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2362 			     IEEE80211_ADDR_LEN);
2363 	}
2364 
2365 	ppdu_hdr->ppdu_id = ppdu_id;
2366 	ppdu_hdr->peer_id = peer_id;
2367 	ppdu_hdr->first_msdu = first_msdu;
2368 	ppdu_hdr->last_msdu = last_msdu;
2369 
2370 	return QDF_STATUS_SUCCESS;
2371 }
2372 
2373 
2374 /**
2375  * dp_send_completion_to_stack() - send completion to stack
2376  * @soc :  dp_soc handle
2377  * @pdev:  dp_pdev handle
2378  * @peer_id: peer_id of the peer for which completion came
2379  * @ppdu_id: ppdu_id
2380  * @netbuf: Buffer pointer for free
2381  *
 * This function is used to send the completion to the stack
 * so that the buffer can be freed there
 *
 * Return: none
 */
2385 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2386 					uint16_t peer_id, uint32_t ppdu_id,
2387 					qdf_nbuf_t netbuf)
2388 {
2389 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2390 				netbuf, peer_id,
2391 				WDI_NO_VAL, pdev->pdev_id);
2392 }
2393 #else
2394 static QDF_STATUS
2395 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2396 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2397 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2398 {
2399 	return QDF_STATUS_E_NOSUPPORT;
2400 }
2401 
2402 static void
2403 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2404 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2405 {
2406 }
2407 #endif
2408 
2409 /**
2410  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2411  * @soc: Soc handle
2412  * @desc: software Tx descriptor to be processed
2413  *
2414  * Return: none
2415  */
2416 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2417 		struct dp_tx_desc_s *desc)
2418 {
2419 	struct dp_vdev *vdev = desc->vdev;
2420 	qdf_nbuf_t nbuf = desc->nbuf;
2421 
2422 	/* If it is TDLS mgmt, don't unmap or free the frame */
2423 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2424 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2425 
2426 	/* 0 : MSDU buffer, 1 : MLE */
2427 	if (desc->msdu_ext_desc) {
2428 		/* TSO free */
2429 		if (hal_tx_ext_desc_get_tso_enable(
2430 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
2432 			dp_tx_tso_unmap_segment(soc, desc);
2433 			qdf_nbuf_free(nbuf);
2434 			return;
2435 		}
2436 	}
2437 
2438 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2439 
2440 	if (qdf_likely(!vdev->mesh_vdev))
2441 		qdf_nbuf_free(nbuf);
2442 	else {
2443 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2444 			qdf_nbuf_free(nbuf);
2445 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2446 		} else
2447 			vdev->osif_tx_free_ext((nbuf));
2448 	}
2449 }
2450 
2451 /**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp vdev handle
2454  * @status : Tx completion status from HTT descriptor
2455  *
2456  * Handles MEC notify event sent from fw to Host
2457  *
2458  * Return: none
2459  */
2460 #ifdef FEATURE_WDS
2461 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2462 {
2463 
2464 	struct dp_soc *soc;
2465 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2466 	struct dp_peer *peer;
2467 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2468 
2469 	if (!vdev->wds_enabled)
2470 		return;
2471 
2472 	/* MEC required only in STA mode */
2473 	if (vdev->opmode != wlan_op_mode_sta)
2474 		return;
2475 
2476 	soc = vdev->pdev->soc;
2477 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2478 	peer = TAILQ_FIRST(&vdev->peer_list);
2479 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2480 
2481 	if (!peer) {
2482 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2483 				FL("peer is NULL"));
2484 		return;
2485 	}
2486 
2487 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2488 			"%s Tx MEC Handler",
2489 			__func__);
2490 
2491 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2492 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2493 					status[(DP_MAC_ADDR_LEN - 2) + i];
2494 
2495 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2496 		dp_peer_add_ast(soc,
2497 				peer,
2498 				mac_addr,
2499 				CDP_TXRX_AST_TYPE_MEC,
2500 				flags);
2501 }
2502 #endif
2503 
2504 /**
2505  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2506  * @tx_desc: software descriptor head pointer
2507  * @status : Tx completion status from HTT descriptor
2508  *
2509  * This function will process HTT Tx indication messages from Target
2510  *
2511  * Return: none
2512  */
2513 static
2514 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2515 {
2516 	uint8_t tx_status;
2517 	struct dp_pdev *pdev;
2518 	struct dp_vdev *vdev;
2519 	struct dp_soc *soc;
2520 	uint32_t *htt_status_word = (uint32_t *) status;
2521 
2522 	qdf_assert(tx_desc->pdev);
2523 
2524 	pdev = tx_desc->pdev;
2525 	vdev = tx_desc->vdev;
2526 	soc = pdev->soc;
2527 
2528 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2529 
2530 	switch (tx_status) {
2531 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2532 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2533 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2534 	{
2535 		dp_tx_comp_free_buf(soc, tx_desc);
2536 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2537 		break;
2538 	}
2539 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2540 	{
2541 		dp_tx_reinject_handler(tx_desc, status);
2542 		break;
2543 	}
2544 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2545 	{
2546 		dp_tx_inspect_handler(tx_desc, status);
2547 		break;
2548 	}
2549 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2550 	{
2551 		dp_tx_mec_handler(vdev, status);
2552 		break;
2553 	}
2554 	default:
2555 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2556 				"%s Invalid HTT tx_status %d",
2557 				__func__, tx_status);
2558 		break;
2559 	}
2560 }
2561 
2562 #ifdef MESH_MODE_SUPPORT
2563 /**
2564  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2565  *                                         in mesh meta header
2566  * @tx_desc: software descriptor head pointer
2567  * @ts: pointer to tx completion stats
2568  * Return: none
2569  */
2570 static
2571 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2572 		struct hal_tx_completion_status *ts)
2573 {
2574 	struct meta_hdr_s *mhdr;
2575 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2576 
2577 	if (!tx_desc->msdu_ext_desc) {
2578 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2579 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2580 				"netbuf %pK offset %d",
2581 				netbuf, tx_desc->pkt_offset);
2582 			return;
2583 		}
2584 	}
2585 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2587 			"netbuf %pK offset %d", netbuf,
2588 			sizeof(struct meta_hdr_s));
2589 		return;
2590 	}
2591 
2592 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2593 	mhdr->rssi = ts->ack_frame_rssi;
2594 	mhdr->channel = tx_desc->pdev->operating_channel;
2595 }
2596 
2597 #else
2598 static
2599 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2600 		struct hal_tx_completion_status *ts)
2601 {
2602 }
2603 
2604 #endif
2605 
2606 /**
2607  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2608  * @peer: Handle to DP peer
2609  * @ts: pointer to HAL Tx completion stats
2610  * @length: MSDU length
2611  *
2612  * Return: None
2613  */
2614 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2615 		struct hal_tx_completion_status *ts, uint32_t length)
2616 {
2617 	struct dp_pdev *pdev = peer->vdev->pdev;
2618 	struct dp_soc *soc = pdev->soc;
2619 	uint8_t mcs, pkt_type;
2620 
2621 	mcs = ts->mcs;
2622 	pkt_type = ts->pkt_type;
2623 
	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2625 		return;
2626 
2627 	if (peer->bss_peer) {
2628 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2629 	} else {
2630 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2631 	}
2632 
2633 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2634 
2635 	DP_STATS_INCC_PKT(peer, tx.tx_success, 1, length,
2636 			  (ts->status == HAL_TX_TQM_RR_FRAME_ACKED));
2637 
2638 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2639 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2640 
2641 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2642 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2643 
2644 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2645 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2646 
2647 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2648 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2649 
2650 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2651 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2652 
2653 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2654 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2655 
2656 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2657 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2658 
	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2660 		return;
2661 
2662 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2663 
2664 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2665 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2666 
2667 	if (!(soc->process_tx_status))
2668 		return;
2669 
2670 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2671 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2672 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2673 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2674 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2675 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2676 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2677 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2678 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2679 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2680 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2681 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2682 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2683 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2684 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2685 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2686 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2687 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2688 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2689 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2690 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2691 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2692 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2693 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2694 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2695 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2696 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2697 
2698 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2699 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
2700 				&peer->stats, ts->peer_id,
2701 				UPDATE_PEER_STATS);
2702 	}
2703 }
2704 
2705 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2706 /**
2707  * dp_tx_flow_pool_lock() - take flow pool lock
2708  * @soc: core txrx main context
2709  * @tx_desc: tx desc
2710  *
2711  * Return: None
2712  */
2713 static inline
2714 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2715 			  struct dp_tx_desc_s *tx_desc)
2716 {
2717 	struct dp_tx_desc_pool_s *pool;
2718 	uint8_t desc_pool_id;
2719 
2720 	desc_pool_id = tx_desc->pool_id;
2721 	pool = &soc->tx_desc[desc_pool_id];
2722 
2723 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2724 }
2725 
2726 /**
2727  * dp_tx_flow_pool_unlock() - release flow pool lock
2728  * @soc: core txrx main context
2729  * @tx_desc: tx desc
2730  *
2731  * Return: None
2732  */
2733 static inline
2734 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2735 			    struct dp_tx_desc_s *tx_desc)
2736 {
2737 	struct dp_tx_desc_pool_s *pool;
2738 	uint8_t desc_pool_id;
2739 
2740 	desc_pool_id = tx_desc->pool_id;
2741 	pool = &soc->tx_desc[desc_pool_id];
2742 
2743 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2744 }
2745 #else
2746 static inline
2747 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2748 {
2749 }
2750 
2751 static inline
2752 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2753 {
2754 }
2755 #endif
2756 
2757 /**
2758  * dp_tx_notify_completion() - Notify tx completion for this desc
2759  * @soc: core txrx main context
2760  * @tx_desc: tx desc
2761  * @netbuf:  buffer
2762  *
2763  * Return: none
2764  */
2765 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2766 					   struct dp_tx_desc_s *tx_desc,
2767 					   qdf_nbuf_t netbuf)
2768 {
2769 	void *osif_dev;
2770 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2771 
2772 	qdf_assert(tx_desc);
2773 
2774 	dp_tx_flow_pool_lock(soc, tx_desc);
2775 
2776 	if (!tx_desc->vdev ||
2777 	    !tx_desc->vdev->osif_vdev) {
2778 		dp_tx_flow_pool_unlock(soc, tx_desc);
2779 		return;
2780 	}
2781 
2782 	osif_dev = tx_desc->vdev->osif_vdev;
2783 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2784 	dp_tx_flow_pool_unlock(soc, tx_desc);
2785 
2786 	if (tx_compl_cbk)
2787 		tx_compl_cbk(netbuf, osif_dev);
2788 }
2789 
2790 /**
2791  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2792  * @tx_desc: software descriptor head pointer
2793  * @length: packet length
2794  *
2795  * Return: none
2796  */
2797 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2798 		uint32_t length)
2799 {
2800 	struct hal_tx_completion_status ts;
2801 	struct dp_soc *soc = NULL;
2802 	struct dp_vdev *vdev = tx_desc->vdev;
2803 	struct dp_peer *peer = NULL;
2804 	struct ether_header *eh =
2805 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2806 
2807 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2808 
2809 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2810 				"-------------------- \n"
2811 				"Tx Completion Stats: \n"
2812 				"-------------------- \n"
2813 				"ack_frame_rssi = %d \n"
2814 				"first_msdu = %d \n"
2815 				"last_msdu = %d \n"
2816 				"msdu_part_of_amsdu = %d \n"
2817 				"rate_stats valid = %d \n"
2818 				"bw = %d \n"
2819 				"pkt_type = %d \n"
2820 				"stbc = %d \n"
2821 				"ldpc = %d \n"
2822 				"sgi = %d \n"
2823 				"mcs = %d \n"
2824 				"ofdma = %d \n"
2825 				"tones_in_ru = %d \n"
2826 				"tsf = %d \n"
2827 				"ppdu_id = %d \n"
2828 				"transmit_cnt = %d \n"
2829 				"tid = %d \n"
2830 				"peer_id = %d ",
2831 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2832 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2833 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2834 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2835 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2836 				ts.peer_id);
2837 
2838 	if (!vdev) {
2839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2840 				"invalid vdev");
2841 		goto out;
2842 	}
2843 
2844 	soc = vdev->pdev->soc;
2845 
2846 	/* Update SoC level stats */
2847 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2848 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2849 
2850 	/* Update per-packet stats */
2851 	if (qdf_unlikely(vdev->mesh_vdev) &&
2852 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2853 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2854 
2855 	/* Update peer level stats */
2856 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2857 	if (!peer) {
2858 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2859 				"invalid peer");
2860 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2861 		goto out;
2862 	}
2863 
2864 	if (qdf_likely(peer->vdev->tx_encap_type ==
2865 				htt_cmn_pkt_type_ethernet)) {
2866 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2867 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2868 	}
2869 
2870 	dp_tx_update_peer_stats(peer, &ts, length);
2871 
2872 out:
2873 	return;
2874 }
2875 
2876 /**
2877  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2878  * @soc: core txrx main context
2879  * @comp_head: software descriptor head pointer
2880  *
 * This function will process a batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
2883  *
2884  * Return: none
2885  */
2886 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2887 		struct dp_tx_desc_s *comp_head)
2888 {
2889 	struct dp_tx_desc_s *desc;
2890 	struct dp_tx_desc_s *next;
2891 	struct hal_tx_completion_status ts = {0};
2892 	uint32_t length;
2893 	struct dp_peer *peer;
2894 
2895 	DP_HIST_INIT();
2896 	desc = comp_head;
2897 
2898 	while (desc) {
2899 		hal_tx_comp_get_status(&desc->comp, &ts);
2900 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2901 		length = qdf_nbuf_len(desc->nbuf);
2902 
2903 		/* check tx completion notification */
2904 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf))
2905 			dp_tx_notify_completion(soc, desc, desc->nbuf);
2906 
2907 		dp_tx_comp_process_tx_status(desc, length);
2908 
2909 		DPTRACE(qdf_dp_trace_ptr
2910 				(desc->nbuf,
2911 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2912 				 QDF_TRACE_DEFAULT_PDEV_ID,
2913 				 qdf_nbuf_data_addr(desc->nbuf),
2914 				 sizeof(qdf_nbuf_data(desc->nbuf)),
2915 				 desc->id, ts.status)
2916 			);
2917 
		/* currently m_copy/tx_capture is not supported for scatter gather packets */
2919 		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2920 					desc->pdev, ts.peer_id, ts.ppdu_id,
2921 					ts.first_msdu, ts.last_msdu,
2922 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
2923 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2924 						QDF_DMA_TO_DEVICE);
2925 
2926 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2927 				ts.ppdu_id, desc->nbuf);
2928 		} else {
2929 			dp_tx_comp_free_buf(soc, desc);
2930 		}
2931 
2932 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2933 
2934 		next = desc->next;
2935 		dp_tx_desc_release(desc, desc->pool_id);
2936 		desc = next;
2937 	}
2938 	DP_TX_HIST_STATS_PER_PDEV();
2939 }
2940 
2941 /**
2942  * dp_tx_comp_handler() - Tx completion handler
2943  * @soc: core txrx main context
2944  * @ring_id: completion ring id
2945  * @quota: No. of packets/descriptors that can be serviced in one loop
2946  *
2947  * This function will collect hardware release ring element contents and
2948  * handle descriptor contents. Based on contents, free packet or handle error
2949  * conditions
2950  *
 * Return: Number of Tx completion descriptors processed
2952  */
2953 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2954 {
2955 	void *tx_comp_hal_desc;
2956 	uint8_t buffer_src;
2957 	uint8_t pool_id;
2958 	uint32_t tx_desc_id;
2959 	struct dp_tx_desc_s *tx_desc = NULL;
2960 	struct dp_tx_desc_s *head_desc = NULL;
2961 	struct dp_tx_desc_s *tail_desc = NULL;
2962 	uint32_t num_processed;
2963 	uint32_t count;
2964 
2965 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2966 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2967 				"%s %d : HAL RING Access Failed -- %pK",
2968 				__func__, __LINE__, hal_srng);
2969 		return 0;
2970 	}
2971 
2972 	num_processed = 0;
2973 	count = 0;
2974 
2975 	/* Find head descriptor from completion ring */
2976 	while (qdf_likely(tx_comp_hal_desc =
2977 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2978 
2979 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2980 
		/* If this buffer was not released by TQM or FW,
		 * then it is not a Tx completion indication; assert
		 */
2983 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2984 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2985 
2986 			QDF_TRACE(QDF_MODULE_ID_DP,
2987 					QDF_TRACE_LEVEL_FATAL,
2988 					"Tx comp release_src != TQM | FW");
2989 
2990 			qdf_assert_always(0);
2991 		}
2992 
2993 		/* Get descriptor id */
2994 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
2995 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2996 			DP_TX_DESC_ID_POOL_OS;
2997 
2998 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
2999 			continue;
3000 
3001 		/* Find Tx descriptor */
3002 		tx_desc = dp_tx_desc_find(soc, pool_id,
3003 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3004 				DP_TX_DESC_ID_PAGE_OS,
3005 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3006 				DP_TX_DESC_ID_OFFSET_OS);
3007 
3008 		/*
3009 		 * If the release source is FW, process the HTT status
3010 		 */
3011 		if (qdf_unlikely(buffer_src ==
3012 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3013 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3014 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3015 					htt_tx_status);
3016 			dp_tx_process_htt_completion(tx_desc,
3017 					htt_tx_status);
3018 		} else {
3019 			/* Pool id is not matching. Error */
3020 			if (tx_desc->pool_id != pool_id) {
3021 				QDF_TRACE(QDF_MODULE_ID_DP,
3022 					QDF_TRACE_LEVEL_FATAL,
3023 					"Tx Comp pool id %d not matched %d",
3024 					pool_id, tx_desc->pool_id);
3025 
3026 				qdf_assert_always(0);
3027 			}
3028 
3029 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3030 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3031 				QDF_TRACE(QDF_MODULE_ID_DP,
3032 					QDF_TRACE_LEVEL_FATAL,
3033 					"Txdesc invalid, flgs = %x,id = %d",
3034 					tx_desc->flags,	tx_desc_id);
3035 				qdf_assert_always(0);
3036 			}
3037 
3038 			/* First ring descriptor on the cycle */
3039 			if (!head_desc) {
3040 				head_desc = tx_desc;
3041 				tail_desc = tx_desc;
3042 			}
3043 
3044 			tail_desc->next = tx_desc;
3045 			tx_desc->next = NULL;
3046 			tail_desc = tx_desc;
3047 
3048 			/* Collect hw completion contents */
3049 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3050 					&tx_desc->comp, 1);
3051 
3052 		}
3053 
3054 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3055 		/* Decrement PM usage count if the packet has been sent.*/
3056 		hif_pm_runtime_put(soc->hif_handle);
3057 
3058 		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing
3061 		 */
3062 		if ((num_processed >= quota))
3063 			break;
3064 
3065 		count++;
3066 	}
3067 
3068 	hal_srng_access_end(soc->hal_soc, hal_srng);
3069 
3070 	/* Process the reaped descriptors */
3071 	if (head_desc)
3072 		dp_tx_comp_process_desc(soc, head_desc);
3073 
3074 	return num_processed;
3075 }
3076 
3077 #ifdef CONVERGED_TDLS_ENABLE
3078 /**
3079  * dp_tx_non_std() - Allow the control-path SW to send data frames
3080  *
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
3084  *
3085  * Return: NULL on success,
3086  *         nbuf when it fails to send
3087  */
3088 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3089 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3090 {
3091 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3092 
3093 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3094 		vdev->is_tdls_frame = true;
3095 	return dp_tx_send(vdev_handle, msdu_list);
3096 }
3097 #endif
3098 
3099 /**
3100  * dp_tx_vdev_attach() - attach vdev to dp tx
3101  * @vdev: virtual device instance
3102  *
3103  * Return: QDF_STATUS_SUCCESS: success
3104  *         QDF_STATUS_E_RESOURCES: Error return
3105  */
3106 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3107 {
3108 	/*
3109 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3110 	 */
3111 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3112 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3113 
3114 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3115 			vdev->vdev_id);
3116 
3117 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3118 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3119 
3120 	/*
3121 	 * Set HTT Extension Valid bit to 0 by default
3122 	 */
3123 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3124 
3125 	dp_tx_vdev_update_search_flags(vdev);
3126 
3127 	return QDF_STATUS_SUCCESS;
3128 }
3129 
3130 /**
3131  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3132  * @vdev: virtual device instance
3133  *
3134  * Return: void
3135  *
3136  */
3137 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3138 {
3139 	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3141 	 * for TDLS link
3142 	 *
3143 	 * Enable AddrY (SA based search) only for non-WDS STA and
3144 	 * ProxySTA VAP modes.
3145 	 *
3146 	 * In all other VAP modes, only DA based search should be
3147 	 * enabled
3148 	 */
3149 	if (vdev->opmode == wlan_op_mode_sta &&
3150 	    vdev->tdls_link_connected)
3151 		vdev->hal_desc_addr_search_flags =
3152 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3153 	else if ((vdev->opmode == wlan_op_mode_sta &&
3154 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
3155 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3156 	else
3157 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3158 }
3159 
3160 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3161 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3162 {
3163 }
3164 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3165 
/* dp_tx_desc_flush() - release resources associated
 *                      with a tx_desc
 * @vdev: virtual device instance
 *
 * This function will free all outstanding Tx buffers,
 * including ME buffers for which either the free on
 * completion did not happen or the completion was
 * never received.
 */
3175 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3176 {
3177 	uint8_t i, num_pool;
3178 	uint32_t j;
3179 	uint32_t num_desc;
3180 	struct dp_soc *soc = vdev->pdev->soc;
3181 	struct dp_tx_desc_s *tx_desc = NULL;
3182 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3183 
3184 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3185 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3186 
3187 	for (i = 0; i < num_pool; i++) {
3188 		for (j = 0; j < num_desc; j++) {
3189 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3190 			if (tx_desc_pool &&
3191 				tx_desc_pool->desc_pages.cacheable_pages) {
3192 				tx_desc = dp_tx_desc_find(soc, i,
3193 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3194 					DP_TX_DESC_ID_PAGE_OS,
3195 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3196 					DP_TX_DESC_ID_OFFSET_OS);
3197 
3198 				if (tx_desc && (tx_desc->vdev == vdev) &&
3199 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3200 					dp_tx_comp_free_buf(soc, tx_desc);
3201 					dp_tx_desc_release(tx_desc, i);
3202 				}
3203 			}
3204 		}
3205 	}
3206 }
3207 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3208 
3209 /**
3210  * dp_tx_vdev_detach() - detach vdev from dp tx
3211  * @vdev: virtual device instance
3212  *
3213  * Return: QDF_STATUS_SUCCESS: success
3214  *         QDF_STATUS_E_RESOURCES: Error return
3215  */
3216 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3217 {
3218 	dp_tx_desc_flush(vdev);
3219 	return QDF_STATUS_SUCCESS;
3220 }
3221 
3222 /**
3223  * dp_tx_pdev_attach() - attach pdev to dp tx
3224  * @pdev: physical device instance
3225  *
3226  * Return: QDF_STATUS_SUCCESS: success
3227  *         QDF_STATUS_E_RESOURCES: Error return
3228  */
3229 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3230 {
3231 	struct dp_soc *soc = pdev->soc;
3232 
3233 	/* Initialize Flow control counters */
3234 	qdf_atomic_init(&pdev->num_tx_exception);
3235 	qdf_atomic_init(&pdev->num_tx_outstanding);
3236 
3237 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3238 		/* Initialize descriptors in TCL Ring */
3239 		hal_tx_init_data_ring(soc->hal_soc,
3240 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3241 	}
3242 
3243 	return QDF_STATUS_SUCCESS;
3244 }
3245 
3246 /**
3247  * dp_tx_pdev_detach() - detach pdev from dp tx
3248  * @pdev: physical device instance
3249  *
3250  * Return: QDF_STATUS_SUCCESS: success
3251  *         QDF_STATUS_E_RESOURCES: Error return
3252  */
3253 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3254 {
3255 	dp_tx_me_exit(pdev);
3256 	return QDF_STATUS_SUCCESS;
3257 }
3258 
3259 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3260 /* Pools will be allocated dynamically */
3261 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3262 					int num_desc)
3263 {
3264 	uint8_t i;
3265 
3266 	for (i = 0; i < num_pool; i++) {
3267 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3268 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3269 	}
3270 
3271 	return 0;
3272 }
3273 
3274 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3275 {
3276 	uint8_t i;
3277 
3278 	for (i = 0; i < num_pool; i++)
3279 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3280 }
3281 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3282 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3283 					int num_desc)
3284 {
3285 	uint8_t i;
3286 
3287 	/* Allocate software Tx descriptor pools */
3288 	for (i = 0; i < num_pool; i++) {
3289 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3290 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3291 					"%s Tx Desc Pool alloc %d failed %pK",
3292 					__func__, i, soc);
3293 			return ENOMEM;
3294 		}
3295 	}
3296 	return 0;
3297 }
3298 
3299 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3300 {
3301 	uint8_t i;
3302 
3303 	for (i = 0; i < num_pool; i++) {
3304 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3305 		if (dp_tx_desc_pool_free(soc, i)) {
3306 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3307 				"%s Tx Desc Pool Free failed", __func__);
3308 		}
3309 	}
3310 }
3311 
3312 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3313 
3314 /**
3315  * dp_tx_soc_detach() - detach soc from dp tx
3316  * @soc: core txrx main context
3317  *
 * This function will detach dp tx from the main device context,
 * free dp tx resources and de-initialize them
3320  *
3321  * Return: QDF_STATUS_SUCCESS: success
3322  *         QDF_STATUS_E_RESOURCES: Error return
3323  */
3324 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3325 {
3326 	uint8_t num_pool;
3327 	uint16_t num_desc;
3328 	uint16_t num_ext_desc;
3329 	uint8_t i;
3330 
3331 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3332 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3333 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3334 
3335 	dp_tx_flow_control_deinit(soc);
3336 	dp_tx_delete_static_pools(soc, num_pool);
3337 
3338 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3339 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3340 			__func__, num_pool, num_desc);
3341 
3342 	for (i = 0; i < num_pool; i++) {
3343 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3344 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3345 					"%s Tx Ext Desc Pool Free failed",
3346 					__func__);
3347 			return QDF_STATUS_E_RESOURCES;
3348 		}
3349 	}
3350 
3351 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3352 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3353 			__func__, num_pool, num_ext_desc);
3354 
3355 	for (i = 0; i < num_pool; i++) {
3356 		dp_tx_tso_desc_pool_free(soc, i);
3357 	}
3358 
3359 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3360 			"%s TSO Desc Pool %d Free descs = %d",
3361 			__func__, num_pool, num_desc);
3362 
3363 
3364 	for (i = 0; i < num_pool; i++)
3365 		dp_tx_tso_num_seg_pool_free(soc, i);
3366 
3367 
3368 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3369 		"%s TSO Num of seg Desc Pool %d Free descs = %d",
3370 		__func__, num_pool, num_desc);
3371 
3372 	return QDF_STATUS_SUCCESS;
3373 }
3374 
3375 /**
3376  * dp_tx_soc_attach() - attach soc to dp tx
3377  * @soc: core txrx main context
3378  *
 * This function will attach dp tx to the main device context,
 * allocate dp tx resources and initialize them
3381  *
3382  * Return: QDF_STATUS_SUCCESS: success
3383  *         QDF_STATUS_E_RESOURCES: Error return
3384  */
3385 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3386 {
3387 	uint8_t i;
3388 	uint8_t num_pool;
3389 	uint32_t num_desc;
3390 	uint32_t num_ext_desc;
3391 
3392 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3393 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3394 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3395 
3396 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3397 		goto fail;
3398 
3399 	dp_tx_flow_control_init(soc);
3400 
3401 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3402 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3403 			__func__, num_pool, num_desc);
3404 
3405 	/* Allocate extension tx descriptor pools */
3406 	for (i = 0; i < num_pool; i++) {
3407 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3408 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3409 				"MSDU Ext Desc Pool alloc %d failed %pK",
3410 				i, soc);
3411 
3412 			goto fail;
3413 		}
3414 	}
3415 
3416 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3417 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3418 			__func__, num_pool, num_ext_desc);
3419 
3420 	for (i = 0; i < num_pool; i++) {
3421 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3422 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3423 				"TSO Desc Pool alloc %d failed %pK",
3424 				i, soc);
3425 
3426 			goto fail;
3427 		}
3428 	}
3429 
3430 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3431 			"%s TSO Desc Alloc %d, descs = %d",
3432 			__func__, num_pool, num_desc);
3433 
3434 	for (i = 0; i < num_pool; i++) {
3435 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3436 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3437 				"TSO Num of seg Pool alloc %d failed %pK",
3438 				i, soc);
3439 
3440 			goto fail;
3441 		}
3442 	}
3443 
3444 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3445 			"%s TSO Num of seg pool Alloc %d, descs = %d",
3446 			__func__, num_pool, num_desc);
3447 
3448 	/* Initialize descriptors in TCL Rings */
3449 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3450 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3451 			hal_tx_init_data_ring(soc->hal_soc,
3452 					soc->tcl_data_ring[i].hal_srng);
3453 		}
3454 	}
3455 
3456 	/*
3457 	 * todo - Add a runtime config option to enable this.
3458 	 */
3459 	/*
3460 	 * Due to multiple issues on NPR EMU, enable it selectively
3461 	 * only for NPR EMU, should be removed, once NPR platforms
3462 	 * are stable.
3463 	 */
3464 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3465 
3466 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3467 			"%s HAL Tx init Success", __func__);
3468 
3469 	return QDF_STATUS_SUCCESS;
3470 
3471 fail:
3472 	/* Detach will take care of freeing only allocated resources */
3473 	dp_tx_soc_detach(soc);
3474 	return QDF_STATUS_E_RESOURCES;
3475 }
3476 
3477 /*
 * dp_tx_me_mem_free(): Function to free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of list
3481  *
3482  * return: void
3483  */
3484 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3485 		struct dp_tx_seg_info_s *seg_info_head)
3486 {
3487 	struct dp_tx_me_buf_t *mc_uc_buf;
3488 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3489 	qdf_nbuf_t nbuf = NULL;
3490 	uint64_t phy_addr;
3491 
3492 	while (seg_info_head) {
3493 		nbuf = seg_info_head->nbuf;
3494 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3495 			seg_info_head->frags[0].vaddr;
3496 		phy_addr = seg_info_head->frags[0].paddr_hi;
3497 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3498 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3499 				phy_addr,
3500 				QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN);
3501 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3502 		qdf_nbuf_free(nbuf);
3503 		seg_info_new = seg_info_head;
3504 		seg_info_head = seg_info_head->next;
3505 		qdf_mem_free(seg_info_new);
3506 	}
3507 }
3508 
3509 /**
3510  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3511  * @vdev: DP VDEV handle
3512  * @nbuf: Multicast nbuf
3513  * @newmac: Table of the clients to which packets have to be sent
3514  * @new_mac_cnt: No of clients
3515  *
3516  * return: no of converted packets
3517  */
3518 uint16_t
3519 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3520 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3521 {
3522 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3523 	struct dp_pdev *pdev = vdev->pdev;
3524 	struct ether_header *eh;
3525 	uint8_t *data;
3526 	uint16_t len;
3527 
3528 	/* reference to frame dst addr */
3529 	uint8_t *dstmac;
3530 	/* copy of original frame src addr */
3531 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3532 
3533 	/* local index into newmac */
3534 	uint8_t new_mac_idx = 0;
3535 	struct dp_tx_me_buf_t *mc_uc_buf;
3536 	qdf_nbuf_t  nbuf_clone;
3537 	struct dp_tx_msdu_info_s msdu_info;
3538 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3539 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3540 	struct dp_tx_seg_info_s *seg_info_new;
3541 	struct dp_tx_frag_info_s data_frag;
3542 	qdf_dma_addr_t paddr_data;
3543 	qdf_dma_addr_t paddr_mcbuf = 0;
3544 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3545 	QDF_STATUS status;
3546 
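	/*
	 * Overall flow: DMA-map the original frame once, then for each
	 * valid entry in newmac[] build a two-fragment segment (new
	 * unicast DA + shared payload), cloning or re-referencing the
	 * nbuf, and finally hand the whole segment list to the
	 * scatter-gather transmit path.
	 */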
3547 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3548 
3549 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3550 
3551 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3552 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3553 
3554 	len = qdf_nbuf_len(nbuf);
3555 
3556 	data = qdf_nbuf_data(nbuf);
3557 
3558 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3559 			QDF_DMA_TO_DEVICE);
3560 
3561 	if (status) {
3562 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3563 				"Mapping failure Error:%d", status);
3564 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3565 		qdf_nbuf_free(nbuf);
3566 		return 1;
3567 	}
3568 
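	/*
	 * The first IEEE80211_ADDR_LEN bytes of the frame hold the original
	 * multicast DA; skip them, since every copy gets its own unicast DA
	 * prepended as a separate fragment below.
	 */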
3569 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3570 
3571 	/* prepare the data fragment */
3572 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3573 	data_frag.paddr_lo = (uint32_t)paddr_data;
3574 	data_frag.paddr_hi = (((uint64_t)paddr_data) >> 32);
3575 	data_frag.len = len - DP_MAC_ADDR_LEN;
3576 
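	/*
	 * Per-client segment layout built by this loop:
	 *   frag[0]: 6-byte unicast DA held in a dp_tx_me_buf_t
	 *   frag[1]: the shared payload above (SA onwards, len - 6 bytes)
	 */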
3577 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3578 		dstmac = newmac[new_mac_idx];
3579 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3580 				"added mac addr (%pM)", dstmac);
3581 
3582 		/* Check for NULL Mac Address */
3583 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3584 			continue;
3585 
3586 		/* frame to self mac. skip */
3587 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3588 			continue;
3589 
3590 		/*
3591 		 * TODO: optimize to avoid malloc in per-packet path
3592 		 * e.g. the seg_pool can be made part of the vdev structure
3593 		 */
3594 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3595 
3596 		if (!seg_info_new) {
3597 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3598 					"alloc failed");
3599 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3600 			goto fail_seg_alloc;
3601 		}
3602 
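		/*
		 * dp_tx_me_alloc_buf() hands out a small buffer (assumed to
		 * come from the pdev's pre-allocated ME buffer pool) that
		 * will carry this client's unicast DA as fragment 0.
		 */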
3603 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3604 		if (mc_uc_buf == NULL)
3605 			goto fail_buf_alloc;
3606 
3607 		/*
3608 		 * TODO: Check whether the nbuf really needs to be cloned
3609 		 * here, or whether a reference would suffice for all cases
3610 		 */
3611 		if (new_mac_idx < (new_mac_cnt - 1)) {
3612 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3613 			if (nbuf_clone == NULL) {
3614 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3615 				goto fail_clone;
3616 			}
3617 		} else {
3618 			/*
3619 			 * Update the ref
3620 			 * to account for frame sent without cloning
3621 			 */
3622 			qdf_nbuf_ref(nbuf);
3623 			nbuf_clone = nbuf;
3624 		}
3625 
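		/*
		 * Stage the client's DA in the ME buffer and DMA-map it so
		 * the hardware can fetch it as the first fragment.
		 */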
3626 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3627 
3628 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3629 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3630 				&paddr_mcbuf);
3631 
3632 		if (status) {
3633 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3634 					"Mapping failure Error:%d", status);
3635 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3636 			goto fail_map;
3637 		}
3638 
3639 		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3640 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3641 		seg_info_new->frags[0].paddr_hi =
3642 			((uint64_t) paddr_mcbuf >> 32);
3643 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3644 
3645 		seg_info_new->frags[1] = data_frag;
3646 		seg_info_new->nbuf = nbuf_clone;
3647 		seg_info_new->frag_cnt = 2;
3648 		seg_info_new->total_len = len;
3649 
3650 		seg_info_new->next = NULL;
3651 
3652 		if (seg_info_head == NULL)
3653 			seg_info_head = seg_info_new;
3654 		else
3655 			seg_info_tail->next = seg_info_new;
3656 
3657 		seg_info_tail = seg_info_new;
3658 	}
3659 
3660 	if (!seg_info_head)
3661 		goto free_return;
3663 
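	/*
	 * Hand the segment list to the regular multi-MSDU send path; one
	 * frame is queued per segment. Note that num_seg assumes every
	 * entry in newmac[] produced a segment; entries skipped above
	 * (empty or self MAC) are not subtracted here.
	 */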
3664 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3665 	msdu_info.num_seg = new_mac_cnt;
3666 	msdu_info.frm_type = dp_tx_frm_me;
3667 
3668 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3669 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3670 
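	/*
	 * The seg_info nodes are host-side bookkeeping only; release them
	 * now that the frames have been queued for transmission.
	 */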
3671 	while (seg_info_head->next) {
3672 		seg_info_new = seg_info_head;
3673 		seg_info_head = seg_info_head->next;
3674 		qdf_mem_free(seg_info_new);
3675 	}
3676 	qdf_mem_free(seg_info_head);
3677 
3678 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3679 	qdf_nbuf_free(nbuf);
3680 	return new_mac_cnt;
3681 
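/*
 * The error labels below fall through so that a failure at any point unwinds
 * everything acquired so far: the current clone, the current ME buffer, the
 * current segment node, all previously built segments (via
 * dp_tx_me_mem_free()), and finally the mapping and reference on the
 * original nbuf.
 */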
3682 fail_map:
3683 	qdf_nbuf_free(nbuf_clone);
3684 
3685 fail_clone:
3686 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3687 
3688 fail_buf_alloc:
3689 	qdf_mem_free(seg_info_new);
3690 
3691 fail_seg_alloc:
3692 	dp_tx_me_mem_free(pdev, seg_info_head);
3693 
3694 free_return:
3695 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3696 	qdf_nbuf_free(nbuf);
3697 	return 1;
3698 }
3699 
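/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * caller that fans one multicast frame out to three known clients. The
 * client MAC table would normally come from the multicast-enhancement group
 * database maintained elsewhere in the driver; the names below are
 * placeholders.
 *
 *	uint8_t clients[3][DP_MAC_ADDR_LEN];	// filled from the ME group DB
 *	uint16_t sent;
 *
 *	sent = dp_tx_me_send_convert_ucast(vdev_handle, mcast_nbuf,
 *					   clients, 3);
 *	// 'sent' is the number of unicast copies handed to the TX path;
 *	// the original mcast_nbuf has been consumed either way.
 */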
3700