xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision d78dedc9dd8c4ee677ac1649d1d42f2a7c3cc1b7)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_net_types.h"
28 #include <wlan_cfg.h>
29 #ifdef MESH_MODE_SUPPORT
30 #include "if_meta_hdr.h"
31 #endif
32 
33 #define DP_TX_QUEUE_MASK 0x3
34 
35 /* TODO Add support in TSO */
36 #define DP_DESC_NUM_FRAG(x) 0
37 
38 /* disable TQM_BYPASS */
39 #define TQM_BYPASS_WAR 0
40 
41 /* Invalid peer id for reinject */
42 #define DP_INVALID_PEER 0XFFFE
43 
44 /* Mapping between hal encrypt type and cdp_sec_type */
45 #define MAX_CDP_SEC_TYPE 12
46 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
47 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
48 					HAL_TX_ENCRYPT_TYPE_WEP_128,
49 					HAL_TX_ENCRYPT_TYPE_WEP_104,
50 					HAL_TX_ENCRYPT_TYPE_WEP_40,
51 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
53 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
54 					HAL_TX_ENCRYPT_TYPE_WAPI,
55 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
56 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
58 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
59 
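/*
 * Illustrative sketch (not part of the driver): the table above is indexed
 * with the cdp_sec_type value carried in the vdev (or in the exception path
 * metadata) to obtain the HAL encrypt type programmed into the TCL
 * descriptor, e.g. in dp_tx_hw_enqueue():
 *
 *   enum cdp_sec_type sec_type = vdev->sec_type;   // e.g. cdp_sec_type_aes_ccmp
 *   uint8_t hal_enc = sec_type_map[sec_type];      // HAL_TX_ENCRYPT_TYPE_AES_CCMP_128
 *   hal_tx_desc_set_encrypt_type(hal_tx_desc_cached, hal_enc);
 *
 * The cdp_sec_type value is assumed to stay below MAX_CDP_SEC_TYPE for the
 * lookup to be valid; dp_check_exc_metadata() performs a similar bound check
 * for the exception path.
 */
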
60 /**
61  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
62  * @vdev: DP Virtual device handle
63  * @nbuf: Buffer pointer
64  * @queue: queue ids container for nbuf
65  *
66  * A Tx queue has two components: a software descriptor pool id and a DMA
67  * ring id. Depending on the Tx feature set and the hardware configuration,
68  * the combination of the two can differ.
69  * For example -
70  * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per CPU id.
71  * With no XPS (lock based resource protection), descriptor pool ids are different
72  * for each vdev, and the DMA ring id is the same as the single pdev id.
73  *
74  * Return: None
75  */
76 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
77 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
78 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
79 {
80 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
81 	queue->desc_pool_id = queue_offset;
82 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
83 
84 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
85 			"%s, pool_id:%d ring_id: %d",
86 			__func__, queue->desc_pool_id, queue->ring_id);
87 
88 	return;
89 }
90 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
91 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
92 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
93 {
94 	/* get flow id */
95 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
96 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
97 
98 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
99 			"%s, pool_id:%d ring_id: %d",
100 			__func__, queue->desc_pool_id, queue->ring_id);
101 
102 	return;
103 }
104 #endif
105 
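/*
 * Illustrative sketch (hypothetical values): with QCA_OL_TX_MULTIQ_SUPPORT,
 * an nbuf whose queue_mapping is 6 selects offset 6 & DP_TX_QUEUE_MASK = 2,
 * so desc_pool_id = 2 and ring_id = soc->tx_ring_map[2]. Without multiqueue
 * support the pool and ring are fixed per vdev/pdev:
 *
 *   struct dp_tx_queue tx_q;
 *
 *   dp_tx_get_queue(vdev, nbuf, &tx_q);
 *   // tx_q.desc_pool_id and tx_q.ring_id are used later by
 *   // dp_tx_send_msdu_single()/dp_tx_send_msdu_multiple() to pick the
 *   // descriptor pool and the TCL data ring.
 */
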
106 #if defined(FEATURE_TSO)
107 /**
108  * dp_tx_tso_desc_release() - Release the tso segment
109  *                            after unmapping all the fragments
110  *
111  * @soc: device SoC handle
112  * @tx_desc: Tx software descriptor
113  */
114 static void dp_tx_tso_desc_release(struct dp_soc *soc,
115 		struct dp_tx_desc_s *tx_desc)
116 {
117 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
118 	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
120 			"%s %d TSO desc is NULL!",
121 			__func__, __LINE__);
122 		qdf_assert(0);
123 	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
124 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
125 			"%s %d TSO common info is NULL!",
126 			__func__, __LINE__);
127 		qdf_assert(0);
128 	} else {
129 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
130 			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;
131 
132 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
133 			tso_num_desc->num_seg.tso_cmn_num_seg--;
134 			qdf_nbuf_unmap_tso_segment(soc->osdev,
135 					tx_desc->tso_desc, false);
136 		} else {
137 			tso_num_desc->num_seg.tso_cmn_num_seg--;
138 			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
139 			qdf_nbuf_unmap_tso_segment(soc->osdev,
140 					tx_desc->tso_desc, true);
141 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
142 					tx_desc->tso_num_desc);
143 			tx_desc->tso_num_desc = NULL;
144 		}
145 		dp_tx_tso_desc_free(soc,
146 				tx_desc->pool_id, tx_desc->tso_desc);
147 		tx_desc->tso_desc = NULL;
148 	}
149 }
150 #else
151 static void dp_tx_tso_desc_release(struct dp_soc *soc,
152 		struct dp_tx_desc_s *tx_desc)
153 {
154 	return;
155 }
156 #endif
157 /**
158  * dp_tx_desc_release() - Release Tx Descriptor
159  * @tx_desc: Tx Descriptor
160  * @desc_pool_id: Descriptor Pool ID
161  *
162  * Deallocate all resources attached to Tx descriptor and free the Tx
163  * descriptor.
164  *
165  * Return: None
166  */
167 static void
168 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
169 {
170 	struct dp_pdev *pdev = tx_desc->pdev;
171 	struct dp_soc *soc;
172 	uint8_t comp_status = 0;
173 
174 	qdf_assert(pdev);
175 
176 	soc = pdev->soc;
177 
178 	if (tx_desc->frm_type == dp_tx_frm_tso)
179 		dp_tx_tso_desc_release(soc, tx_desc);
180 
181 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
182 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
183 
184 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
185 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
186 
187 	qdf_atomic_dec(&pdev->num_tx_outstanding);
188 
189 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
190 		qdf_atomic_dec(&pdev->num_tx_exception);
191 
192 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
193 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
194 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
195 	else
196 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
197 
198 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
199 		"Tx Completion Release desc %d status %d outstanding %d",
200 		tx_desc->id, comp_status,
201 		qdf_atomic_read(&pdev->num_tx_outstanding));
202 
203 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
204 	return;
205 }
206 
207 /**
208  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
209  * @vdev: DP vdev Handle
210  * @nbuf: skb
211  *
212  * Prepares and fills HTT metadata in the frame pre-header for special frames
213  * that should be transmitted using varying transmit parameters.
214  * There are 2 VDEV modes that currently need this special metadata -
215  *  1) Mesh Mode
216  *  2) DSRC Mode
217  *
218  * Return: HTT metadata size
219  *
220  */
221 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
222 		uint32_t *meta_data)
223 {
224 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
225 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
226 
227 	uint8_t htt_desc_size;
228 
229 	/* Size rounded off to a multiple of 8 bytes */
230 	uint8_t htt_desc_size_aligned;
231 
232 	uint8_t *hdr = NULL;
233 
234 	/*
235 	 * Metadata - HTT MSDU Extension header
236 	 */
237 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
238 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
239 
240 	if (vdev->mesh_vdev) {
241 
242 		/* Fill and add HTT metaheader */
243 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
244 		if (hdr == NULL) {
245 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
246 					"Error in filling HTT metadata\n");
247 
248 			return 0;
249 		}
250 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
251 
252 	} else if (vdev->opmode == wlan_op_mode_ocb) {
253 		/* Todo - Add support for DSRC */
254 	}
255 
256 	return htt_desc_size_aligned;
257 }
258 
259 /**
260  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
261  * @tso_seg: TSO segment to process
262  * @ext_desc: Pointer to MSDU extension descriptor
263  *
264  * Return: void
265  */
266 #if defined(FEATURE_TSO)
267 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
268 		void *ext_desc)
269 {
270 	uint8_t num_frag;
271 	uint32_t tso_flags;
272 
273 	/*
274 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
275 	 * tcp_flag_mask
276 	 *
277 	 * Checksum enable flags are set in TCL descriptor and not in Extension
278 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
279 	 */
280 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
281 
282 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
283 
284 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
285 		tso_seg->tso_flags.ip_len);
286 
287 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
288 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
289 
290 
291 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
292 		uint32_t lo = 0;
293 		uint32_t hi = 0;
294 
295 		qdf_dmaaddr_to_32s(
296 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
297 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
298 			tso_seg->tso_frags[num_frag].length);
299 	}
300 
301 	return;
302 }
303 #else
304 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
305 		void *ext_desc)
306 {
307 	return;
308 }
309 #endif
310 
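/*
 * Illustrative sketch: each TSO fragment address is split into low/high
 * 32-bit words before being written into the MSDU extension descriptor.
 * For a hypothetical 36-bit DMA address 0x812345678 (frag_idx and frag_len
 * are placeholders):
 *
 *   uint32_t lo, hi;
 *
 *   qdf_dmaaddr_to_32s(0x812345678ULL, &lo, &hi);
 *   // lo == 0x12345678, hi == 0x8
 *   hal_tx_ext_desc_set_buffer(ext_desc, frag_idx, lo, hi, frag_len);
 */
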
311 #if defined(FEATURE_TSO)
312 /**
313  * dp_tx_free_tso_seg() - Loop through the tso segments
314  *                        allocated and free them
315  *
316  * @soc: soc handle
317  * @free_seg: list of tso segments
318  * @msdu_info: msdu descriptor
319  *
320  * Return: void
321  */
322 static void dp_tx_free_tso_seg(struct dp_soc *soc,
323 	struct qdf_tso_seg_elem_t *free_seg,
324 	struct dp_tx_msdu_info_s *msdu_info)
325 {
326 	struct qdf_tso_seg_elem_t *next_seg;
327 
328 	while (free_seg) {
329 		next_seg = free_seg->next;
330 		dp_tx_tso_desc_free(soc,
331 			msdu_info->tx_queue.desc_pool_id,
332 			free_seg);
333 		free_seg = next_seg;
334 	}
335 }
336 
337 /**
338  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
339  *                            allocated and free them
340  *
341  * @soc:  soc handle
342  * @free_seg: list of tso num segments
343  * @msdu_info: msdu descriptor
344  * Return: void
345  */
346 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
347 	struct qdf_tso_num_seg_elem_t *free_seg,
348 	struct dp_tx_msdu_info_s *msdu_info)
349 {
350 	struct qdf_tso_num_seg_elem_t *next_seg;
351 
352 	while (free_seg) {
353 		next_seg = free_seg->next;
354 		dp_tso_num_seg_free(soc,
355 			msdu_info->tx_queue.desc_pool_id,
356 			free_seg);
357 		free_seg = next_seg;
358 	}
359 }
360 
361 /**
362  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
363  * @vdev: virtual device handle
364  * @msdu: network buffer
365  * @msdu_info: meta data associated with the msdu
366  *
367  * Return: QDF_STATUS_SUCCESS on success, error code otherwise
368  */
369 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
370 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
371 {
372 	struct qdf_tso_seg_elem_t *tso_seg;
373 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
374 	struct dp_soc *soc = vdev->pdev->soc;
375 	struct qdf_tso_info_t *tso_info;
376 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
377 
378 	tso_info = &msdu_info->u.tso_info;
379 	tso_info->curr_seg = NULL;
380 	tso_info->tso_seg_list = NULL;
381 	tso_info->num_segs = num_seg;
382 	msdu_info->frm_type = dp_tx_frm_tso;
383 	tso_info->tso_num_seg_list = NULL;
384 
385 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
386 
387 	while (num_seg) {
388 		tso_seg = dp_tx_tso_desc_alloc(
389 				soc, msdu_info->tx_queue.desc_pool_id);
390 		if (tso_seg) {
391 			tso_seg->next = tso_info->tso_seg_list;
392 			tso_info->tso_seg_list = tso_seg;
393 			num_seg--;
394 		} else {
395 			struct qdf_tso_seg_elem_t *free_seg =
396 				tso_info->tso_seg_list;
397 
398 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
399 
400 			return QDF_STATUS_E_NOMEM;
401 		}
402 	}
403 
404 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
405 
406 	tso_num_seg = dp_tso_num_seg_alloc(soc,
407 			msdu_info->tx_queue.desc_pool_id);
408 
409 	if (tso_num_seg) {
410 		tso_num_seg->next = tso_info->tso_num_seg_list;
411 		tso_info->tso_num_seg_list = tso_num_seg;
412 	} else {
413 		/* Bug: free tso_num_seg and tso_seg */
414 		/* Free the already allocated num of segments */
415 		struct qdf_tso_seg_elem_t *free_seg =
416 					tso_info->tso_seg_list;
417 
418 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
419 			__func__);
420 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
421 
422 		return QDF_STATUS_E_NOMEM;
423 	}
424 
425 	msdu_info->num_seg =
426 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
427 
428 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
429 			msdu_info->num_seg);
430 
431 	if (!(msdu_info->num_seg)) {
432 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
433 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
434 					msdu_info);
435 		return QDF_STATUS_E_INVAL;
436 	}
437 
438 	tso_info->curr_seg = tso_info->tso_seg_list;
439 
440 	return QDF_STATUS_SUCCESS;
441 }
442 #else
443 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
444 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
445 {
446 	return QDF_STATUS_E_NOMEM;
447 }
448 #endif
449 
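/*
 * Illustrative sketch of the TSO preparation flow above, assuming a jumbo
 * nbuf that qdf_nbuf_get_tso_num_seg() splits into three segments:
 *
 *   struct dp_tx_msdu_info_s msdu_info = {0};
 *
 *   msdu_info.tx_queue.desc_pool_id = 0;
 *   if (dp_tx_prepare_tso(vdev, msdu, &msdu_info) == QDF_STATUS_SUCCESS) {
 *       // msdu_info.u.tso_info.tso_seg_list now holds three segment
 *       // elements, tso_num_seg_list holds the shared per-MSDU element,
 *       // and curr_seg points at the first segment to be enqueued by
 *       // dp_tx_send_msdu_multiple().
 *   }
 *   // On any allocation failure the partially built lists are returned to
 *   // their pools via dp_tx_free_tso_seg()/dp_tx_free_tso_num_seg().
 */
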
450 /**
451  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
452  * @vdev: DP Vdev handle
453  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
454  * @desc_pool_id: Descriptor Pool ID
455  *
456  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
457  */
458 static
459 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
460 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
461 {
462 	uint8_t i;
463 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
464 	struct dp_tx_seg_info_s *seg_info;
465 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
466 	struct dp_soc *soc = vdev->pdev->soc;
467 
468 	/* Allocate an extension descriptor */
469 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
470 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
471 
472 	if (!msdu_ext_desc) {
473 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
474 		return NULL;
475 	}
476 
477 	if (msdu_info->exception_fw &&
478 			qdf_unlikely(vdev->mesh_vdev)) {
479 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
480 				&msdu_info->meta_data[0],
481 				sizeof(struct htt_tx_msdu_desc_ext2_t));
482 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
483 	}
484 
485 	switch (msdu_info->frm_type) {
486 	case dp_tx_frm_sg:
487 	case dp_tx_frm_me:
488 	case dp_tx_frm_raw:
489 		seg_info = msdu_info->u.sg_info.curr_seg;
490 		/* Update the buffer pointers in MSDU Extension Descriptor */
491 		for (i = 0; i < seg_info->frag_cnt; i++) {
492 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
493 				seg_info->frags[i].paddr_lo,
494 				seg_info->frags[i].paddr_hi,
495 				seg_info->frags[i].len);
496 		}
497 
498 		break;
499 
500 	case dp_tx_frm_tso:
501 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
502 				&cached_ext_desc[0]);
503 		break;
504 
505 
506 	default:
507 		break;
508 	}
509 
510 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
511 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
512 
513 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
514 			msdu_ext_desc->vaddr);
515 
516 	return msdu_ext_desc;
517 }
518 
519 /**
520  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
521  * @vdev: DP vdev handle
522  * @nbuf: skb
523  * @desc_pool_id: Descriptor pool ID
524  * @msdu_info: MSDU information (carries the metadata sent to the fw)
525  * @tx_exc_metadata: Handle that holds exception path metadata
526  * Allocate and prepare Tx descriptor with msdu information.
527  *
528  * Return: Pointer to Tx Descriptor on success,
529  *         NULL on failure
530  */
531 static
532 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
533 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
534 		struct dp_tx_msdu_info_s *msdu_info,
535 		struct cdp_tx_exception_metadata *tx_exc_metadata)
536 {
537 	uint8_t align_pad;
538 	uint8_t is_exception = 0;
539 	uint8_t htt_hdr_size;
540 	struct ether_header *eh;
541 	struct dp_tx_desc_s *tx_desc;
542 	struct dp_pdev *pdev = vdev->pdev;
543 	struct dp_soc *soc = pdev->soc;
544 
545 	/* Allocate software Tx descriptor */
546 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
547 	if (qdf_unlikely(!tx_desc)) {
548 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
549 		return NULL;
550 	}
551 
552 	/* Flow control/Congestion Control counters */
553 	qdf_atomic_inc(&pdev->num_tx_outstanding);
554 
555 	/* Initialize the SW tx descriptor */
556 	tx_desc->nbuf = nbuf;
557 	tx_desc->frm_type = dp_tx_frm_std;
558 	tx_desc->tx_encap_type = (tx_exc_metadata ?
559 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
560 	tx_desc->vdev = vdev;
561 	tx_desc->pdev = pdev;
562 	tx_desc->msdu_ext_desc = NULL;
563 	tx_desc->pkt_offset = 0;
564 
565 	/*
566 	 * For special modes (vdev_type == ocb or mesh), data frames should be
567 	 * transmitted using varying transmit parameters (tx spec) which include
568 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
569 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
570 	 * These frames are sent as exception packets to firmware.
571 	 *
572 	 * HW requirement is that metadata should always point to a
573 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
574 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
575 	 *  to get 8-byte aligned start address along with align_pad added
576 	 *
577 	 *  |-----------------------------|
578 	 *  |                             |
579 	 *  |-----------------------------| <-----Buffer Pointer Address given
580 	 *  |                             |  ^    in HW descriptor (aligned)
581 	 *  |       HTT Metadata          |  |
582 	 *  |                             |  |
583 	 *  |                             |  | Packet Offset given in descriptor
584 	 *  |                             |  |
585 	 *  |-----------------------------|  |
586 	 *  |       Alignment Pad         |  v
587 	 *  |-----------------------------| <----- Actual buffer start address
588 	 *  |        SKB Data             |           (Unaligned)
589 	 *  |                             |
590 	 *  |                             |
591 	 *  |                             |
592 	 *  |                             |
593 	 *  |                             |
594 	 *  |-----------------------------|
595 	 */
596 	if (qdf_unlikely((msdu_info->exception_fw)) ||
597 				(vdev->opmode == wlan_op_mode_ocb)) {
598 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
599 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
600 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
601 					"qdf_nbuf_push_head failed\n");
602 			goto failure;
603 		}
604 
605 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
606 				msdu_info->meta_data);
607 		if (htt_hdr_size == 0)
608 			goto failure;
609 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
610 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
611 		is_exception = 1;
612 	}
613 
614 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
615 				qdf_nbuf_map(soc->osdev, nbuf,
616 					QDF_DMA_TO_DEVICE))) {
617 		/* Handle failure */
618 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
619 				"qdf_nbuf_map failed\n");
620 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
621 		goto failure;
622 	}
623 
624 	if (qdf_unlikely(vdev->nawds_enabled)) {
625 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
626 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
627 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
628 			is_exception = 1;
629 		}
630 	}
631 
632 #if !TQM_BYPASS_WAR
633 	if (is_exception || tx_exc_metadata)
634 #endif
635 	{
636 		/* Temporary WAR due to TQM VP issues */
637 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
638 		qdf_atomic_inc(&pdev->num_tx_exception);
639 	}
640 
641 	return tx_desc;
642 
643 failure:
644 	dp_tx_desc_release(tx_desc, desc_pool_id);
645 	return NULL;
646 }
647 
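/*
 * Illustrative worked example for the buffer layout above (hypothetical
 * address): if qdf_nbuf_data(nbuf) is 0x1005, then
 *
 *   align_pad    = 0x1005 & 0x7 = 5 bytes
 *   htt_hdr_size = sizeof(struct htt_tx_msdu_desc_ext2_t) rounded up to a
 *                  multiple of 8
 *
 * After qdf_nbuf_push_head(nbuf, align_pad) the data pointer becomes 0x1000
 * (8-byte aligned); pushing the 8-byte-multiple HTT metadata keeps it
 * aligned, and tx_desc->pkt_offset = align_pad + htt_hdr_size tells the
 * hardware how many bytes to skip past the metadata and pad to reach the
 * original frame.
 */
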
648 /**
649  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
650  * @vdev: DP vdev handle
651  * @nbuf: skb
652  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
653  * @desc_pool_id : Descriptor Pool ID
654  *
655  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
656  * information. For frames with fragments, allocate and prepare
657  * an MSDU extension descriptor
658  *
659  * Return: Pointer to Tx Descriptor on success,
660  *         NULL on failure
661  */
662 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
663 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
664 		uint8_t desc_pool_id)
665 {
666 	struct dp_tx_desc_s *tx_desc;
667 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
668 	struct dp_pdev *pdev = vdev->pdev;
669 	struct dp_soc *soc = pdev->soc;
670 
671 	/* Allocate software Tx descriptor */
672 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
673 	if (!tx_desc) {
674 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
675 		return NULL;
676 	}
677 
678 	/* Flow control/Congestion Control counters */
679 	qdf_atomic_inc(&pdev->num_tx_outstanding);
680 
681 	/* Initialize the SW tx descriptor */
682 	tx_desc->nbuf = nbuf;
683 	tx_desc->frm_type = msdu_info->frm_type;
684 	tx_desc->tx_encap_type = vdev->tx_encap_type;
685 	tx_desc->vdev = vdev;
686 	tx_desc->pdev = pdev;
687 	tx_desc->pkt_offset = 0;
688 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
689 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
690 
691 	/* Handle scattered frames - TSO/SG/ME */
692 	/* Allocate and prepare an extension descriptor for scattered frames */
693 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
694 	if (!msdu_ext_desc) {
695 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
696 				"%s Tx Extension Descriptor Alloc Fail\n",
697 				__func__);
698 		goto failure;
699 	}
700 
701 #if TQM_BYPASS_WAR
702 	/* Temporary WAR due to TQM VP issues */
703 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
704 	qdf_atomic_inc(&pdev->num_tx_exception);
705 #endif
706 	if (qdf_unlikely(msdu_info->exception_fw))
707 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
708 
709 	tx_desc->msdu_ext_desc = msdu_ext_desc;
710 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
711 
712 	return tx_desc;
713 failure:
714 	dp_tx_desc_release(tx_desc, desc_pool_id);
715 	return NULL;
716 }
717 
718 /**
719  * dp_tx_prepare_raw() - Prepare RAW packet TX
720  * @vdev: DP vdev handle
721  * @nbuf: buffer pointer
722  * @seg_info: Pointer to Segment info Descriptor to be prepared
723  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
724  *     descriptor
725  *
726  * Return: nbuf on success, NULL on DMA mapping failure
727  */
728 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
729 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
730 {
731 	qdf_nbuf_t curr_nbuf = NULL;
732 	uint16_t total_len = 0;
733 	qdf_dma_addr_t paddr;
734 	int32_t i;
735 	int32_t mapped_buf_num = 0;
736 
737 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
738 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
739 
740 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
741 
742 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
743 	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
744 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
745 
746 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
747 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
748 
749 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
750 					QDF_DMA_TO_DEVICE)) {
751 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
752 				"%s dma map error \n", __func__);
753 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
754 			mapped_buf_num = i;
755 			goto error;
756 		}
757 
758 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
759 		seg_info->frags[i].paddr_lo = paddr;
760 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
761 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
762 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
763 		total_len += qdf_nbuf_len(curr_nbuf);
764 	}
765 
766 	seg_info->frag_cnt = i;
767 	seg_info->total_len = total_len;
768 	seg_info->next = NULL;
769 
770 	sg_info->curr_seg = seg_info;
771 
772 	msdu_info->frm_type = dp_tx_frm_raw;
773 	msdu_info->num_seg = 1;
774 
775 	return nbuf;
776 
777 error:
778 	i = 0;
779 	while (nbuf) {
780 		curr_nbuf = nbuf;
781 		if (i < mapped_buf_num) {
782 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
783 			i++;
784 		}
785 		nbuf = qdf_nbuf_next(nbuf);
786 		qdf_nbuf_free(curr_nbuf);
787 	}
788 	return NULL;
789 
790 }
791 
792 /**
793  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
794  * @soc: DP Soc Handle
795  * @vdev: DP vdev handle
796  * @tx_desc: Tx Descriptor Handle
797  * @tid: TID from HLOS for overriding default DSCP-TID mapping
798  * @fw_metadata: Metadata to send to Target Firmware along with frame
799  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
800  * @tx_exc_metadata: Handle that holds exception path meta data
801  *
802  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
803  *  from software Tx descriptor
804  *
805  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES when the TCL ring is full
806  */
807 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
808 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
809 				   uint16_t fw_metadata, uint8_t ring_id,
810 				   struct cdp_tx_exception_metadata
811 					*tx_exc_metadata)
812 {
813 	uint8_t type;
814 	uint16_t length;
815 	void *hal_tx_desc, *hal_tx_desc_cached;
816 	qdf_dma_addr_t dma_addr;
817 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
818 
819 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
820 			tx_exc_metadata->sec_type : vdev->sec_type);
821 
822 	/* Return Buffer Manager ID */
823 	uint8_t bm_id = ring_id;
824 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
825 
826 	hal_tx_desc_cached = (void *) cached_desc;
827 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
828 
829 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
830 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
831 		type = HAL_TX_BUF_TYPE_EXT_DESC;
832 		dma_addr = tx_desc->msdu_ext_desc->paddr;
833 	} else {
834 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
835 		type = HAL_TX_BUF_TYPE_BUFFER;
836 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
837 	}
838 
839 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
840 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
841 			dma_addr, bm_id, tx_desc->id, type);
842 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
843 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
844 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
845 	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
846 					HAL_TX_DESC_DEFAULT_LMAC_ID);
847 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
848 			vdev->dscp_tid_map_id);
849 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
850 			sec_type_map[sec_type]);
851 
852 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
853 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
854 			__func__, length, type, (uint64_t)dma_addr,
855 			tx_desc->pkt_offset, tx_desc->id);
856 
857 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
858 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
859 
860 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
861 			vdev->hal_desc_addr_search_flags);
862 
863 	/* verify checksum offload configuration*/
864 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
865 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
866 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
867 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
868 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
869 	}
870 
871 	if (tid != HTT_TX_EXT_TID_INVALID)
872 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
873 
874 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
875 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
876 
877 
878 	/* Sync cached descriptor with HW */
879 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
880 
881 	if (!hal_tx_desc) {
882 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
883 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
884 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
885 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
886 		return QDF_STATUS_E_RESOURCES;
887 	}
888 
889 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
890 
891 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
892 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
893 
894 	/*
895 	 * If one packet is enqueued in HW, PM usage count needs to be
896 	 * incremented by one to prevent future runtime suspend. This
897 	 * should be tied with the success of enqueuing. It will be
898 	 * decremented after the packet has been sent.
899 	 */
900 	hif_pm_runtime_get_noresume(soc->hif_handle);
901 
902 	return QDF_STATUS_SUCCESS;
903 }
904 
905 
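/*
 * Illustrative caller-side sketch (see dp_tx_send_msdu_single() for the real
 * sequence): the TCL ring must be locked with hal_srng_access_start() before
 * calling dp_tx_hw_enqueue(), and a failure means no HW descriptor was
 * consumed, so the SW descriptor must be released by the caller:
 *
 *   if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng)))
 *       goto drop;
 *   status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
 *                             htt_tcl_metadata, ring_id, NULL);
 *   if (status != QDF_STATUS_SUCCESS)
 *       dp_tx_desc_release(tx_desc, desc_pool_id);
 *   hal_srng_access_end(soc->hal_soc, hal_srng);
 *
 * The hif_pm_runtime_get_noresume() reference taken on successful enqueue is
 * dropped from the Tx completion path once the frame has actually been sent.
 */
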
906 /**
907  * dp_cce_classify() - Classify the frame based on CCE rules
908  * @vdev: DP vdev handle
909  * @nbuf: skb
910  *
911  * Classify frames based on CCE rules
912  * Return: true if the frame is classified,
913  *         false otherwise
914  */
915 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
916 {
917 	struct ether_header *eh = NULL;
918 	uint16_t   ether_type;
919 	qdf_llc_t *llcHdr;
920 	qdf_nbuf_t nbuf_clone = NULL;
921 	qdf_dot3_qosframe_t *qos_wh = NULL;
922 
923 	/* for mesh packets don't do any classification */
924 	if (qdf_unlikely(vdev->mesh_vdev))
925 		return false;
926 
927 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
928 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
929 		ether_type = eh->ether_type;
930 		llcHdr = (qdf_llc_t *)(nbuf->data +
931 					sizeof(struct ether_header));
932 	} else {
933 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
934 		/* For encrypted packets don't do any classification */
935 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
936 			return false;
937 
938 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
939 			if (qdf_unlikely(
940 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
941 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
942 
943 				ether_type = *(uint16_t *)(nbuf->data
944 						+ QDF_IEEE80211_4ADDR_HDR_LEN
945 						+ sizeof(qdf_llc_t)
946 						- sizeof(ether_type));
947 				llcHdr = (qdf_llc_t *)(nbuf->data +
948 						QDF_IEEE80211_4ADDR_HDR_LEN);
949 			} else {
950 				ether_type = *(uint16_t *)(nbuf->data
951 						+ QDF_IEEE80211_3ADDR_HDR_LEN
952 						+ sizeof(qdf_llc_t)
953 						- sizeof(ether_type));
954 				llcHdr = (qdf_llc_t *)(nbuf->data +
955 					QDF_IEEE80211_3ADDR_HDR_LEN);
956 			}
957 
958 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
959 				&& (ether_type ==
960 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
961 
962 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
963 				return true;
964 			}
965 		}
966 
967 		return false;
968 	}
969 
970 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
971 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
972 				sizeof(*llcHdr));
973 		nbuf_clone = qdf_nbuf_clone(nbuf);
974 		if (qdf_unlikely(nbuf_clone)) {
975 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
976 
977 			if (ether_type == htons(ETHERTYPE_8021Q)) {
978 				qdf_nbuf_pull_head(nbuf_clone,
979 						sizeof(qdf_net_vlanhdr_t));
980 			}
981 		}
982 	} else {
983 		if (ether_type == htons(ETHERTYPE_8021Q)) {
984 			nbuf_clone = qdf_nbuf_clone(nbuf);
985 			if (qdf_unlikely(nbuf_clone)) {
986 				qdf_nbuf_pull_head(nbuf_clone,
987 					sizeof(qdf_net_vlanhdr_t));
988 			}
989 		}
990 	}
991 
992 	if (qdf_unlikely(nbuf_clone))
993 		nbuf = nbuf_clone;
994 
995 
996 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
997 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
998 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
999 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1000 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1001 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1002 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1003 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1004 		if (qdf_unlikely(nbuf_clone != NULL))
1005 			qdf_nbuf_free(nbuf_clone);
1006 		return true;
1007 	}
1008 
1009 	if (qdf_unlikely(nbuf_clone != NULL))
1010 		qdf_nbuf_free(nbuf_clone);
1011 
1012 	return false;
1013 }
1014 
1015 /**
1016  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1017  * @vdev: DP vdev handle
1018  * @nbuf: skb
1019  * @msdu_info: MSDU info in which the classified TID is filled
1020  * Extract the DSCP or PCP information from frame and map into TID value.
1021  * Software based TID classification is required when more than 2 DSCP-TID
1022  * mapping tables are needed.
1023  * Hardware supports 2 DSCP-TID mapping tables
1024  *
1025  * Return: void
1026  */
1027 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1028 		struct dp_tx_msdu_info_s *msdu_info)
1029 {
1030 	uint8_t tos = 0, dscp_tid_override = 0;
1031 	uint8_t *hdr_ptr, *L3datap;
1032 	uint8_t is_mcast = 0;
1033 	struct ether_header *eh = NULL;
1034 	qdf_ethervlan_header_t *evh = NULL;
1035 	uint16_t   ether_type;
1036 	qdf_llc_t *llcHdr;
1037 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1038 
1039 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1040 
1041 	if (vdev->dscp_tid_map_id <= 1)
1042 		return;
1043 
1044 	/* for mesh packets don't do any classification */
1045 	if (qdf_unlikely(vdev->mesh_vdev))
1046 		return;
1047 
1048 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1049 		eh = (struct ether_header *) nbuf->data;
1050 		hdr_ptr = eh->ether_dhost;
1051 		L3datap = hdr_ptr + sizeof(struct ether_header);
1052 	} else {
1053 		qdf_dot3_qosframe_t *qos_wh =
1054 			(qdf_dot3_qosframe_t *) nbuf->data;
1055 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1056 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1057 		return;
1058 	}
1059 
1060 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1061 	ether_type = eh->ether_type;
1062 
1063 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1064 	/*
1065 	 * Check if packet is dot3 or eth2 type.
1066 	 */
1067 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1068 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1069 				sizeof(*llcHdr));
1070 
1071 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1072 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1073 				sizeof(*llcHdr);
1074 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1075 					+ sizeof(*llcHdr) +
1076 					sizeof(qdf_net_vlanhdr_t));
1077 		} else {
1078 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1079 				sizeof(*llcHdr);
1080 		}
1081 	} else {
1082 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1083 			evh = (qdf_ethervlan_header_t *) eh;
1084 			ether_type = evh->ether_type;
1085 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1086 		}
1087 	}
1088 
1089 	/*
1090 	 * Find priority from IP TOS DSCP field
1091 	 */
1092 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1093 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1094 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1095 			/* Only for unicast frames */
1096 			if (!is_mcast) {
1097 				/* send it on VO queue */
1098 				msdu_info->tid = DP_VO_TID;
1099 			}
1100 		} else {
1101 			/*
1102 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1103 			 * from TOS byte.
1104 			 */
1105 			tos = ip->ip_tos;
1106 			dscp_tid_override = 1;
1107 
1108 		}
1109 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1110 		/* TODO
1111 		 * use flowlabel
1112 		 * igmpmld cases to be handled in phase 2
1113 		 */
1114 		unsigned long ver_pri_flowlabel;
1115 		unsigned long pri;
1116 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1117 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1118 			DP_IPV6_PRIORITY_SHIFT;
1119 		tos = pri;
1120 		dscp_tid_override = 1;
1121 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1122 		msdu_info->tid = DP_VO_TID;
1123 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1124 		/* Only for unicast frames */
1125 		if (!is_mcast) {
1126 			/* send ucast arp on VO queue */
1127 			msdu_info->tid = DP_VO_TID;
1128 		}
1129 	}
1130 
1131 	/*
1132 	 * Assign all MCAST packets to BE
1133 	 */
1134 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1135 		if (is_mcast) {
1136 			tos = 0;
1137 			dscp_tid_override = 1;
1138 		}
1139 	}
1140 
1141 	if (dscp_tid_override == 1) {
1142 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1143 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1144 	}
1145 	return;
1146 }
1147 
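/*
 * Illustrative worked example for the DSCP-to-TID lookup above: an IPv4
 * frame with a TOS byte of 0xB8 (DSCP 46, Expedited Forwarding) gives
 *
 *   tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK
 *       = 46 (the two ECN bits are dropped)
 *   msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46]
 *
 * which the configured mapping table is expected to resolve to a voice TID.
 * EAPOL, unicast DHCP and unicast ARP frames bypass the table and are forced
 * to DP_VO_TID directly.
 */
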
1148 #ifdef CONVERGED_TDLS_ENABLE
1149 /**
1150  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1151  * @tx_desc: TX descriptor
1152  *
1153  * Return: None
1154  */
1155 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1156 {
1157 	if (tx_desc->vdev) {
1158 		if (tx_desc->vdev->is_tdls_frame)
1159 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1160 		tx_desc->vdev->is_tdls_frame = false;
1161 	}
1162 }
1163 
1164 /**
1165  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1166  * @tx_desc: TX descriptor
1167  * @vdev: datapath vdev handle
1168  *
1169  * Return: None
1170  */
1171 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1172 				  struct dp_vdev *vdev)
1173 {
1174 	struct hal_tx_completion_status ts = {0};
1175 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1176 
1177 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1178 	if (vdev->tx_non_std_data_callback.func) {
1179 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1180 		vdev->tx_non_std_data_callback.func(
1181 				vdev->tx_non_std_data_callback.ctxt,
1182 				nbuf, ts.status);
1183 		return;
1184 	}
1185 }
1186 #endif
1187 
1188 /**
1189  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1190  * @vdev: DP vdev handle
1191  * @nbuf: skb
1192  * @msdu_info: MSDU information for this frame, which carries the TID
1193  *	from HLOS (for overriding the default DSCP-TID mapping), the Tx
1194  *	queue to be used, and the metadata to the fw
1195  * @peer_id: peer_id of the peer in case of NAWDS frames
1196  * @tx_exc_metadata: Handle that holds exception path metadata
1197  *
1198  * Return: NULL on success,
1199  *         nbuf when it fails to send
1200  */
1201 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1202 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1203 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1204 {
1205 	struct dp_pdev *pdev = vdev->pdev;
1206 	struct dp_soc *soc = pdev->soc;
1207 	struct dp_tx_desc_s *tx_desc;
1208 	QDF_STATUS status;
1209 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1210 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1211 	uint16_t htt_tcl_metadata = 0;
1212 	uint8_t tid = msdu_info->tid;
1213 
1214 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1215 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1216 			msdu_info, tx_exc_metadata);
1217 	if (!tx_desc) {
1218 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1219 			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1220 			  __func__, vdev, tx_q->desc_pool_id);
1221 		return nbuf;
1222 	}
1223 
1224 	if (qdf_unlikely(soc->cce_disable)) {
1225 		if (dp_cce_classify(vdev, nbuf) == true) {
1226 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1227 			tid = DP_VO_TID;
1228 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1229 		}
1230 	}
1231 
1232 	dp_tx_update_tdls_flags(tx_desc);
1233 
1234 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1235 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1236 				"%s %d : HAL RING Access Failed -- %pK\n",
1237 				__func__, __LINE__, hal_srng);
1238 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1239 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1240 		goto fail_return;
1241 	}
1242 
1243 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1244 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1245 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1246 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1247 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1248 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1249 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1250 				peer_id);
1251 	} else
1252 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1253 
1254 
1255 	if (msdu_info->exception_fw) {
1256 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1257 	}
1258 
1259 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1260 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1261 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1262 
1263 	if (status != QDF_STATUS_SUCCESS) {
1264 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1265 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1266 			  __func__, tx_desc, tx_q->ring_id);
1267 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1268 		goto fail_return;
1269 	}
1270 
1271 	nbuf = NULL;
1272 
1273 fail_return:
1274 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1275 		hal_srng_access_end(soc->hal_soc, hal_srng);
1276 		hif_pm_runtime_put(soc->hif_handle);
1277 	} else {
1278 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1279 	}
1280 
1281 	return nbuf;
1282 }
1283 
1284 /**
1285  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1286  * @vdev: DP vdev handle
1287  * @nbuf: skb
1288  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1289  *
1290  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1291  *
1292  * Return: NULL on success,
1293  *         nbuf when it fails to send
1294  */
1295 #if QDF_LOCK_STATS
1296 static noinline
1297 #else
1298 static
1299 #endif
1300 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1301 				    struct dp_tx_msdu_info_s *msdu_info)
1302 {
1303 	uint8_t i;
1304 	struct dp_pdev *pdev = vdev->pdev;
1305 	struct dp_soc *soc = pdev->soc;
1306 	struct dp_tx_desc_s *tx_desc;
1307 	bool is_cce_classified = false;
1308 	QDF_STATUS status;
1309 	uint16_t htt_tcl_metadata = 0;
1310 
1311 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1312 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1313 
1314 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1315 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1316 				"%s %d : HAL RING Access Failed -- %pK\n",
1317 				__func__, __LINE__, hal_srng);
1318 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1319 		return nbuf;
1320 	}
1321 
1322 	if (qdf_unlikely(soc->cce_disable)) {
1323 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1324 		if (is_cce_classified) {
1325 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1326 			msdu_info->tid = DP_VO_TID;
1327 		}
1328 	}
1329 
1330 	if (msdu_info->frm_type == dp_tx_frm_me)
1331 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1332 
1333 	i = 0;
1334 	/* Print statement to track i and num_seg */
1335 	/*
1336 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1337 	 * descriptors using information in msdu_info
1338 	 */
1339 	while (i < msdu_info->num_seg) {
1340 		/*
1341 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1342 		 * descriptor
1343 		 */
1344 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1345 				tx_q->desc_pool_id);
1346 
1347 		if (!tx_desc) {
1348 			if (msdu_info->frm_type == dp_tx_frm_me) {
1349 				dp_tx_me_free_buf(pdev,
1350 					(void *)(msdu_info->u.sg_info
1351 						.curr_seg->frags[0].vaddr));
1352 			}
1353 			goto done;
1354 		}
1355 
1356 		if (msdu_info->frm_type == dp_tx_frm_me) {
1357 			tx_desc->me_buffer =
1358 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1359 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1360 		}
1361 
1362 		if (is_cce_classified)
1363 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1364 
1365 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1366 		if (msdu_info->exception_fw) {
1367 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1368 		}
1369 
1370 		/*
1371 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1372 		 */
1373 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1374 			htt_tcl_metadata, tx_q->ring_id, NULL);
1375 
1376 		if (status != QDF_STATUS_SUCCESS) {
1377 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1378 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1379 				  __func__, tx_desc, tx_q->ring_id);
1380 
1381 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1382 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1383 
1384 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1385 			goto done;
1386 		}
1387 
1388 		/*
1389 		 * TODO
1390 		 * if tso_info structure can be modified to have curr_seg
1391 		 * as first element, following 2 blocks of code (for TSO and SG)
1392 		 * can be combined into 1
1393 		 */
1394 
1395 		/*
1396 		 * For frames with multiple segments (TSO, ME), jump to next
1397 		 * segment.
1398 		 */
1399 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1400 			if (msdu_info->u.tso_info.curr_seg->next) {
1401 				msdu_info->u.tso_info.curr_seg =
1402 					msdu_info->u.tso_info.curr_seg->next;
1403 
1404 				/*
1405 				 * If this is a jumbo nbuf, then increment the number of
1406 				 * nbuf users for each additional segment of the msdu.
1407 				 * This will ensure that the skb is freed only after
1408 				 * receiving tx completion for all segments of an nbuf
1409 				 */
1410 				qdf_nbuf_inc_users(nbuf);
1411 
1412 				/* Check with MCL if this is needed */
1413 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1414 			}
1415 		}
1416 
1417 		/*
1418 		 * For Multicast-Unicast converted packets,
1419 		 * each converted frame (for a client) is represented as
1420 		 * 1 segment
1421 		 */
1422 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1423 				(msdu_info->frm_type == dp_tx_frm_me)) {
1424 			if (msdu_info->u.sg_info.curr_seg->next) {
1425 				msdu_info->u.sg_info.curr_seg =
1426 					msdu_info->u.sg_info.curr_seg->next;
1427 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1428 			}
1429 		}
1430 		i++;
1431 	}
1432 
1433 	nbuf = NULL;
1434 
1435 done:
1436 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1437 		hal_srng_access_end(soc->hal_soc, hal_srng);
1438 		hif_pm_runtime_put(soc->hif_handle);
1439 	} else {
1440 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1441 	}
1442 
1443 	return nbuf;
1444 }
1445 
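/*
 * Illustrative sketch for the multi-segment loop above, assuming a TSO nbuf
 * that dp_tx_prepare_tso() split into three segments: every segment
 * references the same skb, so the reference count is bumped once for each
 * segment that still has a successor when it is enqueued:
 *
 *   // segment 1 enqueued, another segment follows -> qdf_nbuf_inc_users(nbuf)
 *   // segment 2 enqueued, another segment follows -> qdf_nbuf_inc_users(nbuf)
 *   // segment 3 enqueued, last segment            -> no increment
 *
 * Each Tx completion then drops one reference, so the skb is freed only
 * after the completion for the last segment arrives.
 */
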
1446 /**
1447  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1448  *                     for SG frames
1449  * @vdev: DP vdev handle
1450  * @nbuf: skb
1451  * @seg_info: Pointer to Segment info Descriptor to be prepared
1452  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1453  *
1454  * Return: nbuf on success,
1455  *         NULL on DMA mapping failure
1456  */
1457 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1458 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1459 {
1460 	uint32_t cur_frag, nr_frags;
1461 	qdf_dma_addr_t paddr;
1462 	struct dp_tx_sg_info_s *sg_info;
1463 
1464 	sg_info = &msdu_info->u.sg_info;
1465 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1466 
1467 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1468 				QDF_DMA_TO_DEVICE)) {
1469 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1470 				"dma map error\n");
1471 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1472 
1473 		qdf_nbuf_free(nbuf);
1474 		return NULL;
1475 	}
1476 
1477 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1478 	seg_info->frags[0].paddr_lo = paddr;
1479 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1480 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1481 	seg_info->frags[0].vaddr = (void *) nbuf;
1482 
1483 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1484 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1485 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1486 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1487 					"frag dma map error\n");
1488 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1489 			qdf_nbuf_free(nbuf);
1490 			return NULL;
1491 		}
1492 
1493 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1494 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1495 		seg_info->frags[cur_frag + 1].paddr_hi =
1496 			((uint64_t) paddr) >> 32;
1497 		seg_info->frags[cur_frag + 1].len =
1498 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1499 	}
1500 
1501 	seg_info->frag_cnt = (cur_frag + 1);
1502 	seg_info->total_len = qdf_nbuf_len(nbuf);
1503 	seg_info->next = NULL;
1504 
1505 	sg_info->curr_seg = seg_info;
1506 
1507 	msdu_info->frm_type = dp_tx_frm_sg;
1508 	msdu_info->num_seg = 1;
1509 
1510 	return nbuf;
1511 }
1512 
1513 #ifdef MESH_MODE_SUPPORT
1514 
1515 /**
1516  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1517  *				and prepare msdu_info for mesh frames.
1518  * @vdev: DP vdev handle
1519  * @nbuf: skb
1520  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1521  *
1522  * Return: NULL on failure,
1523  *         nbuf when extracted successfully
1524  */
1525 static
1526 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1527 				struct dp_tx_msdu_info_s *msdu_info)
1528 {
1529 	struct meta_hdr_s *mhdr;
1530 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1531 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1532 
1533 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1534 
1535 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1536 		msdu_info->exception_fw = 0;
1537 		goto remove_meta_hdr;
1538 	}
1539 
1540 	msdu_info->exception_fw = 1;
1541 
1542 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1543 
1544 	meta_data->host_tx_desc_pool = 1;
1545 	meta_data->update_peer_cache = 1;
1546 	meta_data->learning_frame = 1;
1547 
1548 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1549 		meta_data->power = mhdr->power;
1550 
1551 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1552 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1553 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1554 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1555 
1556 		meta_data->dyn_bw = 1;
1557 
1558 		meta_data->valid_pwr = 1;
1559 		meta_data->valid_mcs_mask = 1;
1560 		meta_data->valid_nss_mask = 1;
1561 		meta_data->valid_preamble_type  = 1;
1562 		meta_data->valid_retries = 1;
1563 		meta_data->valid_bw_info = 1;
1564 	}
1565 
1566 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1567 		meta_data->encrypt_type = 0;
1568 		meta_data->valid_encrypt_type = 1;
1569 		meta_data->learning_frame = 0;
1570 	}
1571 
1572 	meta_data->valid_key_flags = 1;
1573 	meta_data->key_flags = (mhdr->keyix & 0x3);
1574 
1575 remove_meta_hdr:
1576 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1577 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1578 				"qdf_nbuf_pull_head failed\n");
1579 		qdf_nbuf_free(nbuf);
1580 		return NULL;
1581 	}
1582 
1583 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1584 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1585 	else
1586 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1587 
1588 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1589 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1590 			" tid %d to_fw %d\n",
1591 			__func__, msdu_info->meta_data[0],
1592 			msdu_info->meta_data[1],
1593 			msdu_info->meta_data[2],
1594 			msdu_info->meta_data[3],
1595 			msdu_info->meta_data[4],
1596 			msdu_info->meta_data[5],
1597 			msdu_info->tid, msdu_info->exception_fw);
1598 
1599 	return nbuf;
1600 }
1601 #else
1602 static
1603 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1604 				struct dp_tx_msdu_info_s *msdu_info)
1605 {
1606 	return nbuf;
1607 }
1608 
1609 #endif
1610 
1611 #ifdef DP_FEATURE_NAWDS_TX
1612 /**
1613  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1614  * @vdev: dp_vdev handle
1615  * @nbuf: skb
1616  * @msdu_info: MSDU information for this Tx frame, which carries the TID
1617  *	from HLOS (for overriding the default DSCP-TID mapping), the Tx
1618  *	queue to be used, and the mesh metadata; it is passed on to
1619  *	dp_tx_send_msdu_single() for every NAWDS peer
1620  *
1621  * Return: NULL on success, nbuf on failure
1622  */
1623 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1624 		struct dp_tx_msdu_info_s *msdu_info)
1625 {
1626 	struct dp_peer *peer = NULL;
1627 	struct dp_soc *soc = vdev->pdev->soc;
1628 	struct dp_ast_entry *ast_entry = NULL;
1629 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1630 	uint16_t peer_id = HTT_INVALID_PEER;
1631 
1632 	struct dp_peer *sa_peer = NULL;
1633 	qdf_nbuf_t nbuf_copy;
1634 
1635 	qdf_spin_lock_bh(&(soc->ast_lock));
1636 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1637 
1638 	if (ast_entry)
1639 		sa_peer = ast_entry->peer;
1640 
1641 	qdf_spin_unlock_bh(&(soc->ast_lock));
1642 
1643 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1644 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1645 				(peer->nawds_enabled)) {
1646 			if (sa_peer == peer) {
1647 				QDF_TRACE(QDF_MODULE_ID_DP,
1648 						QDF_TRACE_LEVEL_DEBUG,
1649 						" %s: broadcast multicast packet",
1650 						 __func__);
1651 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1652 				continue;
1653 			}
1654 
1655 			nbuf_copy = qdf_nbuf_copy(nbuf);
1656 			if (!nbuf_copy) {
1657 				QDF_TRACE(QDF_MODULE_ID_DP,
1658 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1659 						"nbuf copy failed");
1660 				continue;
1661 			}
1662 			peer_id = peer->peer_ids[0];
1663 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1664 					msdu_info, peer_id, NULL);
1665 			if (nbuf_copy != NULL) {
1666 				qdf_nbuf_free(nbuf_copy);
1667 				continue;
1668 			}
1669 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1670 						1, qdf_nbuf_len(nbuf));
1671 		}
1672 	}
1673 	if (peer_id == HTT_INVALID_PEER)
1674 		return nbuf;
1675 
1676 	return NULL;
1677 }
1678 #endif
1679 
1680 /**
1681  * dp_check_exc_metadata() - Checks if parameters are valid
1682  * @tx_exc: holds all exception path parameters
1683  *
1684  * Return: true when all the parameters are valid, else false
1685  *
1686  */
1687 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1688 {
1689 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1690 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1691 	    tx_exc->sec_type > cdp_num_sec_types) {
1692 		return false;
1693 	}
1694 
1695 	return true;
1696 }
1697 
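/*
 * Illustrative sketch (hypothetical values) of the exception metadata an
 * OSIF caller would pass to dp_tx_send_exception() and that the check above
 * validates:
 *
 *   struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *   tx_exc.peer_id       = peer_id;             // or HTT_INVALID_PEER
 *   tx_exc.tid           = HTT_INVALID_TID;     // keep default DSCP-TID mapping
 *   tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *   tx_exc.sec_type      = cdp_sec_type_none;
 *
 *   nbuf = dp_tx_send_exception(vdev_handle, nbuf, &tx_exc);
 *   // A non-NULL return means the frame was not consumed and must be freed
 *   // or retried by the caller.
 */
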
1698 /**
1699  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1700  * @vap_dev: DP vdev handle
1701  * @nbuf: skb
1702  * @tx_exc_metadata: Handle that holds exception path meta data
1703  *
1704  * Entry point for Core Tx layer (DP_TX) invoked from
1705  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1706  *
1707  * Return: NULL on success,
1708  *         nbuf when it fails to send
1709  */
1710 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1711 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1712 {
1713 	struct ether_header *eh = NULL;
1714 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1715 	struct dp_tx_msdu_info_s msdu_info;
1716 
1717 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1718 
1719 	msdu_info.tid = tx_exc_metadata->tid;
1720 
1721 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1722 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1723 			"%s , skb %pM",
1724 			__func__, nbuf->data);
1725 
1726 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1727 
1728 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1730 			"Invalid parameters in exception path");
1731 		goto fail;
1732 	}
1733 
1734 	/* Basic sanity checks for unsupported packets */
1735 
1736 	/* MESH mode */
1737 	if (qdf_unlikely(vdev->mesh_vdev)) {
1738 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1739 			"Mesh mode is not supported in exception path");
1740 		goto fail;
1741 	}
1742 
1743 	/* TSO or SG */
1744 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1745 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1746 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1747 			  "TSO and SG are not supported in exception path");
1748 
1749 		goto fail;
1750 	}
1751 
1752 	/* RAW */
1753 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1754 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1755 			  "Raw frame is not supported in exception path");
1756 		goto fail;
1757 	}
1758 
1759 
1760 	/* Mcast enhancement*/
1761 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1762 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1763 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1764 					  "Ignoring mcast_enhancement_en; sending the mcast packet to the FW\n");
1765 		}
1766 	}
1767 
1768 	/*
1769 	 * Get HW Queue to use for this frame.
1770 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1771 	 * dedicated for data and 1 for command.
1772 	 * "queue_id" maps to one hardware ring.
1773 	 *  With each ring, we also associate a unique Tx descriptor pool
1774 	 *  to minimize lock contention for these resources.
1775 	 */
1776 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1777 
1778 	/* Reset the control block */
1779 	qdf_nbuf_reset_ctxt(nbuf);
1780 
1781 	/*  Single linear frame */
1782 	/*
1783 	 * If nbuf is a simple linear frame, use send_single function to
1784 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1785 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1786 	 */
1787 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1788 			tx_exc_metadata->peer_id, tx_exc_metadata);
1789 
1790 	return nbuf;
1791 
1792 fail:
1793 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1794 			"pkt send failed");
1795 	return nbuf;
1796 }
1797 
1798 /**
1799  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1800  * @vap_dev: DP vdev handle
1801  * @nbuf: skb
1802  *
1803  * Entry point for Core Tx layer (DP_TX) invoked from
1804  * hard_start_xmit in OSIF/HDD
1805  *
1806  * Return: NULL on success,
1807  *         nbuf when it fails to send
1808  */
1809 #ifdef MESH_MODE_SUPPORT
1810 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1811 {
1812 	struct meta_hdr_s *mhdr;
1813 	qdf_nbuf_t nbuf_mesh = NULL;
1814 	qdf_nbuf_t nbuf_clone = NULL;
1815 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1816 	uint8_t no_enc_frame = 0;
1817 
1818 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1819 	if (nbuf_mesh == NULL) {
1820 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1821 				"qdf_nbuf_unshare failed\n");
1822 		return nbuf;
1823 	}
1824 	nbuf = nbuf_mesh;
1825 
1826 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1827 
1828 	if ((vdev->sec_type != cdp_sec_type_none) &&
1829 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1830 		no_enc_frame = 1;
1831 
1832 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1833 		       !no_enc_frame) {
1834 		nbuf_clone = qdf_nbuf_clone(nbuf);
1835 		if (nbuf_clone == NULL) {
1836 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1837 				"qdf_nbuf_clone failed\n");
1838 			return nbuf;
1839 		}
1840 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1841 	}
1842 
1843 	if (nbuf_clone) {
1844 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1845 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1846 		} else
1847 			qdf_nbuf_free(nbuf_clone);
1848 	}
1849 
1850 	if (no_enc_frame)
1851 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1852 	else
1853 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1854 
1855 	nbuf = dp_tx_send(vap_dev, nbuf);
1856 	if ((nbuf == NULL) && no_enc_frame) {
1857 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1858 	}
1859 
1860 	return nbuf;
1861 }
1862 
1863 #else
1864 
1865 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1866 {
1867 	return dp_tx_send(vap_dev, nbuf);
1868 }
1869 
1870 #endif
1871 
1872 /**
1873  * dp_tx_send() - Transmit a frame on a given VAP
1874  * @vap_dev: DP vdev handle
1875  * @nbuf: skb
1876  *
1877  * Entry point for Core Tx layer (DP_TX) invoked from
1878  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1879  * cases
1880  *
1881  * Return: NULL on success,
1882  *         nbuf when it fails to send
1883  */
1884 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1885 {
1886 	struct ether_header *eh = NULL;
1887 	struct dp_tx_msdu_info_s msdu_info;
1888 	struct dp_tx_seg_info_s seg_info;
1889 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1890 	uint16_t peer_id = HTT_INVALID_PEER;
1891 	qdf_nbuf_t nbuf_mesh = NULL;
1892 
1893 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1894 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1895 
1896 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1897 
1898 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1899 			"%s , skb %pM",
1900 			__func__, nbuf->data);
1901 
1902 	/*
1903 	 * Set Default Host TID value to invalid TID
1904 	 * (TID override disabled)
1905 	 */
1906 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1907 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1908 
1909 	if (qdf_unlikely(vdev->mesh_vdev)) {
1910 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1911 								&msdu_info);
1912 		if (nbuf_mesh == NULL) {
1913 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1914 					"Extracting mesh metadata failed\n");
1915 			return nbuf;
1916 		}
1917 		nbuf = nbuf_mesh;
1918 	}
1919 
1920 	/*
1921 	 * Get HW Queue to use for this frame.
1922 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1923 	 * dedicated for data and 1 for command.
1924 	 * "queue_id" maps to one hardware ring.
1925 	 *  With each ring, we also associate a unique Tx descriptor pool
1926 	 *  to minimize lock contention for these resources.
1927 	 */
1928 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1929 
1930 	/*
1931 	 * TCL H/W supports 2 DSCP-TID mapping tables.
1932 	 *  Table 1 - Default DSCP-TID mapping table
1933 	 *  Table 2 - 1 DSCP-TID override table
1934 	 *
1935 	 * If we need a different DSCP-TID mapping for this vap,
1936 	 * call tid_classify to extract DSCP/ToS from frame and
1937 	 * map to a TID and store in msdu_info. This is later used
1938 	 * to fill in TCL Input descriptor (per-packet TID override).
1939 	 */
1940 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
1941 
1942 	/* Reset the control block */
1943 	qdf_nbuf_reset_ctxt(nbuf);
1944 
1945 	/*
1946 	 * Classify the frame and call corresponding
1947 	 * "prepare" function which extracts the segment (TSO)
1948 	 * and fragmentation information (for TSO, SG, ME, or Raw)
1949 	 * into MSDU_INFO structure which is later used to fill
1950 	 * SW and HW descriptors.
1951 	 */
1952 	if (qdf_nbuf_is_tso(nbuf)) {
1953 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1954 			  "%s TSO frame %pK\n", __func__, vdev);
1955 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
1956 				qdf_nbuf_len(nbuf));
1957 
1958 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
1959 			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
1960 			return nbuf;
1961 		}
1962 
1963 		goto send_multiple;
1964 	}
1965 
1966 	/* SG */
1967 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1968 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
1969 
1970 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1971 			 "%s non-TSO SG frame %pK\n", __func__, vdev);
1972 
1973 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
1974 				qdf_nbuf_len(nbuf));
1975 
1976 		goto send_multiple;
1977 	}
1978 
1979 #ifdef ATH_SUPPORT_IQUE
1980 	/* Mcast to Ucast Conversion*/
1981 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1982 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1983 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1984 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1985 				  "%s Mcast frm for ME %pK\n", __func__, vdev);
1986 
1987 			DP_STATS_INC_PKT(vdev,
1988 					tx_i.mcast_en.mcast_pkt, 1,
1989 					qdf_nbuf_len(nbuf));
1990 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
1991 					QDF_STATUS_SUCCESS) {
1992 				return NULL;
1993 			}
1994 		}
1995 	}
1996 #endif
1997 
1998 	/* RAW */
1999 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2000 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2001 		if (nbuf == NULL)
2002 			return NULL;
2003 
2004 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2005 			  "%s Raw frame %pK\n", __func__, vdev);
2006 
2007 		goto send_multiple;
2008 
2009 	}
2010 
2011 	/*  Single linear frame */
2012 	/*
2013 	 * If nbuf is a simple linear frame, use send_single function to
2014 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2015 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2016 	 */
2017 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2018 
2019 	return nbuf;
2020 
2021 send_multiple:
2022 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2023 
2024 	return nbuf;
2025 }
2026 
2027 /**
2028  * dp_tx_reinject_handler() - Tx Reinject Handler
2029  * @tx_desc: software descriptor head pointer
2030  * @status : Tx completion status from HTT descriptor
2031  *
2032  * This function reinjects frames back to Target.
2033  * Todo - Host queue needs to be added
2034  *
2035  * Return: none
2036  */
2037 static
2038 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2039 {
2040 	struct dp_vdev *vdev;
2041 	struct dp_peer *peer = NULL;
2042 	uint32_t peer_id = HTT_INVALID_PEER;
2043 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2044 	qdf_nbuf_t nbuf_copy = NULL;
2045 	struct dp_tx_msdu_info_s msdu_info;
2046 	struct dp_peer *sa_peer = NULL;
2047 	struct dp_ast_entry *ast_entry = NULL;
2048 	struct dp_soc *soc = NULL;
2049 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2050 #ifdef WDS_VENDOR_EXTENSION
2051 	int is_mcast = 0, is_ucast = 0;
2052 	int num_peers_3addr = 0;
2053 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2054 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2055 #endif
2056 
2057 	vdev = tx_desc->vdev;
2058 	soc = vdev->pdev->soc;
2059 
2060 	qdf_assert(vdev);
2061 
2062 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2063 
2064 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2065 
2066 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2067 			"%s Tx reinject path\n", __func__);
2068 
2069 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2070 			qdf_nbuf_len(tx_desc->nbuf));
2071 
2072 	qdf_spin_lock_bh(&(soc->ast_lock));
2073 
2074 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2075 
2076 	if (ast_entry)
2077 		sa_peer = ast_entry->peer;
2078 
2079 	qdf_spin_unlock_bh(&(soc->ast_lock));
2080 
2081 #ifdef WDS_VENDOR_EXTENSION
2082 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2083 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2084 	} else {
2085 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2086 	}
2087 	is_ucast = !is_mcast;
2088 
2089 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2090 		if (peer->bss_peer)
2091 			continue;
2092 
2093 		/* Detect wds peers that use 3-addr framing for mcast.
2094 		 * if there are any, the bss_peer is used to send the
2095 		 * mcast frame using 3-addr format. All wds enabled
2096 		 * peers that use 4-addr framing for mcast frames will
2097 		 * be duplicated and sent as 4-addr frames below.
2098 		 */
2099 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2100 			num_peers_3addr = 1;
2101 			break;
2102 		}
2103 	}
2104 #endif
2105 
2106 	if (qdf_unlikely(vdev->mesh_vdev)) {
2107 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2108 	} else {
2109 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2110 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2111 #ifdef WDS_VENDOR_EXTENSION
2112 			/*
2113 			 * . if 3-addr STA, then send on BSS Peer
2114 			 * . if Peer WDS enabled and accept 4-addr mcast,
2115 			 * send mcast on that peer only
2116 			 * . if Peer WDS enabled and accept 4-addr ucast,
2117 			 * send ucast on that peer only
2118 			 */
2119 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2120 			 (peer->wds_enabled &&
2121 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2122 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2123 #else
2124 			((peer->bss_peer &&
2125 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2126 				 peer->nawds_enabled)) {
2127 #endif
2128 				peer_id = DP_INVALID_PEER;
2129 
2130 				if (peer->nawds_enabled) {
2131 					peer_id = peer->peer_ids[0];
2132 					if (sa_peer == peer) {
2133 						QDF_TRACE(
2134 							QDF_MODULE_ID_DP,
2135 							QDF_TRACE_LEVEL_DEBUG,
2136 							" %s: multicast packet",
2137 							__func__);
2138 						DP_STATS_INC(peer,
2139 							tx.nawds_mcast_drop, 1);
2140 						continue;
2141 					}
2142 				}
2143 
2144 				nbuf_copy = qdf_nbuf_copy(nbuf);
2145 
2146 				if (!nbuf_copy) {
2147 					QDF_TRACE(QDF_MODULE_ID_DP,
2148 						QDF_TRACE_LEVEL_DEBUG,
2149 						FL("nbuf copy failed"));
2150 					break;
2151 				}
2152 
2153 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2154 						nbuf_copy,
2155 						&msdu_info,
2156 						peer_id,
2157 						NULL);
2158 
2159 				if (nbuf_copy) {
2160 					QDF_TRACE(QDF_MODULE_ID_DP,
2161 						QDF_TRACE_LEVEL_DEBUG,
2162 						FL("pkt send failed"));
2163 					qdf_nbuf_free(nbuf_copy);
2164 				} else {
2165 					if (peer_id != DP_INVALID_PEER)
2166 						DP_STATS_INC_PKT(peer,
2167 							tx.nawds_mcast,
2168 							1, qdf_nbuf_len(nbuf));
2169 				}
2170 			}
2171 		}
2172 	}
2173 
2174 	if (vdev->nawds_enabled) {
2175 		peer_id = DP_INVALID_PEER;
2176 
2177 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2178 					1, qdf_nbuf_len(nbuf));
2179 
2180 		nbuf = dp_tx_send_msdu_single(vdev,
2181 				nbuf,
2182 				&msdu_info,
2183 				peer_id, NULL);
2184 
2185 		if (nbuf) {
2186 			QDF_TRACE(QDF_MODULE_ID_DP,
2187 				QDF_TRACE_LEVEL_DEBUG,
2188 				FL("pkt send failed"));
2189 			qdf_nbuf_free(nbuf);
2190 		}
2191 	} else
2192 		qdf_nbuf_free(nbuf);
2193 
2194 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2195 }
2196 
2197 /**
2198  * dp_tx_inspect_handler() - Tx Inspect Handler
2199  * @tx_desc: software descriptor head pointer
2200  * @status : Tx completion status from HTT descriptor
2201  *
2202  * Handles Tx frames sent back to Host for inspection
2203  * (ProxyARP)
2204  *
2205  * Return: none
2206  */
2207 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2208 {
2209 
2210 	struct dp_soc *soc;
2211 	struct dp_pdev *pdev = tx_desc->pdev;
2212 
2213 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2214 			"%s Tx inspect path\n",
2215 			__func__);
2216 
2217 	qdf_assert(pdev);
2218 
2219 	soc = pdev->soc;
2220 
2221 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2222 			qdf_nbuf_len(tx_desc->nbuf));
2223 
2224 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2225 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2226 }
2227 
2228 #ifdef FEATURE_PERPKT_INFO
2229 /**
2230  * dp_get_completion_indication_for_stack() - send completion to stack
2231  * @soc :  dp_soc handle
2232  * @pdev:  dp_pdev handle
2233  * @peer_id: peer_id of the peer for which completion came
2234  * @ppdu_id: ppdu_id
2235  * @first_msdu: first msdu
2236  * @last_msdu: last msdu
2237  * @netbuf: Buffer pointer for free
2238  *
2239  * This function is used to indicate whether the buffer needs to be
2240  * sent to the stack to be freed or not
2241  */
2242 QDF_STATUS
2243 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2244 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2245 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2246 {
2247 	struct tx_capture_hdr *ppdu_hdr;
2248 	struct dp_peer *peer = NULL;
2249 
2250 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2251 		return QDF_STATUS_E_NOSUPPORT;
2252 
2253 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2254 			dp_peer_find_by_id(soc, peer_id);
2255 
2256 	if (!peer) {
2257 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2258 				FL("Peer Invalid"));
2259 		return QDF_STATUS_E_INVAL;
2260 	}
2261 
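     	/* In M_COPY mode, indicate at most one MSDU per (ppdu_id, peer_id)
     	 * pair; repeats for the same PPDU and peer are not sent to the stack
     	 */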
2262 	if (pdev->mcopy_mode) {
2263 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2264 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2265 			return QDF_STATUS_E_INVAL;
2266 		}
2267 
2268 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2269 		pdev->m_copy_id.tx_peer_id = peer_id;
2270 	}
2271 
2272 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2273 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2274 				FL("No headroom"));
2275 		return QDF_STATUS_E_NOMEM;
2276 	}
2277 
2278 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2279 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2280 					IEEE80211_ADDR_LEN);
2281 	ppdu_hdr->ppdu_id = ppdu_id;
2282 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2283 			IEEE80211_ADDR_LEN);
2284 	ppdu_hdr->peer_id = peer_id;
2285 	ppdu_hdr->first_msdu = first_msdu;
2286 	ppdu_hdr->last_msdu = last_msdu;
2287 
2288 	return QDF_STATUS_SUCCESS;
2289 }
2290 
2291 
2292 /**
2293  * dp_send_completion_to_stack() - send completion to stack
2294  * @soc :  dp_soc handle
2295  * @pdev:  dp_pdev handle
2296  * @peer_id: peer_id of the peer for which completion came
2297  * @ppdu_id: ppdu_id
2298  * @netbuf: Buffer pointer for free
2299  *
2300  * This function is used to send the completion to the stack
2301  * to free the buffer
2302  */
2303 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2304 					uint16_t peer_id, uint32_t ppdu_id,
2305 					qdf_nbuf_t netbuf)
2306 {
2307 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2308 				netbuf, peer_id,
2309 				WDI_NO_VAL, pdev->pdev_id);
2310 }
2311 #else
2312 static QDF_STATUS
2313 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2314 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2315 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2316 {
2317 	return QDF_STATUS_E_NOSUPPORT;
2318 }
2319 
2320 static void
2321 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2322 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2323 {
2324 }
2325 #endif
2326 
2327 /**
2328  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2329  * @soc: Soc handle
2330  * @desc: software Tx descriptor to be processed
2331  *
2332  * Return: none
2333  */
2334 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2335 		struct dp_tx_desc_s *desc)
2336 {
2337 	struct dp_vdev *vdev = desc->vdev;
2338 	qdf_nbuf_t nbuf = desc->nbuf;
2339 
2340 	/* If it is TDLS mgmt, don't unmap or free the frame */
2341 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2342 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2343 
2344 	/* 0 : MSDU buffer, 1 : MLE */
2345 	if (desc->msdu_ext_desc) {
2346 		/* TSO free */
2347 		if (hal_tx_ext_desc_get_tso_enable(
2348 					desc->msdu_ext_desc->vaddr)) {
2349 			/* If this is the last user of the nbuf (no TSO
2350 			 * segments remaining), unmap and free the actual nbuf */
2351 			if (qdf_nbuf_get_users(nbuf) == 1)
2352 				__qdf_nbuf_unmap_single(soc->osdev,
2353 						nbuf,
2354 						QDF_DMA_TO_DEVICE);
2355 
2356 			qdf_nbuf_free(nbuf);
2357 			return;
2358 		}
2359 	}
2360 
2361 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2362 
2363 	if (qdf_likely(!vdev->mesh_vdev))
2364 		qdf_nbuf_free(nbuf);
2365 	else {
2366 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2367 			qdf_nbuf_free(nbuf);
2368 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2369 		} else
2370 			vdev->osif_tx_free_ext((nbuf));
2371 	}
2372 }
2373 
2374 /**
2375  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2376  * @vdev: pointer to dp dev handler
2377  * @status : Tx completion status from HTT descriptor
2378  *
2379  * Handles MEC notify event sent from fw to Host
2380  *
2381  * Return: none
2382  */
2383 #ifdef FEATURE_WDS
2384 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2385 {
2386 
2387 	struct dp_soc *soc;
2388 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2389 	struct dp_peer *peer;
2390 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2391 
2392 	if (!vdev->wds_enabled)
2393 		return;
2394 
2395 	soc = vdev->pdev->soc;
2396 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2397 	peer = TAILQ_FIRST(&vdev->peer_list);
2398 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2399 
2400 	if (!peer) {
2401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2402 				FL("peer is NULL"));
2403 		return;
2404 	}
2405 
2406 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2407 			"%s Tx MEC Handler\n",
2408 			__func__);
2409 
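     	/* The MAC address is carried from byte 4 onwards of the HTT status
     	 * words in reverse byte order; rebuild it in normal order here
     	 */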
2410 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2411 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2412 					status[(DP_MAC_ADDR_LEN - 2) + i];
2413 
2414 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2415 		dp_peer_add_ast(soc,
2416 				peer,
2417 				mac_addr,
2418 				CDP_TXRX_AST_TYPE_MEC,
2419 				flags);
2420 }
2421 #endif
2422 
2423 /**
2424  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2425  * @tx_desc: software descriptor head pointer
2426  * @status : Tx completion status from HTT descriptor
2427  *
2428  * This function will process HTT Tx indication messages from Target
2429  *
2430  * Return: none
2431  */
2432 static
2433 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2434 {
2435 	uint8_t tx_status;
2436 	struct dp_pdev *pdev;
2437 	struct dp_vdev *vdev;
2438 	struct dp_soc *soc;
2439 	uint32_t *htt_status_word = (uint32_t *) status;
2440 
2441 	qdf_assert(tx_desc->pdev);
2442 
2443 	pdev = tx_desc->pdev;
2444 	vdev = tx_desc->vdev;
2445 	soc = pdev->soc;
2446 
2447 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2448 
2449 	switch (tx_status) {
2450 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2451 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2452 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2453 	{
2454 		dp_tx_comp_free_buf(soc, tx_desc);
2455 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2456 		break;
2457 	}
2458 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2459 	{
2460 		dp_tx_reinject_handler(tx_desc, status);
2461 		break;
2462 	}
2463 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2464 	{
2465 		dp_tx_inspect_handler(tx_desc, status);
2466 		break;
2467 	}
2468 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2469 	{
2470 		dp_tx_mec_handler(vdev, status);
2471 		break;
2472 	}
2473 	default:
2474 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2475 				"%s Invalid HTT tx_status %d\n",
2476 				__func__, tx_status);
2477 		break;
2478 	}
2479 }
2480 
2481 #ifdef MESH_MODE_SUPPORT
2482 /**
2483  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2484  *                                         in mesh meta header
2485  * @tx_desc: software descriptor head pointer
2486  * @ts: pointer to tx completion stats
2487  * Return: none
2488  */
2489 static
2490 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2491 		struct hal_tx_completion_status *ts)
2492 {
2493 	struct meta_hdr_s *mhdr;
2494 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2495 
2496 	if (!tx_desc->msdu_ext_desc) {
2497 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2498 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2499 				"netbuf %pK offset %d\n",
2500 				netbuf, tx_desc->pkt_offset);
2501 			return;
2502 		}
2503 	}
2504 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2505 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2506 			"netbuf %pK offset %d\n", netbuf,
2507 			sizeof(struct meta_hdr_s));
2508 		return;
2509 	}
2510 
2511 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2512 	mhdr->rssi = ts->ack_frame_rssi;
2513 	mhdr->channel = tx_desc->pdev->operating_channel;
2514 }
2515 
2516 #else
2517 static
2518 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2519 		struct hal_tx_completion_status *ts)
2520 {
2521 }
2522 
2523 #endif
2524 
2525 /**
2526  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2527  * @peer: Handle to DP peer
2528  * @ts: pointer to HAL Tx completion stats
2529  * @length: MSDU length
2530  *
2531  * Return: None
2532  */
2533 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2534 		struct hal_tx_completion_status *ts, uint32_t length)
2535 {
2536 	struct dp_pdev *pdev = peer->vdev->pdev;
2537 	struct dp_soc *soc = pdev->soc;
2538 	uint8_t mcs, pkt_type;
2539 
2540 	mcs = ts->mcs;
2541 	pkt_type = ts->pkt_type;
2542 
2543 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2544 		return;
2545 
2546 	if (peer->bss_peer) {
2547 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2548 	} else {
2549 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
2550 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2551 		}
2552 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2553 	}
2554 
2555 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2556 			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2557 
2558 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2559 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2560 
2561 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2562 			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2563 
2564 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2565 			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2566 
2567 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2568 			(ts->status == HAL_TX_TQM_RR_FW_REASON1));
2569 
2570 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2571 			(ts->status == HAL_TX_TQM_RR_FW_REASON2));
2572 
2573 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2574 			(ts->status == HAL_TX_TQM_RR_FW_REASON3));
2575 
2576 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2577 		return;
2578 
2579 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2580 
2581 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2582 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2583 
2584 	if (!(soc->process_tx_status))
2585 		return;
2586 
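     	/* Per-rate stats: for each PHY type, MCS values outside the valid
     	 * range are accumulated in the last (MAX_MCS - 1) bucket
     	 */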
2587 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2588 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2589 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2590 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2591 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2592 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2593 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2594 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2595 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2596 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2597 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2598 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2599 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2600 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2601 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2602 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2603 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2604 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2605 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2606 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2607 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2608 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2609 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2610 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2611 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2612 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2613 	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2614 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2615 
2616 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2617 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
2618 				&peer->stats, ts->peer_id,
2619 				UPDATE_PEER_STATS);
2620 	}
2621 }
2622 
2623 /**
2624  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2625  * @tx_desc: software descriptor head pointer
2626  * @length: packet length
2627  *
2628  * Return: none
2629  */
2630 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2631 		uint32_t length)
2632 {
2633 	struct hal_tx_completion_status ts;
2634 	struct dp_soc *soc = NULL;
2635 	struct dp_vdev *vdev = tx_desc->vdev;
2636 	struct dp_peer *peer = NULL;
2637 	struct ether_header *eh =
2638 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2639 
2640 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2641 
2642 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2643 				"-------------------- \n"
2644 				"Tx Completion Stats: \n"
2645 				"-------------------- \n"
2646 				"ack_frame_rssi = %d \n"
2647 				"first_msdu = %d \n"
2648 				"last_msdu = %d \n"
2649 				"msdu_part_of_amsdu = %d \n"
2650 				"rate_stats valid = %d \n"
2651 				"bw = %d \n"
2652 				"pkt_type = %d \n"
2653 				"stbc = %d \n"
2654 				"ldpc = %d \n"
2655 				"sgi = %d \n"
2656 				"mcs = %d \n"
2657 				"ofdma = %d \n"
2658 				"tones_in_ru = %d \n"
2659 				"tsf = %d \n"
2660 				"ppdu_id = %d \n"
2661 				"transmit_cnt = %d \n"
2662 				"tid = %d \n"
2663 				"peer_id = %d \n",
2664 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2665 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2666 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2667 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2668 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2669 				ts.peer_id);
2670 
2671 	if (!vdev) {
2672 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2673 				"invalid vdev");
2674 		goto out;
2675 	}
2676 
2677 	soc = vdev->pdev->soc;
2678 
2679 	/* Update SoC level stats */
2680 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2681 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2682 
2683 	/* Update per-packet stats */
2684 	if (qdf_unlikely(vdev->mesh_vdev) &&
2685 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2686 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2687 
2688 	/* Update peer level stats */
2689 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2690 	if (!peer) {
2691 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2692 				"invalid peer");
2693 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2694 		goto out;
2695 	}
2696 
2697 	if (qdf_likely(peer->vdev->tx_encap_type ==
2698 				htt_cmn_pkt_type_ethernet)) {
2699 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2700 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2701 	}
2702 
2703 	dp_tx_update_peer_stats(peer, &ts, length);
2704 
2705 out:
2706 	return;
2707 }
2708 
2709 /**
2710  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2711  * @soc: core txrx main context
2712  * @comp_head: software descriptor head pointer
2713  *
2714  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2715  * and release the software descriptors after processing is complete
2716  *
2717  * Return: none
2718  */
2719 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2720 		struct dp_tx_desc_s *comp_head)
2721 {
2722 	struct dp_tx_desc_s *desc;
2723 	struct dp_tx_desc_s *next;
2724 	struct hal_tx_completion_status ts = {0};
2725 	uint32_t length;
2726 	struct dp_peer *peer;
2727 
2728 	DP_HIST_INIT();
2729 	desc = comp_head;
2730 
2731 	while (desc) {
2732 		hal_tx_comp_get_status(&desc->comp, &ts);
2733 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2734 		length = qdf_nbuf_len(desc->nbuf);
2735 
2736 		dp_tx_comp_process_tx_status(desc, length);
2737 
2738 		/* currently m_copy/tx_capture is not supported for scatter gather packets */
2739 		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2740 					desc->pdev, ts.peer_id, ts.ppdu_id,
2741 					ts.first_msdu, ts.last_msdu,
2742 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
2743 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2744 						QDF_DMA_TO_DEVICE);
2745 
2746 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2747 				ts.ppdu_id, desc->nbuf);
2748 		} else {
2749 			dp_tx_comp_free_buf(soc, desc);
2750 		}
2751 
2752 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2753 
2754 		next = desc->next;
2755 		dp_tx_desc_release(desc, desc->pool_id);
2756 		desc = next;
2757 	}
2758 	DP_TX_HIST_STATS_PER_PDEV();
2759 }
2760 
2761 /**
2762  * dp_tx_comp_handler() - Tx completion handler
2763  * @soc: core txrx main context
2764  * @ring_id: completion ring id
2765  * @quota: No. of packets/descriptors that can be serviced in one loop
2766  *
2767  * This function will collect hardware release ring element contents and
2768  * handle descriptor contents. Based on contents, free packet or handle error
2769  * conditions
2770  *
2771  * Return: none
2772  */
2773 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2774 {
2775 	void *tx_comp_hal_desc;
2776 	uint8_t buffer_src;
2777 	uint8_t pool_id;
2778 	uint32_t tx_desc_id;
2779 	struct dp_tx_desc_s *tx_desc = NULL;
2780 	struct dp_tx_desc_s *head_desc = NULL;
2781 	struct dp_tx_desc_s *tail_desc = NULL;
2782 	uint32_t num_processed;
2783 	uint32_t count;
2784 
2785 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2786 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2787 				"%s %d : HAL RING Access Failed -- %pK\n",
2788 				__func__, __LINE__, hal_srng);
2789 		return 0;
2790 	}
2791 
2792 	num_processed = 0;
2793 	count = 0;
2794 
2795 	/* Find head descriptor from completion ring */
2796 	while (qdf_likely(tx_comp_hal_desc =
2797 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2798 
2799 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2800 
2801 		/* If this buffer was not released by TQM or FW, then it is not
2802 		 * a Tx completion indication; assert */
2803 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2804 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2805 
2806 			QDF_TRACE(QDF_MODULE_ID_DP,
2807 					QDF_TRACE_LEVEL_FATAL,
2808 					"Tx comp release_src != TQM | FW");
2809 
2810 			qdf_assert_always(0);
2811 		}
2812 
2813 		/* Get descriptor id */
2814 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
2815 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2816 			DP_TX_DESC_ID_POOL_OS;
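     		/* The completion cookie also packs the page and offset of
     		 * the descriptor; those fields are decoded at the
     		 * dp_tx_desc_find() call below
     		 */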
2817 
2818 		/* Pool ID is out of limit. Error */
2819 		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
2820 					soc->wlan_cfg_ctx)) {
2821 			QDF_TRACE(QDF_MODULE_ID_DP,
2822 					QDF_TRACE_LEVEL_FATAL,
2823 					"Tx Comp pool id %d not valid",
2824 					pool_id);
2825 
2826 			qdf_assert_always(0);
2827 		}
2828 
2829 		/* Find Tx descriptor */
2830 		tx_desc = dp_tx_desc_find(soc, pool_id,
2831 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
2832 				DP_TX_DESC_ID_PAGE_OS,
2833 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
2834 				DP_TX_DESC_ID_OFFSET_OS);
2835 
2836 		/*
2837 		 * If the release source is FW, process the HTT status
2838 		 */
2839 		if (qdf_unlikely(buffer_src ==
2840 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2841 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
2842 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
2843 					htt_tx_status);
2844 			dp_tx_process_htt_completion(tx_desc,
2845 					htt_tx_status);
2846 		} else {
2847 			/* Pool id is not matching. Error */
2848 			if (tx_desc->pool_id != pool_id) {
2849 				QDF_TRACE(QDF_MODULE_ID_DP,
2850 					QDF_TRACE_LEVEL_FATAL,
2851 					"Tx Comp pool id %d not matched %d",
2852 					pool_id, tx_desc->pool_id);
2853 
2854 				qdf_assert_always(0);
2855 			}
2856 
2857 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
2858 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
2859 				QDF_TRACE(QDF_MODULE_ID_DP,
2860 					QDF_TRACE_LEVEL_FATAL,
2861 					"Txdesc invalid, flgs = %x,id = %d",
2862 					"Txdesc invalid, flgs = %x, id = %d",
2863 				qdf_assert_always(0);
2864 			}
2865 
2866 			/* First ring descriptor on the cycle */
2867 			if (!head_desc) {
2868 				head_desc = tx_desc;
2869 				tail_desc = tx_desc;
2870 			}
2871 
2872 			tail_desc->next = tx_desc;
2873 			tx_desc->next = NULL;
2874 			tail_desc = tx_desc;
2875 
2876 			/* Collect hw completion contents */
2877 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
2878 					&tx_desc->comp, 1);
2879 
2880 		}
2881 
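     		/* num_processed advances once for every
     		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) reaped descriptors, so the
     		 * quota below is checked at that granularity
     		 */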
2882 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
2883 		/* Decrement PM usage count if the packet has been sent.*/
2884 		hif_pm_runtime_put(soc->hif_handle);
2885 
2886 		/*
2887 		 * Stop processing once the processed packet count
2888 		 * reaches the given quota
2889 		 */
2890 		if ((num_processed >= quota))
2891 			break;
2892 
2893 		count++;
2894 	}
2895 
2896 	hal_srng_access_end(soc->hal_soc, hal_srng);
2897 
2898 	/* Process the reaped descriptors */
2899 	if (head_desc)
2900 		dp_tx_comp_process_desc(soc, head_desc);
2901 
2902 	return num_processed;
2903 }
2904 
2905 #ifdef CONVERGED_TDLS_ENABLE
2906 /**
2907  * dp_tx_non_std() - Allow the control-path SW to send data frames
2908  *
2909  * @data_vdev - which vdev should transmit the tx data frames
2910  * @tx_spec - what non-standard handling to apply to the tx data frames
2911  * @msdu_list - NULL-terminated list of tx MSDUs
2912  *
2913  * Return: NULL on success,
2914  *         nbuf when it fails to send
2915  */
2916 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
2917 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
2918 {
2919 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2920 
2921 	if (tx_spec & OL_TX_SPEC_NO_FREE)
2922 		vdev->is_tdls_frame = true;
2923 	return dp_tx_send(vdev_handle, msdu_list);
2924 }
2925 #endif
2926 
2927 /**
2928  * dp_tx_vdev_attach() - attach vdev to dp tx
2929  * @vdev: virtual device instance
2930  *
2931  * Return: QDF_STATUS_SUCCESS: success
2932  *         QDF_STATUS_E_RESOURCES: Error return
2933  */
2934 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
2935 {
2936 	/*
2937 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
2938 	 */
2939 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
2940 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
2941 
2942 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
2943 			vdev->vdev_id);
2944 
2945 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
2946 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
2947 
2948 	/*
2949 	 * Set HTT Extension Valid bit to 0 by default
2950 	 */
2951 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
2952 
2953 	dp_tx_vdev_update_search_flags(vdev);
2954 
2955 	return QDF_STATUS_SUCCESS;
2956 }
2957 
2958 /**
2959  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
2960  * @vdev: virtual device instance
2961  *
2962  * Return: void
2963  *
2964  */
2965 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
2966 {
2967 	/*
2968 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
2969 	 * for TDLS link
2970 	 *
2971 	 * Enable AddrY (SA based search) only for non-WDS STA and
2972 	 * ProxySTA VAP modes.
2973 	 *
2974 	 * In all other VAP modes, only DA based search should be
2975 	 * enabled
2976 	 */
2977 	if (vdev->opmode == wlan_op_mode_sta &&
2978 	    vdev->tdls_link_connected)
2979 		vdev->hal_desc_addr_search_flags =
2980 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
2981 	else if ((vdev->opmode == wlan_op_mode_sta &&
2982 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
2983 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
2984 	else
2985 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
2986 }
2987 
2988 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2989 static void dp_tx_desc_flush(struct dp_vdev *vdev)
2990 {
2991 }
2992 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
2993 
2994 /**
2995  * dp_tx_desc_flush() - release resources associated with tx_desc
2996  * @vdev: virtual device instance
2997  *
2998  * This function will free all outstanding Tx buffers,
2999  * including ME buffers for which either the free during
3000  * completion did not happen or the completion was not
3001  * received.
3002  */
3003 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3004 {
3005 	uint8_t i, num_pool;
3006 	uint32_t j;
3007 	uint32_t num_desc;
3008 	struct dp_soc *soc = vdev->pdev->soc;
3009 	struct dp_tx_desc_s *tx_desc = NULL;
3010 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3011 
3012 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3013 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3014 
3015 	for (i = 0; i < num_pool; i++) {
3016 		for (j = 0; j < num_desc; j++) {
3017 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3018 			if (tx_desc_pool &&
3019 				tx_desc_pool->desc_pages.cacheable_pages) {
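     				/* Treat j as a flat descriptor index and
     				 * split it into the page/offset fields
     				 * expected by dp_tx_desc_find()
     				 */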
3020 				tx_desc = dp_tx_desc_find(soc, i,
3021 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3022 					DP_TX_DESC_ID_PAGE_OS,
3023 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3024 					DP_TX_DESC_ID_OFFSET_OS);
3025 
3026 				if (tx_desc && (tx_desc->vdev == vdev) &&
3027 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3028 					dp_tx_comp_free_buf(soc, tx_desc);
3029 					dp_tx_desc_release(tx_desc, i);
3030 				}
3031 			}
3032 		}
3033 	}
3034 }
3035 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3036 
3037 /**
3038  * dp_tx_vdev_detach() - detach vdev from dp tx
3039  * @vdev: virtual device instance
3040  *
3041  * Return: QDF_STATUS_SUCCESS: success
3042  *         QDF_STATUS_E_RESOURCES: Error return
3043  */
3044 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3045 {
3046 	dp_tx_desc_flush(vdev);
3047 	return QDF_STATUS_SUCCESS;
3048 }
3049 
3050 /**
3051  * dp_tx_pdev_attach() - attach pdev to dp tx
3052  * @pdev: physical device instance
3053  *
3054  * Return: QDF_STATUS_SUCCESS: success
3055  *         QDF_STATUS_E_RESOURCES: Error return
3056  */
3057 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3058 {
3059 	struct dp_soc *soc = pdev->soc;
3060 
3061 	/* Initialize Flow control counters */
3062 	qdf_atomic_init(&pdev->num_tx_exception);
3063 	qdf_atomic_init(&pdev->num_tx_outstanding);
3064 
3065 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3066 		/* Initialize descriptors in TCL Ring */
3067 		hal_tx_init_data_ring(soc->hal_soc,
3068 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3069 	}
3070 
3071 	return QDF_STATUS_SUCCESS;
3072 }
3073 
3074 /**
3075  * dp_tx_pdev_detach() - detach pdev from dp tx
3076  * @pdev: physical device instance
3077  *
3078  * Return: QDF_STATUS_SUCCESS: success
3079  *         QDF_STATUS_E_RESOURCES: Error return
3080  */
3081 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3082 {
3083 	dp_tx_me_exit(pdev);
3084 	return QDF_STATUS_SUCCESS;
3085 }
3086 
3087 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3088 /* Pools will be allocated dynamically */
3089 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3090 					int num_desc)
3091 {
3092 	uint8_t i;
3093 
3094 	for (i = 0; i < num_pool; i++) {
3095 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3096 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3103 {
3104 	uint8_t i;
3105 
3106 	for (i = 0; i < num_pool; i++)
3107 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3108 }
3109 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3110 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3111 					int num_desc)
3112 {
3113 	uint8_t i;
3114 
3115 	/* Allocate software Tx descriptor pools */
3116 	for (i = 0; i < num_pool; i++) {
3117 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3118 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3119 					"%s Tx Desc Pool alloc %d failed %pK\n",
3120 					__func__, i, soc);
3121 			return ENOMEM;
3122 		}
3123 	}
3124 	return 0;
3125 }
3126 
3127 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3128 {
3129 	uint8_t i;
3130 
3131 	for (i = 0; i < num_pool; i++) {
3132 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3133 		if (dp_tx_desc_pool_free(soc, i)) {
3134 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3135 				"%s Tx Desc Pool Free failed\n", __func__);
3136 		}
3137 	}
3138 }
3139 
3140 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3141 
3142 /**
3143  * dp_tx_soc_detach() - detach soc from dp tx
3144  * @soc: core txrx main context
3145  *
3146  * This function will detach dp tx from the main device context
3147  * and free dp tx resources
3148  *
3149  * Return: QDF_STATUS_SUCCESS: success
3150  *         QDF_STATUS_E_RESOURCES: Error return
3151  */
3152 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3153 {
3154 	uint8_t num_pool;
3155 	uint16_t num_desc;
3156 	uint16_t num_ext_desc;
3157 	uint8_t i;
3158 
3159 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3160 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3161 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3162 
3163 	dp_tx_flow_control_deinit(soc);
3164 	dp_tx_delete_static_pools(soc, num_pool);
3165 
3166 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3167 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
3168 			__func__, num_pool, num_desc);
3169 
3170 	for (i = 0; i < num_pool; i++) {
3171 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3172 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3173 					"%s Tx Ext Desc Pool Free failed\n",
3174 					__func__);
3175 			return QDF_STATUS_E_RESOURCES;
3176 		}
3177 	}
3178 
3179 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3180 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
3181 			__func__, num_pool, num_ext_desc);
3182 
3183 	for (i = 0; i < num_pool; i++) {
3184 		dp_tx_tso_desc_pool_free(soc, i);
3185 	}
3186 
3187 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3188 			"%s TSO Desc Pool %d Free descs = %d\n",
3189 			__func__, num_pool, num_desc);
3190 
3191 
3192 	for (i = 0; i < num_pool; i++)
3193 		dp_tx_tso_num_seg_pool_free(soc, i);
3194 
3195 
3196 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3197 		"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
3198 		__func__, num_pool, num_desc);
3199 
3200 	return QDF_STATUS_SUCCESS;
3201 }
3202 
3203 /**
3204  * dp_tx_soc_attach() - attach soc to dp tx
3205  * @soc: core txrx main context
3206  *
3207  * This function will attach dp tx to the main device context,
3208  * allocate dp tx resources and initialize them
3209  *
3210  * Return: QDF_STATUS_SUCCESS: success
3211  *         QDF_STATUS_E_RESOURCES: Error return
3212  */
3213 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3214 {
3215 	uint8_t i;
3216 	uint8_t num_pool;
3217 	uint32_t num_desc;
3218 	uint32_t num_ext_desc;
3219 
3220 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3221 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3222 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3223 
3224 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3225 		goto fail;
3226 
3227 	dp_tx_flow_control_init(soc);
3228 
3229 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3230 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
3231 			__func__, num_pool, num_desc);
3232 
3233 	/* Allocate extension tx descriptor pools */
3234 	for (i = 0; i < num_pool; i++) {
3235 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3236 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3237 				"MSDU Ext Desc Pool alloc %d failed %pK\n",
3238 				i, soc);
3239 
3240 			goto fail;
3241 		}
3242 	}
3243 
3244 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3245 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
3246 			__func__, num_pool, num_ext_desc);
3247 
3248 	for (i = 0; i < num_pool; i++) {
3249 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3250 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3251 				"TSO Desc Pool alloc %d failed %pK\n",
3252 				i, soc);
3253 
3254 			goto fail;
3255 		}
3256 	}
3257 
3258 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3259 			"%s TSO Desc Alloc %d, descs = %d\n",
3260 			__func__, num_pool, num_desc);
3261 
3262 	for (i = 0; i < num_pool; i++) {
3263 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3264 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3265 				"TSO Num of seg Pool alloc %d failed %pK\n",
3266 				i, soc);
3267 
3268 			goto fail;
3269 		}
3270 	}
3271 
3272 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3273 			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
3274 			__func__, num_pool, num_desc);
3275 
3276 	/* Initialize descriptors in TCL Rings */
3277 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3278 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3279 			hal_tx_init_data_ring(soc->hal_soc,
3280 					soc->tcl_data_ring[i].hal_srng);
3281 		}
3282 	}
3283 
3284 	/*
3285 	 * todo - Add a runtime config option to enable this.
3286 	 */
3287 	/*
3288 	 * Due to multiple issues on NPR EMU, enable it selectively
3289 	 * only for NPR EMU, should be removed, once NPR platforms
3290 	 * are stable.
3291 	 */
3292 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3293 
3294 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3295 			"%s HAL Tx init Success\n", __func__);
3296 
3297 	return QDF_STATUS_SUCCESS;
3298 
3299 fail:
3300 	/* Detach will take care of freeing only allocated resources */
3301 	dp_tx_soc_detach(soc);
3302 	return QDF_STATUS_E_RESOURCES;
3303 }
3304 
3305 /**
3306  * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
3307  * @pdev: pointer to DP PDEV structure
3308  * @seg_info_head: Pointer to the head of list
3309  *
3310  * Return: void
3311  */
3312 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3313 		struct dp_tx_seg_info_s *seg_info_head)
3314 {
3315 	struct dp_tx_me_buf_t *mc_uc_buf;
3316 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3317 	qdf_nbuf_t nbuf = NULL;
3318 	uint64_t phy_addr;
3319 
3320 	while (seg_info_head) {
3321 		nbuf = seg_info_head->nbuf;
3322 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3323 			seg_info_head->frags[0].vaddr;
3324 		phy_addr = seg_info_head->frags[0].paddr_hi;
3325 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3326 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3327 				phy_addr,
3328 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3329 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3330 		qdf_nbuf_free(nbuf);
3331 		seg_info_new = seg_info_head;
3332 		seg_info_head = seg_info_head->next;
3333 		qdf_mem_free(seg_info_new);
3334 	}
3335 }
3336 
3337 /**
3338  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3339  * @vdev_handle: DP VDEV handle
3340  * @nbuf: Multicast nbuf
3341  * @newmac: Table of the clients to which packets have to be sent
3342  * @new_mac_cnt: No of clients
3343  *
3344  * Return: no of converted packets
3345  */
3346 uint16_t
3347 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3348 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3349 {
3350 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3351 	struct dp_pdev *pdev = vdev->pdev;
3352 	struct ether_header *eh;
3353 	uint8_t *data;
3354 	uint16_t len;
3355 
3356 	/* reference to frame dst addr */
3357 	uint8_t *dstmac;
3358 	/* copy of original frame src addr */
3359 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3360 
3361 	/* local index into newmac */
3362 	uint8_t new_mac_idx = 0;
3363 	struct dp_tx_me_buf_t *mc_uc_buf;
3364 	qdf_nbuf_t  nbuf_clone;
3365 	struct dp_tx_msdu_info_s msdu_info;
3366 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3367 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3368 	struct dp_tx_seg_info_s *seg_info_new;
3369 	struct dp_tx_frag_info_s data_frag;
3370 	qdf_dma_addr_t paddr_data;
3371 	qdf_dma_addr_t paddr_mcbuf = 0;
3372 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3373 	QDF_STATUS status;
3374 
3375 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3376 
3377 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3378 
3379 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3380 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3381 
3382 	len = qdf_nbuf_len(nbuf);
3383 
3384 	data = qdf_nbuf_data(nbuf);
3385 
3386 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3387 			QDF_DMA_TO_DEVICE);
3388 
3389 	if (status) {
3390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3391 				"Mapping failure Error:%d", status);
3392 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3393 		qdf_nbuf_free(nbuf);
3394 		return 1;
3395 	}
3396 
3397 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3398 
3399 	/* preparing data fragment */
3400 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3401 	data_frag.paddr_lo = (uint32_t)paddr_data;
3402 	data_frag.paddr_hi = (((uint64_t) paddr_data)  >> 32);
3403 	data_frag.len = len - DP_MAC_ADDR_LEN;
3404 
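     	/*
     	 * Each converted frame is described by two fragments:
     	 *   frag[0] - 6-byte per-client unicast DA (mc_uc_buf, filled below)
     	 *   frag[1] - the original frame contents following the multicast DA
     	 */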
3405 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3406 		dstmac = newmac[new_mac_idx];
3407 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3408 				"added mac addr (%pM)", dstmac);
3409 
3410 		/* Check for NULL Mac Address */
3411 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3412 			continue;
3413 
3414 		/* frame to self mac. skip */
3415 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3416 			continue;
3417 
3418 		/*
3419 		 * TODO: optimize to avoid malloc in per-packet path
3420 		 * For eg. seg_pool can be made part of vdev structure
3421 		 */
3422 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3423 
3424 		if (!seg_info_new) {
3425 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3426 					"alloc failed");
3427 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3428 			goto fail_seg_alloc;
3429 		}
3430 
3431 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3432 		if (mc_uc_buf == NULL)
3433 			goto fail_buf_alloc;
3434 
3435 		/*
3436 		 * TODO: Check if we need to clone the nbuf
3437 		 * Or can we just use the reference for all cases
3438 		 */
3439 		if (new_mac_idx < (new_mac_cnt - 1)) {
3440 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3441 			if (nbuf_clone == NULL) {
3442 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3443 				goto fail_clone;
3444 			}
3445 		} else {
3446 			/*
3447 			 * Update the ref
3448 			 * to account for frame sent without cloning
3449 			 */
3450 			qdf_nbuf_ref(nbuf);
3451 			nbuf_clone = nbuf;
3452 		}
3453 
3454 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3455 
3456 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3457 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3458 				&paddr_mcbuf);
3459 
3460 		if (status) {
3461 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3462 					"Mapping failure Error:%d", status);
3463 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3464 			goto fail_map;
3465 		}
3466 
3467 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
3468 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3469 		seg_info_new->frags[0].paddr_hi =
3470 			((uint64_t) paddr_mcbuf >> 32);
3471 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3472 
3473 		seg_info_new->frags[1] = data_frag;
3474 		seg_info_new->nbuf = nbuf_clone;
3475 		seg_info_new->frag_cnt = 2;
3476 		seg_info_new->total_len = len;
3477 
3478 		seg_info_new->next = NULL;
3479 
3480 		if (seg_info_head == NULL)
3481 			seg_info_head = seg_info_new;
3482 		else
3483 			seg_info_tail->next = seg_info_new;
3484 
3485 		seg_info_tail = seg_info_new;
3486 	}
3487 
3488 	if (!seg_info_head) {
3489 		goto free_return;
3490 	}
3491 
3492 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3493 	msdu_info.num_seg = new_mac_cnt;
3494 	msdu_info.frm_type = dp_tx_frm_me;
3495 
3496 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3497 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3498 
3499 	while (seg_info_head->next) {
3500 		seg_info_new = seg_info_head;
3501 		seg_info_head = seg_info_head->next;
3502 		qdf_mem_free(seg_info_new);
3503 	}
3504 	qdf_mem_free(seg_info_head);
3505 
3506 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3507 	qdf_nbuf_free(nbuf);
3508 	return new_mac_cnt;
3509 
3510 fail_map:
3511 	qdf_nbuf_free(nbuf_clone);
3512 
3513 fail_clone:
3514 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3515 
3516 fail_buf_alloc:
3517 	qdf_mem_free(seg_info_new);
3518 
3519 fail_seg_alloc:
3520 	dp_tx_me_mem_free(pdev, seg_info_head);
3521 
3522 free_return:
3523 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3524 	qdf_nbuf_free(nbuf);
3525 	return 1;
3526 }
3527 
3528