xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision f227fb7417bd8f9ec0db40caeedb4abfac1f67a3)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include <wlan_cfg.h>
28 #ifdef MESH_MODE_SUPPORT
29 #include "if_meta_hdr.h"
30 #endif
31 
32 #ifdef TX_PER_PDEV_DESC_POOL
33 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
34 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
35 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
36 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
37 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
38 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
39 #else
40 	#ifdef TX_PER_VDEV_DESC_POOL
41 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
42 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
43 	#else
44 		#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
45 		#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
46 	#endif /* TX_PER_VDEV_DESC_POOL */
47 #endif /* TX_PER_PDEV_DESC_POOL */
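/*
 * Summary of the pool/ring selection above:
 *  - TX_PER_PDEV_DESC_POOL: descriptor pool per pdev (per vdev when
 *    QCA_LL_TX_FLOW_CONTROL_V2 is enabled), TCL ring per pdev.
 *  - TX_PER_VDEV_DESC_POOL: descriptor pool per vdev, TCL ring per pdev.
 *  - Otherwise: both ids are derived from the current CPU, which minimizes
 *    lock contention when XPS steers flows per CPU.
 */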
48 
49 /* TODO Add support in TSO */
50 #define DP_DESC_NUM_FRAG(x) 0
51 
52 /* disable TQM_BYPASS */
53 #define TQM_BYPASS_WAR 0
54 
55 /* invalid peer id for reinject */
56 #define DP_INVALID_PEER 0XFFFE
57 
58 /* Mapping from cdp_sec_type (array index) to hal encrypt type */
59 #define MAX_CDP_SEC_TYPE 12
60 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
61 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
62 					HAL_TX_ENCRYPT_TYPE_WEP_128,
63 					HAL_TX_ENCRYPT_TYPE_WEP_104,
64 					HAL_TX_ENCRYPT_TYPE_WEP_40,
65 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
66 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
67 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
68 					HAL_TX_ENCRYPT_TYPE_WAPI,
69 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
70 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
71 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
72 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
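/*
 * Consumed by dp_tx_hw_enqueue() as sec_type_map[vdev->sec_type]; the
 * ordering here is therefore assumed to track enum cdp_sec_type.
 */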
73 
74 /**
75  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
76  * @vdev: DP Virtual device handle
77  * @nbuf: Buffer pointer
78  * @queue: queue ids container for nbuf
79  *
80  * The TX packet queue has two components: a software descriptor pool id and
81  * a DMA ring id. Depending on the TX feature set and hardware configuration,
82  * the id combination can differ.
83  * For example -
84  * With XPS enabled, all TX descriptor pools and DMA rings are assigned per CPU id.
85  * With no XPS (lock based resource protection), descriptor pool ids are
86  * different per vdev, while the DMA ring id is the single pdev id.
87  *
88  * Return: None
89  */
90 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
91 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
92 {
93 	/* get flow id */
94 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
95 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
96 
97 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
98 			"%s, pool_id:%d ring_id: %d",
99 			__func__, queue->desc_pool_id, queue->ring_id);
100 
101 	return;
102 }
103 
104 #if defined(FEATURE_TSO)
105 /**
106  * dp_tx_tso_desc_release() - Release the tso segment
107  *                            after unmapping all the fragments
108  *
109  * @soc - soc handle
110  * @tx_desc - Tx software descriptor
111  */
112 static void dp_tx_tso_desc_release(struct dp_soc *soc,
113 		struct dp_tx_desc_s *tx_desc)
114 {
115 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
116 	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
117 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
118 			"%s %d TSO desc is NULL!",
119 			__func__, __LINE__);
120 		qdf_assert(0);
121 	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
122 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
123 			"%s %d TSO common info is NULL!",
124 			__func__, __LINE__);
125 		qdf_assert(0);
126 	} else {
127 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
128 			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;
129 
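		/*
		 * tso_cmn_num_seg counts how many segments of the jumbo nbuf
		 * are still outstanding. Intermediate segments are unmapped
		 * with the last-segment flag cleared; only the final release
		 * unmaps with the flag set and frees the shared num-seg
		 * descriptor.
		 */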
130 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
131 			tso_num_desc->num_seg.tso_cmn_num_seg--;
132 			qdf_nbuf_unmap_tso_segment(soc->osdev,
133 					tx_desc->tso_desc, false);
134 		} else {
135 			tso_num_desc->num_seg.tso_cmn_num_seg--;
136 			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
137 			qdf_nbuf_unmap_tso_segment(soc->osdev,
138 					tx_desc->tso_desc, true);
139 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
140 					tx_desc->tso_num_desc);
141 			tx_desc->tso_num_desc = NULL;
142 		}
143 		dp_tx_tso_desc_free(soc,
144 				tx_desc->pool_id, tx_desc->tso_desc);
145 		tx_desc->tso_desc = NULL;
146 	}
147 }
148 #else
149 static void dp_tx_tso_desc_release(struct dp_soc *soc,
150 		struct dp_tx_desc_s *tx_desc)
151 {
152 	return;
153 }
154 #endif
155 /**
156  * dp_tx_desc_release() - Release Tx Descriptor
157  * @tx_desc : Tx Descriptor
158  * @desc_pool_id: Descriptor Pool ID
159  *
160  * Deallocate all resources attached to Tx descriptor and free the Tx
161  * descriptor.
162  *
163  * Return: None
164  */
165 static void
166 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
167 {
168 	struct dp_pdev *pdev = tx_desc->pdev;
169 	struct dp_soc *soc;
170 	uint8_t comp_status = 0;
171 
172 	qdf_assert(pdev);
173 
174 	soc = pdev->soc;
175 
176 	if (tx_desc->frm_type == dp_tx_frm_tso)
177 		dp_tx_tso_desc_release(soc, tx_desc);
178 
179 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
180 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
181 
182 	qdf_atomic_dec(&pdev->num_tx_outstanding);
183 
184 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
185 		qdf_atomic_dec(&pdev->num_tx_exception);
186 
187 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
188 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
189 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
190 	else
191 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
192 
193 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
194 		"Tx Completion Release desc %d status %d outstanding %d",
195 		tx_desc->id, comp_status,
196 		qdf_atomic_read(&pdev->num_tx_outstanding));
197 
198 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
199 	return;
200 }
201 
202 /**
203  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
204  * @vdev: DP vdev Handle
205  * @nbuf: skb
 * @meta_data: HTT metadata to be filled into the frame pre-header
206  *
207  * Prepares and fills HTT metadata in the frame pre-header for special frames
208  * that should be transmitted using varying transmit parameters.
209  * There are 2 VDEV modes that currently need this special metadata -
210  *  1) Mesh Mode
211  *  2) DSRC Mode
212  *
213  * Return: HTT metadata size
214  *
215  */
216 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
217 		uint32_t *meta_data)
218 {
219 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
220 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
221 
222 	uint8_t htt_desc_size;
223 
224 	/* Size rounded up to a multiple of 8 bytes */
225 	uint8_t htt_desc_size_aligned;
226 
227 	uint8_t *hdr = NULL;
228 
229 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
230 
231 	/*
232 	 * Metadata - HTT MSDU Extension header
233 	 */
234 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
235 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
236 
237 	if (vdev->mesh_vdev) {
238 
239 		/* Fill and add HTT metaheader */
240 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
241 		if (hdr == NULL) {
242 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
243 					"Error in filling HTT metadata\n");
244 
245 			return 0;
246 		}
247 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
248 
249 	} else if (vdev->opmode == wlan_op_mode_ocb) {
250 		/* Todo - Add support for DSRC */
251 	}
252 
253 	return htt_desc_size_aligned;
254 }
255 
256 /**
257  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
258  * @tso_seg: TSO segment to process
259  * @ext_desc: Pointer to MSDU extension descriptor
260  *
261  * Return: void
262  */
263 #if defined(FEATURE_TSO)
264 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
265 		void *ext_desc)
266 {
267 	uint8_t num_frag;
268 	uint32_t tso_flags;
269 
270 	/*
271 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
272 	 * tcp_flag_mask
273 	 *
274 	 * Checksum enable flags are set in TCL descriptor and not in Extension
275 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
276 	 */
277 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
278 
279 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
280 
281 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
282 		tso_seg->tso_flags.ip_len);
283 
284 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
285 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
286 
287 
288 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
289 		uint32_t lo = 0;
290 		uint32_t hi = 0;
291 
292 		qdf_dmaaddr_to_32s(
293 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
294 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
295 			tso_seg->tso_frags[num_frag].length);
296 	}
297 
298 	return;
299 }
300 #else
301 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
302 		void *ext_desc)
303 {
304 	return;
305 }
306 #endif
307 
308 #if defined(FEATURE_TSO)
309 /**
310  * dp_tx_free_tso_seg() - Loop through the tso segments
311  *                        allocated and free them
312  *
313  * @soc: soc handle
314  * @free_seg: list of tso segments
315  * @msdu_info: msdu descriptor
316  *
317  * Return - void
318  */
319 static void dp_tx_free_tso_seg(struct dp_soc *soc,
320 	struct qdf_tso_seg_elem_t *free_seg,
321 	struct dp_tx_msdu_info_s *msdu_info)
322 {
323 	struct qdf_tso_seg_elem_t *next_seg;
324 
325 	while (free_seg) {
326 		next_seg = free_seg->next;
327 		dp_tx_tso_desc_free(soc,
328 			msdu_info->tx_queue.desc_pool_id,
329 			free_seg);
330 		free_seg = next_seg;
331 	}
332 }
333 
334 /**
335  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
336  *                            allocated and free them
337  *
338  * @soc:  soc handle
339  * @free_seg: list of tso segments
340  * @msdu_info: msdu descriptor
341  * Return - void
342  */
343 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
344 	struct qdf_tso_num_seg_elem_t *free_seg,
345 	struct dp_tx_msdu_info_s *msdu_info)
346 {
347 	struct qdf_tso_num_seg_elem_t *next_seg;
348 
349 	while (free_seg) {
350 		next_seg = free_seg->next;
351 		dp_tso_num_seg_free(soc,
352 			msdu_info->tx_queue.desc_pool_id,
353 			free_seg);
354 		free_seg = next_seg;
355 	}
356 }
357 
358 /**
359  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
360  * @vdev: virtual device handle
361  * @msdu: network buffer
362  * @msdu_info: meta data associated with the msdu
363  *
364  * Return: QDF_STATUS_SUCCESS success
365  */
366 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
367 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
368 {
369 	struct qdf_tso_seg_elem_t *tso_seg;
370 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
371 	struct dp_soc *soc = vdev->pdev->soc;
372 	struct qdf_tso_info_t *tso_info;
373 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
374 
375 	tso_info = &msdu_info->u.tso_info;
376 	tso_info->curr_seg = NULL;
377 	tso_info->tso_seg_list = NULL;
378 	tso_info->num_segs = num_seg;
379 	msdu_info->frm_type = dp_tx_frm_tso;
380 	tso_info->tso_num_seg_list = NULL;
381 
382 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
383 
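	/*
	 * Pre-allocate one TSO segment element per segment of the jumbo nbuf;
	 * on any allocation failure, return everything taken so far to the
	 * pool and fail the frame.
	 */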
384 	while (num_seg) {
385 		tso_seg = dp_tx_tso_desc_alloc(
386 				soc, msdu_info->tx_queue.desc_pool_id);
387 		if (tso_seg) {
388 			tso_seg->next = tso_info->tso_seg_list;
389 			tso_info->tso_seg_list = tso_seg;
390 			num_seg--;
391 		} else {
392 			struct qdf_tso_seg_elem_t *free_seg =
393 				tso_info->tso_seg_list;
394 
395 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
396 
397 			return QDF_STATUS_E_NOMEM;
398 		}
399 	}
400 
401 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
402 
403 	tso_num_seg = dp_tso_num_seg_alloc(soc,
404 			msdu_info->tx_queue.desc_pool_id);
405 
406 	if (tso_num_seg) {
407 		tso_num_seg->next = tso_info->tso_num_seg_list;
408 		tso_info->tso_num_seg_list = tso_num_seg;
409 	} else {
410 		/* Bug: free tso_num_seg and tso_seg */
411 		/* Free the already allocated num of segments */
412 		struct qdf_tso_seg_elem_t *free_seg =
413 					tso_info->tso_seg_list;
414 
415 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
416 			__func__);
417 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
418 
419 		return QDF_STATUS_E_NOMEM;
420 	}
421 
422 	msdu_info->num_seg =
423 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
424 
425 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
426 			msdu_info->num_seg);
427 
428 	if (!(msdu_info->num_seg)) {
429 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
430 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
431 					msdu_info);
432 		return QDF_STATUS_E_INVAL;
433 	}
434 
435 	tso_info->curr_seg = tso_info->tso_seg_list;
436 
437 	return QDF_STATUS_SUCCESS;
438 }
439 #else
440 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
441 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
442 {
443 	return QDF_STATUS_E_NOMEM;
444 }
445 #endif
446 
447 /**
448  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
449  * @vdev: DP Vdev handle
450  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
451  * @desc_pool_id: Descriptor Pool ID
452  *
453  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
454  */
455 static
456 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
457 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
458 {
459 	uint8_t i;
460 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
461 	struct dp_tx_seg_info_s *seg_info;
462 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
463 	struct dp_soc *soc = vdev->pdev->soc;
464 
465 	/* Allocate an extension descriptor */
466 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
467 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
468 
469 	if (!msdu_ext_desc) {
470 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
471 		return NULL;
472 	}
473 
474 	if (qdf_unlikely(vdev->mesh_vdev)) {
475 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
476 				&msdu_info->meta_data[0],
477 				sizeof(struct htt_tx_msdu_desc_ext2_t));
478 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
479 		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
480 	}
481 
482 	switch (msdu_info->frm_type) {
483 	case dp_tx_frm_sg:
484 	case dp_tx_frm_me:
485 	case dp_tx_frm_raw:
486 		seg_info = msdu_info->u.sg_info.curr_seg;
487 		/* Update the buffer pointers in MSDU Extension Descriptor */
488 		for (i = 0; i < seg_info->frag_cnt; i++) {
489 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
490 				seg_info->frags[i].paddr_lo,
491 				seg_info->frags[i].paddr_hi,
492 				seg_info->frags[i].len);
493 		}
494 
495 		break;
496 
497 	case dp_tx_frm_tso:
498 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
499 				&cached_ext_desc[0]);
500 		break;
501 
502 
503 	default:
504 		break;
505 	}
506 
507 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
508 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
509 
510 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
511 			msdu_ext_desc->vaddr);
512 
513 	return msdu_ext_desc;
514 }
515 
516 /**
517  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
518  * @vdev: DP vdev handle
519  * @nbuf: skb
520  * @desc_pool_id: Descriptor pool ID
 * @meta_data: HTT metadata for special (mesh/OCB) frames
521  * Allocate and prepare Tx descriptor with msdu information.
522  *
523  * Return: Pointer to Tx Descriptor on success,
524  *         NULL on failure
525  */
526 static
527 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
528 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
529 		uint32_t *meta_data)
530 {
531 	uint8_t align_pad;
532 	uint8_t is_exception = 0;
533 	uint8_t htt_hdr_size;
534 	struct ether_header *eh;
535 	struct dp_tx_desc_s *tx_desc;
536 	struct dp_pdev *pdev = vdev->pdev;
537 	struct dp_soc *soc = pdev->soc;
538 
539 	/* Allocate software Tx descriptor */
540 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
541 	if (qdf_unlikely(!tx_desc)) {
542 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
543 		return NULL;
544 	}
545 
546 	/* Flow control/Congestion Control counters */
547 	qdf_atomic_inc(&pdev->num_tx_outstanding);
548 
549 	/* Initialize the SW tx descriptor */
550 	tx_desc->nbuf = nbuf;
551 	tx_desc->frm_type = dp_tx_frm_std;
552 	tx_desc->tx_encap_type = vdev->tx_encap_type;
553 	tx_desc->vdev = vdev;
554 	tx_desc->pdev = pdev;
555 	tx_desc->msdu_ext_desc = NULL;
556 	tx_desc->pkt_offset = 0;
557 
558 	/*
559 	 * For special modes (vdev_type == ocb or mesh), data frames should be
560 	 * transmitted using varying transmit parameters (tx spec) which include
561 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
562 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
563 	 * These frames are sent as exception packets to firmware.
564 	 *
565 	 * HW requirement is that metadata should always point to a
566 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
567 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
568 	 *  to get 8-byte aligned start address along with align_pad added
569 	 *
570 	 *  |-----------------------------|
571 	 *  |                             |
572 	 *  |-----------------------------| <-----Buffer Pointer Address given
573 	 *  |                             |  ^    in HW descriptor (aligned)
574 	 *  |       HTT Metadata          |  |
575 	 *  |                             |  |
576 	 *  |                             |  | Packet Offset given in descriptor
577 	 *  |                             |  |
578 	 *  |-----------------------------|  |
579 	 *  |       Alignment Pad         |  v
580 	 *  |-----------------------------| <----- Actual buffer start address
581 	 *  |        SKB Data             |           (Unaligned)
582 	 *  |                             |
583 	 *  |                             |
584 	 *  |                             |
585 	 *  |                             |
586 	 *  |                             |
587 	 *  |-----------------------------|
588 	 */
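	/*
	 * Worked example (illustrative): if qdf_nbuf_data() ends in ...0x3,
	 * align_pad = 3; pushing those 3 bytes plus the 8-byte-aligned HTT
	 * metadata keeps the buffer pointer 8-byte aligned, and
	 * pkt_offset = align_pad + htt_hdr_size tells HW where the real
	 * payload starts.
	 */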
589 	if (qdf_unlikely(vdev->mesh_vdev ||
590 				(vdev->opmode == wlan_op_mode_ocb))) {
591 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
592 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
593 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
594 					"qdf_nbuf_push_head failed\n");
595 			goto failure;
596 		}
597 
598 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
599 				meta_data);
600 		if (htt_hdr_size == 0)
601 			goto failure;
602 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
603 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
604 		is_exception = 1;
605 	}
606 
607 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
608 				qdf_nbuf_map(soc->osdev, nbuf,
609 					QDF_DMA_TO_DEVICE))) {
610 		/* Handle failure */
611 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
612 				"qdf_nbuf_map failed\n");
613 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
614 		goto failure;
615 	}
616 
617 	if (qdf_unlikely(vdev->nawds_enabled)) {
618 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
619 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
620 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
621 			is_exception = 1;
622 		}
623 	}
624 
625 #if !TQM_BYPASS_WAR
626 	if (is_exception)
627 #endif
628 	{
629 		/* Temporary WAR due to TQM VP issues */
630 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
631 		qdf_atomic_inc(&pdev->num_tx_exception);
632 	}
633 
634 	return tx_desc;
635 
636 failure:
637 	dp_tx_desc_release(tx_desc, desc_pool_id);
638 	return NULL;
639 }
640 
641 /**
642  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
643  * @vdev: DP vdev handle
644  * @nbuf: skb
645  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
646  * @desc_pool_id : Descriptor Pool ID
647  *
648  * Allocate and prepare Tx descriptor with msdu and fragment descritor
649  * information. For frames wth fragments, allocate and prepare
650  * an MSDU extension descriptor
651  *
652  * Return: Pointer to Tx Descriptor on success,
653  *         NULL on failure
654  */
655 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
656 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
657 		uint8_t desc_pool_id)
658 {
659 	struct dp_tx_desc_s *tx_desc;
660 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
661 	struct dp_pdev *pdev = vdev->pdev;
662 	struct dp_soc *soc = pdev->soc;
663 
664 	/* Allocate software Tx descriptor */
665 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
666 	if (!tx_desc) {
667 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
668 		return NULL;
669 	}
670 
671 	/* Flow control/Congestion Control counters */
672 	qdf_atomic_inc(&pdev->num_tx_outstanding);
673 
674 	/* Initialize the SW tx descriptor */
675 	tx_desc->nbuf = nbuf;
676 	tx_desc->frm_type = msdu_info->frm_type;
677 	tx_desc->tx_encap_type = vdev->tx_encap_type;
678 	tx_desc->vdev = vdev;
679 	tx_desc->pdev = pdev;
680 	tx_desc->pkt_offset = 0;
681 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
682 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
683 
684 	/* Handle scattered frames - TSO/SG/ME */
685 	/* Allocate and prepare an extension descriptor for scattered frames */
686 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
687 	if (!msdu_ext_desc) {
688 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
689 				"%s Tx Extension Descriptor Alloc Fail\n",
690 				__func__);
691 		goto failure;
692 	}
693 
694 #if TQM_BYPASS_WAR
695 	/* Temporary WAR due to TQM VP issues */
696 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
697 	qdf_atomic_inc(&pdev->num_tx_exception);
698 #endif
699 	if (qdf_unlikely(vdev->mesh_vdev))
700 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
701 
702 	tx_desc->msdu_ext_desc = msdu_ext_desc;
703 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
704 
705 	return tx_desc;
706 failure:
707 	dp_tx_desc_release(tx_desc, desc_pool_id);
708 	return NULL;
709 }
710 
711 /**
712  * dp_tx_prepare_raw() - Prepare RAW packet TX
713  * @vdev: DP vdev handle
714  * @nbuf: buffer pointer
715  * @seg_info: Pointer to Segment info Descriptor to be prepared
716  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
717  *     descriptor
718  *
719  * Return: nbuf on success, NULL on failure
720  */
721 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
722 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
723 {
724 	qdf_nbuf_t curr_nbuf = NULL;
725 	uint16_t total_len = 0;
726 	qdf_dma_addr_t paddr;
727 	int32_t i;
728 	int32_t mapped_buf_num = 0;
729 
730 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
731 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
732 
733 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
734 
735 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
736 	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
737 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
738 
739 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
740 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
741 
742 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
743 					QDF_DMA_TO_DEVICE)) {
744 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
745 				"%s dma map error \n", __func__);
746 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
747 			mapped_buf_num = i;
748 			goto error;
749 		}
750 
751 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
752 		seg_info->frags[i].paddr_lo = paddr;
753 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
754 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
755 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
756 		total_len += qdf_nbuf_len(curr_nbuf);
757 	}
758 
759 	seg_info->frag_cnt = i;
760 	seg_info->total_len = total_len;
761 	seg_info->next = NULL;
762 
763 	sg_info->curr_seg = seg_info;
764 
765 	msdu_info->frm_type = dp_tx_frm_raw;
766 	msdu_info->num_seg = 1;
767 
768 	return nbuf;
769 
770 error:
771 	i = 0;
772 	while (nbuf) {
773 		curr_nbuf = nbuf;
774 		if (i < mapped_buf_num) {
775 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
776 			i++;
777 		}
778 		nbuf = qdf_nbuf_next(nbuf);
779 		qdf_nbuf_free(curr_nbuf);
780 	}
781 	return NULL;
782 
783 }
784 
785 /**
786  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
787  * @soc: DP Soc Handle
788  * @vdev: DP vdev handle
789  * @tx_desc: Tx Descriptor Handle
790  * @tid: TID from HLOS for overriding default DSCP-TID mapping
791  * @fw_metadata: Metadata to send to Target Firmware along with frame
792  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
793  *
794  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
795  *  from software Tx descriptor
796  *
797  * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES when the TCL ring is full
798  */
799 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
800 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
801 				   uint16_t fw_metadata, uint8_t ring_id)
802 {
803 	uint8_t type;
804 	uint16_t length;
805 	void *hal_tx_desc, *hal_tx_desc_cached;
806 	qdf_dma_addr_t dma_addr;
807 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
808 
809 	/* Return Buffer Manager ID */
810 	uint8_t bm_id = ring_id;
811 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
812 
813 	hal_tx_desc_cached = (void *) cached_desc;
814 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
815 
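	/*
	 * Scattered frames (TSO/SG/ME/RAW) point the TCL descriptor at the
	 * MSDU extension descriptor; simple linear frames point directly at
	 * the DMA-mapped nbuf.
	 */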
816 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
817 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
818 		type = HAL_TX_BUF_TYPE_EXT_DESC;
819 		dma_addr = tx_desc->msdu_ext_desc->paddr;
820 	} else {
821 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
822 		type = HAL_TX_BUF_TYPE_BUFFER;
823 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
824 	}
825 
826 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
827 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
828 			dma_addr, bm_id, tx_desc->id, type);
829 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
830 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
831 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
832 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
833 			vdev->dscp_tid_map_id);
834 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
835 			sec_type_map[vdev->sec_type]);
836 
837 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
838 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
839 			__func__, length, type, (uint64_t)dma_addr,
840 			tx_desc->pkt_offset, tx_desc->id);
841 
842 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
843 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
844 
845 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
846 			vdev->hal_desc_addr_search_flags);
847 
848 	/* verify checksum offload configuration*/
849 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
850 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
851 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
852 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
853 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
854 	}
855 
856 	if (tid != HTT_TX_EXT_TID_INVALID)
857 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
858 
859 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
860 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
861 
862 
863 	/* Sync cached descriptor with HW */
864 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
865 
866 	if (!hal_tx_desc) {
867 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
868 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
869 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
870 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
871 		return QDF_STATUS_E_RESOURCES;
872 	}
873 
874 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
875 
876 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
877 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
878 
879 	/*
880 	 * If one packet is enqueued in HW, PM usage count needs to be
881 	 * incremented by one to prevent future runtime suspend. This
882 	 * should be tied with the success of enqueuing. It will be
883 	 * decremented after the packet has been sent.
884 	 */
885 	hif_pm_runtime_get_noresume(soc->hif_handle);
886 
887 	return QDF_STATUS_SUCCESS;
888 }
889 
890 
891 /**
892  * dp_cce_classify() - Classify the frame based on CCE rules
893  * @vdev: DP vdev handle
894  * @nbuf: skb
895  *
896  * Classify frames based on CCE rules
897  * Return: true if the frame matches one of the CCE classification rules,
898  *         false otherwise
899  */
900 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
901 {
902 	struct ether_header *eh = NULL;
903 	uint16_t   ether_type;
904 	qdf_llc_t *llcHdr;
905 	qdf_nbuf_t nbuf_clone = NULL;
906 	qdf_dot3_qosframe_t *qos_wh = NULL;
907 
908 	/* for mesh packets don't do any classification */
909 	if (qdf_unlikely(vdev->mesh_vdev))
910 		return false;
911 
912 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
913 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
914 		ether_type = eh->ether_type;
915 		llcHdr = (qdf_llc_t *)(nbuf->data +
916 					sizeof(struct ether_header));
917 	} else {
918 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
919 
920 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
921 			if (qdf_unlikely(
922 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
923 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
924 
925 				ether_type = *(uint16_t *)(nbuf->data
926 						+ QDF_IEEE80211_4ADDR_HDR_LEN
927 						+ sizeof(qdf_llc_t)
928 						- sizeof(ether_type));
929 				llcHdr = (qdf_llc_t *)(nbuf->data +
930 						QDF_IEEE80211_4ADDR_HDR_LEN);
931 			} else {
932 				ether_type = *(uint16_t *)(nbuf->data
933 						+ QDF_IEEE80211_3ADDR_HDR_LEN
934 						+ sizeof(qdf_llc_t)
935 						- sizeof(ether_type));
936 				llcHdr = (qdf_llc_t *)(nbuf->data +
937 					QDF_IEEE80211_3ADDR_HDR_LEN);
938 			}
939 
940 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
941 				&& (ether_type ==
942 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
943 
944 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
945 				return true;
946 			}
947 		}
948 
949 		return false;
950 	}
951 
952 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
953 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
954 				sizeof(*llcHdr));
955 		nbuf_clone = qdf_nbuf_clone(nbuf);
956 		qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
957 
958 		if (ether_type == htons(ETHERTYPE_8021Q)) {
959 			qdf_nbuf_pull_head(nbuf_clone,
960 						sizeof(qdf_net_vlanhdr_t));
961 		}
962 	} else {
963 		if (ether_type == htons(ETHERTYPE_8021Q)) {
964 			nbuf_clone = qdf_nbuf_clone(nbuf);
965 			qdf_nbuf_pull_head(nbuf_clone,
966 					sizeof(qdf_net_vlanhdr_t));
967 		}
968 	}
969 
970 	if (qdf_unlikely(nbuf_clone))
971 		nbuf = nbuf_clone;
972 
973 
974 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
975 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
976 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
977 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
978 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
979 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
980 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
981 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
982 		if (qdf_unlikely(nbuf_clone != NULL))
983 			qdf_nbuf_free(nbuf_clone);
984 		return true;
985 	}
986 
987 	if (qdf_unlikely(nbuf_clone != NULL))
988 		qdf_nbuf_free(nbuf_clone);
989 
990 	return false;
991 }
992 
993 /**
994  * dp_tx_classify_tid() - Obtain TID to be used for this frame
995  * @vdev: DP vdev handle
996  * @nbuf: skb
 * @msdu_info: msdu descriptor in which the selected TID is stored
997  *
998  * Extract the DSCP or PCP information from frame and map into TID value.
999  * Software based TID classification is required when more than 2 DSCP-TID
1000  * mapping tables are needed.
1001  * Hardware supports 2 DSCP-TID mapping tables
1002  *
1003  * Return: void
1004  */
1005 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1006 		struct dp_tx_msdu_info_s *msdu_info)
1007 {
1008 	uint8_t tos = 0, dscp_tid_override = 0;
1009 	uint8_t *hdr_ptr, *L3datap;
1010 	uint8_t is_mcast = 0;
1011 	struct ether_header *eh = NULL;
1012 	qdf_ethervlan_header_t *evh = NULL;
1013 	uint16_t   ether_type;
1014 	qdf_llc_t *llcHdr;
1015 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1016 
1017 	/* for mesh packets don't do any classification */
1018 	if (qdf_unlikely(vdev->mesh_vdev))
1019 		return;
1020 
1021 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1022 		eh = (struct ether_header *) nbuf->data;
1023 		hdr_ptr = eh->ether_dhost;
1024 		L3datap = hdr_ptr + sizeof(struct ether_header);
1025 	} else {
1026 		qdf_dot3_qosframe_t *qos_wh =
1027 			(qdf_dot3_qosframe_t *) nbuf->data;
1028 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1029 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1030 		return;
1031 	}
1032 
1033 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1034 	ether_type = eh->ether_type;
1035 
1036 	/*
1037 	 * Check if packet is dot3 or eth2 type.
1038 	 */
1039 	if (IS_LLC_PRESENT(ether_type)) {
1040 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1041 				sizeof(*llcHdr));
1042 
1043 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1044 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1045 				sizeof(*llcHdr);
1046 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1047 					+ sizeof(*llcHdr) +
1048 					sizeof(qdf_net_vlanhdr_t));
1049 		} else {
1050 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1051 				sizeof(*llcHdr);
1052 		}
1053 	} else {
1054 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1055 			evh = (qdf_ethervlan_header_t *) eh;
1056 			ether_type = evh->ether_type;
1057 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1058 		}
1059 	}
1060 
1061 	/*
1062 	 * Find priority from IP TOS DSCP field
1063 	 */
1064 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1065 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1066 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1067 			/* Only for unicast frames */
1068 			if (!is_mcast) {
1069 				/* send it on VO queue */
1070 				msdu_info->tid = DP_VO_TID;
1071 			}
1072 		} else {
1073 			/*
1074 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1075 			 * from TOS byte.
1076 			 */
1077 			tos = ip->ip_tos;
1078 			dscp_tid_override = 1;
1079 
1080 		}
1081 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1082 		/* TODO
1083 		 * use flowlabel
1084 		 * igmpmld cases to be handled in phase 2
1085 		 */
1086 		unsigned long ver_pri_flowlabel;
1087 		unsigned long pri;
1088 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1089 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1090 			DP_IPV6_PRIORITY_SHIFT;
1091 		tos = pri;
1092 		dscp_tid_override = 1;
1093 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1094 		msdu_info->tid = DP_VO_TID;
1095 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1096 		/* Only for unicast frames */
1097 		if (!is_mcast) {
1098 			/* send ucast arp on VO queue */
1099 			msdu_info->tid = DP_VO_TID;
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * Assign all MCAST packets to BE
1105 	 */
1106 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1107 		if (is_mcast) {
1108 			tos = 0;
1109 			dscp_tid_override = 1;
1110 		}
1111 	}
1112 
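	/*
	 * Illustrative example (assuming DP_IP_DSCP_SHIFT == 2 and
	 * DP_IP_DSCP_MASK == 0x3f): a unicast IPv4 frame with TOS 0xb8
	 * (DSCP 46) indexes entry 46 of the selected dscp_tid_map table.
	 */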
1113 	if (dscp_tid_override == 1) {
1114 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1115 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1116 	}
1117 	return;
1118 }
1119 
1120 #ifdef CONVERGED_TDLS_ENABLE
1121 /**
1122  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1123  * @tx_desc: TX descriptor
1124  *
1125  * Return: None
1126  */
1127 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1128 {
1129 	if (tx_desc->vdev) {
1130 		if (tx_desc->vdev->is_tdls_frame) {
1131 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1132 			tx_desc->vdev->is_tdls_frame = false;
		}
1133 	}
1134 }
1135 
1136 /**
1137  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1138  * @tx_desc: TX descriptor
1139  * @vdev: datapath vdev handle
1140  *
1141  * Return: None
1142  */
1143 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1144 				  struct dp_vdev *vdev)
1145 {
1146 	struct hal_tx_completion_status ts = {0};
1147 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1148 
1149 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1150 	if (vdev->tx_non_std_data_callback.func) {
1151 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1152 		vdev->tx_non_std_data_callback.func(
1153 				vdev->tx_non_std_data_callback.ctxt,
1154 				nbuf, ts.status);
1155 		return;
1156 	}
1157 }
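#else
/*
 * Assumed no-op stub for builds without CONVERGED_TDLS_ENABLE, so that the
 * unconditional call in dp_tx_send_msdu_single() still compiles; the exact
 * upstream definition may differ.
 */
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}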
1158 #endif
1159 
1160 /**
1161  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1162  * @vdev: DP vdev handle
1163  * @nbuf: skb
1164  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1165  * @tx_q: Tx queue to be used for this Tx frame
1166  * @peer_id: peer_id of the peer in case of NAWDS frames
1167  *
1168  * Return: NULL on success,
1169  *         nbuf when it fails to send
1170  */
1171 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1172 		uint8_t tid, struct dp_tx_queue *tx_q,
1173 		uint32_t *meta_data, uint16_t peer_id)
1174 {
1175 	struct dp_pdev *pdev = vdev->pdev;
1176 	struct dp_soc *soc = pdev->soc;
1177 	struct dp_tx_desc_s *tx_desc;
1178 	QDF_STATUS status;
1179 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1180 	uint16_t htt_tcl_metadata = 0;
1181 
1182 	HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);
1183 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1184 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, meta_data);
1185 	if (!tx_desc) {
1186 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1187 			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1188 			  __func__, vdev, tx_q->desc_pool_id);
1189 		return nbuf;
1190 	}
1191 
1192 	if (qdf_unlikely(soc->cce_disable)) {
1193 		if (dp_cce_classify(vdev, nbuf) == true) {
1194 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1195 			tid = DP_VO_TID;
1196 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1197 		}
1198 	}
1199 
1200 	dp_tx_update_tdls_flags(tx_desc);
1201 
1202 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1203 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1204 				"%s %d : HAL RING Access Failed -- %pK\n",
1205 				__func__, __LINE__, hal_srng);
1206 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1207 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1208 		goto fail_return;
1209 	}
1210 
1211 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1212 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1213 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1214 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1215 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1216 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1217 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1218 				peer_id);
1219 	} else
1220 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1221 
1222 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1223 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1224 			htt_tcl_metadata, tx_q->ring_id);
1225 
1226 	if (status != QDF_STATUS_SUCCESS) {
1227 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1228 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1229 			  __func__, tx_desc, tx_q->ring_id);
1230 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1231 		goto fail_return;
1232 	}
1233 
1234 	nbuf = NULL;
1235 
1236 fail_return:
1237 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1238 		hal_srng_access_end(soc->hal_soc, hal_srng);
1239 		hif_pm_runtime_put(soc->hif_handle);
1240 	} else {
1241 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1242 	}
1243 
1244 	return nbuf;
1245 }
1246 
1247 /**
1248  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1249  * @vdev: DP vdev handle
1250  * @nbuf: skb
1251  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1252  *
1253  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1254  *
1255  * Return: NULL on success,
1256  *         nbuf when it fails to send
1257  */
1258 #if QDF_LOCK_STATS
1259 static noinline
1260 #else
1261 static
1262 #endif
1263 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1264 				    struct dp_tx_msdu_info_s *msdu_info)
1265 {
1266 	uint8_t i;
1267 	struct dp_pdev *pdev = vdev->pdev;
1268 	struct dp_soc *soc = pdev->soc;
1269 	struct dp_tx_desc_s *tx_desc;
1270 	bool is_cce_classified = false;
1271 	QDF_STATUS status;
1272 
1273 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1274 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1275 
1276 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1277 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1278 				"%s %d : HAL RING Access Failed -- %pK\n",
1279 				__func__, __LINE__, hal_srng);
1280 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1281 		return nbuf;
1282 	}
1283 
1284 	if (qdf_unlikely(soc->cce_disable)) {
1285 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1286 		if (is_cce_classified) {
1287 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1288 			msdu_info->tid = DP_VO_TID;
1289 		}
1290 	}
1291 
1292 	if (msdu_info->frm_type == dp_tx_frm_me)
1293 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1294 
1295 	i = 0;
1296 	/* Print statement to track i and num_seg */
1297 	/*
1298 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1299 	 * descriptors using information in msdu_info
1300 	 */
1301 	while (i < msdu_info->num_seg) {
1302 		/*
1303 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1304 		 * descriptor
1305 		 */
1306 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1307 				tx_q->desc_pool_id);
1308 
1309 		if (!tx_desc) {
1310 			if (msdu_info->frm_type == dp_tx_frm_me) {
1311 				dp_tx_me_free_buf(pdev,
1312 					(void *)(msdu_info->u.sg_info
1313 						.curr_seg->frags[0].vaddr));
1314 			}
1315 			goto done;
1316 		}
1317 
1318 		if (msdu_info->frm_type == dp_tx_frm_me) {
1319 			tx_desc->me_buffer =
1320 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1321 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1322 		}
1323 
1324 		if (is_cce_classified)
1325 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1326 
1327 		/*
1328 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1329 		 */
1330 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1331 			vdev->htt_tcl_metadata, tx_q->ring_id);
1332 
1333 		if (status != QDF_STATUS_SUCCESS) {
1334 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1335 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1336 				  __func__, tx_desc, tx_q->ring_id);
1337 
1338 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1339 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1340 
1341 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1342 			goto done;
1343 		}
1344 
1345 		/*
1346 		 * TODO
1347 		 * if tso_info structure can be modified to have curr_seg
1348 		 * as first element, following 2 blocks of code (for TSO and SG)
1349 		 * can be combined into 1
1350 		 */
1351 
1352 		/*
1353 		 * For frames with multiple segments (TSO, ME), jump to next
1354 		 * segment.
1355 		 */
1356 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1357 			if (msdu_info->u.tso_info.curr_seg->next) {
1358 				msdu_info->u.tso_info.curr_seg =
1359 					msdu_info->u.tso_info.curr_seg->next;
1360 
1361 				/*
1362 				 * If this is a jumbo nbuf, then increment the number of
1363 				 * nbuf users for each additional segment of the msdu.
1364 				 * This will ensure that the skb is freed only after
1365 				 * receiving tx completion for all segments of an nbuf
1366 				 */
1367 				qdf_nbuf_inc_users(nbuf);
1368 
1369 				/* Check with MCL if this is needed */
1370 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1371 			}
1372 		}
1373 
1374 		/*
1375 		 * For Multicast-Unicast converted packets,
1376 		 * each converted frame (for a client) is represented as
1377 		 * 1 segment
1378 		 */
1379 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1380 				(msdu_info->frm_type == dp_tx_frm_me)) {
1381 			if (msdu_info->u.sg_info.curr_seg->next) {
1382 				msdu_info->u.sg_info.curr_seg =
1383 					msdu_info->u.sg_info.curr_seg->next;
1384 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1385 			}
1386 		}
1387 		i++;
1388 	}
1389 
1390 	nbuf = NULL;
1391 
1392 done:
1393 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1394 		hal_srng_access_end(soc->hal_soc, hal_srng);
1395 		hif_pm_runtime_put(soc->hif_handle);
1396 	} else {
1397 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1398 	}
1399 
1400 	return nbuf;
1401 }
1402 
1403 /**
1404  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
1405  *                     for SG frames
1406  * @vdev: DP vdev handle
1407  * @nbuf: skb
1408  * @seg_info: Pointer to Segment info Descriptor to be prepared
1409  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1410  *
1411  * Return: nbuf on success,
1412  *         NULL on failure
1413  */
1414 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1415 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1416 {
1417 	uint32_t cur_frag, nr_frags;
1418 	qdf_dma_addr_t paddr;
1419 	struct dp_tx_sg_info_s *sg_info;
1420 
1421 	sg_info = &msdu_info->u.sg_info;
1422 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1423 
1424 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1425 				QDF_DMA_TO_DEVICE)) {
1426 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1427 				"dma map error\n");
1428 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1429 
1430 		qdf_nbuf_free(nbuf);
1431 		return NULL;
1432 	}
1433 
1434 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1435 	seg_info->frags[0].paddr_lo = paddr;
1436 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1437 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1438 	seg_info->frags[0].vaddr = (void *) nbuf;
1439 
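	/* frags[0] describes the linear (head) portion of the skb; the paged
	 * fragments are mapped into frags[1..nr_frags] by the loop below.
	 */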
1440 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1441 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1442 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1443 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1444 					"frag dma map error\n");
1445 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1446 			qdf_nbuf_free(nbuf);
1447 			return NULL;
1448 		}
1449 
1450 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1451 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1452 		seg_info->frags[cur_frag + 1].paddr_hi =
1453 			((uint64_t) paddr) >> 32;
1454 		seg_info->frags[cur_frag + 1].len =
1455 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1456 	}
1457 
1458 	seg_info->frag_cnt = (cur_frag + 1);
1459 	seg_info->total_len = qdf_nbuf_len(nbuf);
1460 	seg_info->next = NULL;
1461 
1462 	sg_info->curr_seg = seg_info;
1463 
1464 	msdu_info->frm_type = dp_tx_frm_sg;
1465 	msdu_info->num_seg = 1;
1466 
1467 	return nbuf;
1468 }
1469 
1470 #ifdef MESH_MODE_SUPPORT
1471 
1472 /**
1473  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1474  *				and prepare msdu_info for mesh frames.
1475  * @vdev: DP vdev handle
1476  * @nbuf: skb
1477  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1478  *
1479  * Return: NULL on failure,
1480  *         nbuf when extracted successfully
1481  */
1482 static
1483 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1484 				struct dp_tx_msdu_info_s *msdu_info)
1485 {
1486 	struct meta_hdr_s *mhdr;
1487 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1488 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1489 
1490 	nbuf = qdf_nbuf_unshare(nbuf);
1491 	if (nbuf == NULL) {
1492 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1493 				"qdf_nbuf_unshare failed\n");
1494 		return nbuf;
1495 	}
1496 
1497 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1498 
1499 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1500 
1501 	meta_data->host_tx_desc_pool = 1;
1502 
1503 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1504 		meta_data->power = mhdr->power;
1505 
1506 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1507 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1508 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1509 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1510 
1511 		meta_data->dyn_bw = 1;
1512 
1513 		meta_data->valid_pwr = 1;
1514 		meta_data->valid_mcs_mask = 1;
1515 		meta_data->valid_nss_mask = 1;
1516 		meta_data->valid_preamble_type  = 1;
1517 		meta_data->valid_retries = 1;
1518 		meta_data->valid_bw_info = 1;
1519 	}
1520 
1521 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1522 		meta_data->encrypt_type = 0;
1523 		meta_data->valid_encrypt_type = 1;
1524 	}
1525 
1526 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1527 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1528 	else
1529 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1530 
1531 	meta_data->valid_key_flags = 1;
1532 	meta_data->key_flags = (mhdr->keyix & 0x3);
1533 
1534 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1535 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1536 				"qdf_nbuf_pull_head failed\n");
1537 		qdf_nbuf_free(nbuf);
1538 		return NULL;
1539 	}
1540 
1541 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1542 			"%s , Meta hdr %0x %0x %0x %0x %0x\n",
1543 			__func__, msdu_info->meta_data[0],
1544 			msdu_info->meta_data[1],
1545 			msdu_info->meta_data[2],
1546 			msdu_info->meta_data[3],
1547 			msdu_info->meta_data[4]);
1548 
1549 	return nbuf;
1550 }
1551 #else
1552 static
1553 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1554 				struct dp_tx_msdu_info_s *msdu_info)
1555 {
1556 	return nbuf;
1557 }
1558 
1559 #endif
1560 
1561 #ifdef DP_FEATURE_NAWDS_TX
1562 /**
1563  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1564  * @vdev: dp_vdev handle
1565  * @nbuf: skb
1566  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1567  * @tx_q: Tx queue to be used for this Tx frame
1568  * @meta_data: Meta data for mesh
1570  *
1571  * Return: NULL on success, nbuf on failure
1572  */
1573 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1574 		uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data)
1575 {
1576 	struct dp_peer *peer = NULL;
1577 	struct dp_soc *soc = vdev->pdev->soc;
1578 	struct dp_ast_entry *ast_entry = NULL;
1579 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1580 	uint16_t peer_id = HTT_INVALID_PEER;
1581 
1582 	struct dp_peer *sa_peer = NULL;
1583 	qdf_nbuf_t nbuf_copy;
1584 
1585 	qdf_spin_lock_bh(&(soc->ast_lock));
1586 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost), 0);
1587 	if (ast_entry)
1588 		sa_peer = ast_entry->peer;
1589 
1590 	qdf_spin_unlock_bh(&(soc->ast_lock));
1591 
1592 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
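	/*
	 * Replicate the multicast frame once per NAWDS-enabled peer with a
	 * valid peer id, skipping the peer the frame was received from
	 * (sa_peer) to avoid echoing it back.
	 */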
1593 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1594 				(peer->nawds_enabled)) {
1595 			if (sa_peer == peer) {
1596 				QDF_TRACE(QDF_MODULE_ID_DP,
1597 						QDF_TRACE_LEVEL_DEBUG,
1598 						" %s: broadcast multicast packet",
1599 						 __func__);
1600 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1601 				continue;
1602 			}
1603 
1604 			nbuf_copy = qdf_nbuf_copy(nbuf);
1605 			if (!nbuf_copy) {
1606 				QDF_TRACE(QDF_MODULE_ID_DP,
1607 						QDF_TRACE_LEVEL_ERROR,
1608 						"nbuf copy failed");
				/* Skip this peer rather than sending a NULL copy */
				continue;
1609 			}
1610 
1611 			peer_id = peer->peer_ids[0];
1612 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
1613 					tx_q, meta_data, peer_id);
1614 			if (nbuf_copy != NULL) {
1615 				qdf_nbuf_free(nbuf_copy);
1616 				continue;
1617 			}
1618 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1619 						1, qdf_nbuf_len(nbuf));
1620 		}
1621 	}
1622 	if (peer_id == HTT_INVALID_PEER)
1623 		return nbuf;
1624 
1625 	return NULL;
1626 }
1627 #endif
1628 
1629 /**
1630  * dp_tx_send() - Transmit a frame on a given VAP
1631  * @vap_dev: DP vdev handle
1632  * @nbuf: skb
1633  *
1634  * Entry point for Core Tx layer (DP_TX) invoked from
1635  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1636  * cases
1637  *
1638  * Return: NULL on success,
1639  *         nbuf when it fails to send
1640  */
1641 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1642 {
1643 	struct ether_header *eh = NULL;
1644 	struct dp_tx_msdu_info_s msdu_info;
1645 	struct dp_tx_seg_info_s seg_info;
1646 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1647 	uint16_t peer_id = HTT_INVALID_PEER;
1648 	qdf_nbuf_t nbuf_mesh = NULL;
1649 
1650 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1651 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1652 
1653 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1654 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1655 			"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
1656 			__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
1657 			nbuf->data[3], nbuf->data[4], nbuf->data[5]);
1658 	/*
1659 	 * Set Default Host TID value to invalid TID
1660 	 * (TID override disabled)
1661 	 */
1662 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1663 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1664 
1665 	if (qdf_unlikely(vdev->mesh_vdev)) {
1666 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1667 								&msdu_info);
1668 		if (nbuf_mesh == NULL) {
1669 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1670 					"Extracting mesh metadata failed\n");
1671 			return nbuf;
1672 		}
1673 		nbuf = nbuf_mesh;
1674 	}
1675 
1676 	/*
1677 	 * Get HW Queue to use for this frame.
1678 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1679 	 * dedicated for data and 1 for command.
1680 	 * "queue_id" maps to one hardware ring.
1681 	 *  With each ring, we also associate a unique Tx descriptor pool
1682 	 *  to minimize lock contention for these resources.
1683 	 */
1684 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1685 
1686 	/*
1687 	 * TCL H/W supports 2 DSCP-TID mapping tables.
1688 	 *  Table 1 - Default DSCP-TID mapping table
1689 	 *  Table 2 - 1 DSCP-TID override table
1690 	 *
1691 	 * If we need a different DSCP-TID mapping for this vap,
1692 	 * call tid_classify to extract DSCP/ToS from frame and
1693 	 * map to a TID and store in msdu_info. This is later used
1694 	 * to fill in TCL Input descriptor (per-packet TID override).
1695 	 */
1696 	if (vdev->dscp_tid_map_id > 1)
1697 		dp_tx_classify_tid(vdev, nbuf, &msdu_info);
1698 
1699 	/* Reset the control block */
1700 	qdf_nbuf_reset_ctxt(nbuf);
1701 
1702 	/*
1703 	 * Classify the frame and call corresponding
1704 	 * "prepare" function which extracts the segment (TSO)
1705 	 * and fragmentation information (for TSO , SG, ME, or Raw)
1706 	 * into MSDU_INFO structure which is later used to fill
1707 	 * SW and HW descriptors.
1708 	 */
1709 	if (qdf_nbuf_is_tso(nbuf)) {
1710 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1711 			  "%s TSO frame %pK\n", __func__, vdev);
1712 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
1713 				qdf_nbuf_len(nbuf));
1714 
1715 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
1716 			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
1717 			return nbuf;
1718 		}
1719 
1720 		goto send_multiple;
1721 	}
1722 
1723 	/* SG */
1724 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1725 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
1726 
1727 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1728 			 "%s non-TSO SG frame %pK\n", __func__, vdev);
1729 
1730 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
1731 				qdf_nbuf_len(nbuf));
1732 
1733 		goto send_multiple;
1734 	}
1735 
1736 #ifdef ATH_SUPPORT_IQUE
1737 	/* Mcast to Ucast Conversion */
1738 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1739 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1740 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1741 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1742 				  "%s Mcast frm for ME %pK\n", __func__, vdev);
1743 
1744 			DP_STATS_INC_PKT(vdev,
1745 					tx_i.mcast_en.mcast_pkt, 1,
1746 					qdf_nbuf_len(nbuf));
1747 			if (dp_tx_prepare_send_me(vdev, nbuf) > 0) {
1748 				qdf_nbuf_free(nbuf);
1749 				return NULL;
1750 			}
1751 		}
1752 	}
1753 #endif
1754 
1755 	/* RAW */
1756 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
1757 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
1758 		if (nbuf == NULL)
1759 			return NULL;
1760 
1761 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1762 			  "%s Raw frame %pK\n", __func__, vdev);
1763 
1764 		goto send_multiple;
1765 
1766 	}
1767 
1768 	/*  Single linear frame */
1769 	/*
1770 	 * If nbuf is a simple linear frame, use send_single function to
1771 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1772 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1773 	 */
1774 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
1775 			&msdu_info.tx_queue, msdu_info.meta_data, peer_id);
1776 
1777 	return nbuf;
1778 
1779 send_multiple:
1780 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
1781 
1782 	return nbuf;
1783 }
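
/*
 * Illustrative usage sketch (not part of this file's build): a hypothetical
 * OSIF transmit hook calling dp_tx_send(). The wrapper name and the error
 * handling below are assumptions made for illustration only. The point to
 * note from the API above is that a non-NULL return means the frame was not
 * consumed, so the caller still owns the nbuf and typically frees it.
 *
 * static int osif_hypothetical_xmit(void *dp_vdev_handle, qdf_nbuf_t nbuf)
 * {
 *	qdf_nbuf_t unsent = dp_tx_send(dp_vdev_handle, nbuf);
 *
 *	if (unsent) {
 *		qdf_nbuf_free(unsent);
 *		return -1;
 *	}
 *
 *	return 0;
 * }
 */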
1784 
1785 /**
1786  * dp_tx_reinject_handler() - Tx Reinject Handler
1787  * @tx_desc: software descriptor head pointer
1788  * @status : Tx completion status from HTT descriptor
1789  *
1790  * This function reinjects frames back to Target.
1791  * Todo - Host queue needs to be added
1792  *
1793  * Return: none
1794  */
1795 static
1796 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1797 {
1798 	struct dp_vdev *vdev;
1799 	struct dp_peer *peer = NULL;
1800 	uint32_t peer_id = HTT_INVALID_PEER;
1801 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1802 	qdf_nbuf_t nbuf_copy = NULL;
1803 	struct dp_tx_msdu_info_s msdu_info;
1804 	struct dp_peer *sa_peer = NULL;
1805 	struct dp_ast_entry *ast_entry = NULL;
1806 	struct dp_soc *soc = NULL;
1807 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1808 #ifdef WDS_VENDOR_EXTENSION
1809 	int is_mcast = 0, is_ucast = 0;
1810 	int num_peers_3addr = 0;
1811 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
1812 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
1813 #endif
1814 
1815 	vdev = tx_desc->vdev;
1816 	soc = vdev->pdev->soc;
1817 
1818 	qdf_assert(vdev);
1819 
1820 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1821 
1822 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1823 
1824 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1825 			"%s Tx reinject path\n", __func__);
1826 
1827 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
1828 			qdf_nbuf_len(tx_desc->nbuf));
1829 
1830 	qdf_spin_lock_bh(&(soc->ast_lock));
1831 
1832 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost), 0);
1833 	if (ast_entry)
1834 		sa_peer = ast_entry->peer;
1835 
1836 	qdf_spin_unlock_bh(&(soc->ast_lock));
1837 
1838 #ifdef WDS_VENDOR_EXTENSION
1839 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1840 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
1841 	} else {
1842 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
1843 	}
1844 	is_ucast = !is_mcast;
1845 
1846 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1847 		if (peer->bss_peer)
1848 			continue;
1849 
1850 		/* Detect wds peers that use 3-addr framing for mcast.
1851 		 * if there are any, the bss_peer is used to send
1852 		 * the mcast frame using 3-addr format. all wds enabled
1853 		 * peers that use 4-addr framing for mcast frames will
1854 		 * be duplicated and sent as 4-addr frames below.
1855 		 */
1856 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
1857 			num_peers_3addr = 1;
1858 			break;
1859 		}
1860 	}
1861 #endif
1862 
1863 	if (qdf_unlikely(vdev->mesh_vdev)) {
1864 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
1865 	} else {
1866 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1867 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1868 #ifdef WDS_VENDOR_EXTENSION
1869 			/*
1870 			 * . if 3-addr STA, then send on BSS Peer
1871 			 * . if Peer WDS enabled and accept 4-addr mcast,
1872 			 * send mcast on that peer only
1873 			 * . if Peer WDS enabled and accept 4-addr ucast,
1874 			 * send ucast on that peer only
1875 			 */
1876 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
1877 			 (peer->wds_enabled &&
1878 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
1879 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
1880 #else
1881 			((peer->bss_peer &&
1882 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
1883 				 peer->nawds_enabled)) {
1884 #endif
1885 				peer_id = DP_INVALID_PEER;
1886 
1887 				if (peer->nawds_enabled) {
1888 					peer_id = peer->peer_ids[0];
1889 					if (sa_peer == peer) {
1890 						QDF_TRACE(
1891 							QDF_MODULE_ID_DP,
1892 							QDF_TRACE_LEVEL_DEBUG,
1893 							" %s: multicast packet",
1894 							__func__);
1895 						DP_STATS_INC(peer,
1896 							tx.nawds_mcast_drop, 1);
1897 						continue;
1898 					}
1899 				}
1900 
1901 				nbuf_copy = qdf_nbuf_copy(nbuf);
1902 
1903 				if (!nbuf_copy) {
1904 					QDF_TRACE(QDF_MODULE_ID_DP,
1905 						QDF_TRACE_LEVEL_DEBUG,
1906 						FL("nbuf copy failed"));
1907 					break;
1908 				}
1909 
1910 				nbuf_copy = dp_tx_send_msdu_single(vdev,
1911 						nbuf_copy,
1912 						msdu_info.tid,
1913 						&msdu_info.tx_queue,
1914 						msdu_info.meta_data,
1915 						peer_id);
1916 
1917 				if (nbuf_copy) {
1918 					QDF_TRACE(QDF_MODULE_ID_DP,
1919 						QDF_TRACE_LEVEL_DEBUG,
1920 						FL("pkt send failed"));
1921 					qdf_nbuf_free(nbuf_copy);
1922 				} else {
1923 					if (peer_id != DP_INVALID_PEER)
1924 						DP_STATS_INC_PKT(peer,
1925 							tx.nawds_mcast,
1926 							1, qdf_nbuf_len(nbuf));
1927 				}
1928 			}
1929 		}
1930 	}
1931 
1932 	if (vdev->nawds_enabled) {
1933 		peer_id = DP_INVALID_PEER;
1934 
1935 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
1936 					1, qdf_nbuf_len(nbuf));
1937 
1938 		nbuf = dp_tx_send_msdu_single(vdev,
1939 				nbuf, msdu_info.tid,
1940 				&msdu_info.tx_queue,
1941 				msdu_info.meta_data, peer_id);
1942 
1943 		if (nbuf) {
1944 			QDF_TRACE(QDF_MODULE_ID_DP,
1945 				QDF_TRACE_LEVEL_DEBUG,
1946 				FL("pkt send failed"));
1947 			qdf_nbuf_free(nbuf);
1948 		}
1949 	} else
1950 		qdf_nbuf_free(nbuf);
1951 
1952 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1953 }
1954 
1955 /**
1956  * dp_tx_inspect_handler() - Tx Inspect Handler
1957  * @tx_desc: software descriptor head pointer
1958  * @status : Tx completion status from HTT descriptor
1959  *
1960  * Handles Tx frames sent back to Host for inspection
1961  * (ProxyARP)
1962  *
1963  * Return: none
1964  */
1965 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
1966 {
1967 
1968 	struct dp_soc *soc;
1969 	struct dp_pdev *pdev = tx_desc->pdev;
1970 
1971 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1972 			"%s Tx inspect path\n",
1973 			__func__);
1974 
1975 	qdf_assert(pdev);
1976 
1977 	soc = pdev->soc;
1978 
1979 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
1980 			qdf_nbuf_len(tx_desc->nbuf));
1981 
1982 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
1983 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
1984 }
1985 
1986 #ifdef FEATURE_PERPKT_INFO
1987 QDF_STATUS
1988 dp_send_compl_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1989 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
1990 {
1991 	struct tx_capture_hdr *ppdu_hdr;
1992 	struct dp_peer *peer = NULL;
1993 
1994 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
1995 		return QDF_STATUS_E_NOSUPPORT;
1996 
1997 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
1998 			dp_peer_find_by_id(soc, peer_id);
1999 
2000 	if (!peer) {
2001 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2002 				FL("Peer Invalid"));
2003 		return QDF_STATUS_E_INVAL;
2004 	}
2005 
2006 	if (pdev->mcopy_mode) {
2007 		if ((pdev->am_copy_id.tx_ppdu_id == ppdu_id) &&
2008 			(pdev->am_copy_id.tx_peer_id == peer_id)) {
2009 			return QDF_STATUS_E_INVAL;
2010 		}
2011 
2012 		pdev->am_copy_id.tx_ppdu_id = ppdu_id;
2013 		pdev->am_copy_id.tx_peer_id = peer_id;
2014 	}
2015 
2016 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2017 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2018 				FL("No headroom"));
2019 		return QDF_STATUS_E_NOMEM;
2020 	}
2021 
2022 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2023 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2024 					IEEE80211_ADDR_LEN);
2025 	ppdu_hdr->ppdu_id = ppdu_id;
2026 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2027 			IEEE80211_ADDR_LEN);
2028 
2029 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2030 				netbuf, peer_id,
2031 				WDI_NO_VAL, pdev->pdev_id);
2032 
2033 	return QDF_STATUS_SUCCESS;
2034 }
2035 #else
2036 static QDF_STATUS
2037 dp_send_compl_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2038 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2039 {
2040 	return QDF_STATUS_E_NOSUPPORT;
2041 }
2042 #endif
2043 
2044 /**
2045  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2046  * @soc: Soc handle
2047  * @desc: software Tx descriptor to be processed
2048  *
2049  * Return: none
2050  */
2051 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2052 		struct dp_tx_desc_s *desc)
2053 {
2054 	struct dp_vdev *vdev = desc->vdev;
2055 	qdf_nbuf_t nbuf = desc->nbuf;
2056 	struct hal_tx_completion_status ts = {0};
2057 
2058 	/* desc has already been dereferenced above, so it cannot be NULL here */
2059 	hal_tx_comp_get_status(&desc->comp, &ts);
2060 
2061 	/* If it is TDLS mgmt, don't unmap or free the frame */
2062 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2063 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2064 
2065 	/* 0 : MSDU buffer, 1 : MLE */
2066 	if (desc->msdu_ext_desc) {
2067 		/* TSO free */
2068 		if (hal_tx_ext_desc_get_tso_enable(
2069 					desc->msdu_ext_desc->vaddr)) {
2070 			/* If the remaining number of segments is 0,
2071 			 * the actual TSO buffer may be unmapped and freed */
2072 			if (!DP_DESC_NUM_FRAG(desc)) {
2073 				qdf_nbuf_unmap(soc->osdev, nbuf,
2074 						QDF_DMA_TO_DEVICE);
2075 				qdf_nbuf_free(nbuf);
2076 				return;
2077 			}
2078 		}
2079 	}
2080 
2081 	if (desc->flags & DP_TX_DESC_FLAG_ME)
2082 		dp_tx_me_free_buf(desc->pdev, desc->me_buffer);
2083 
2084 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2085 
2086 	if (dp_send_compl_to_stack(soc, desc->pdev, ts.peer_id,
2087 			ts.ppdu_id, nbuf) == QDF_STATUS_SUCCESS)
2088 		return;
2089 
2090 	if (!vdev->mesh_vdev) {
2091 		qdf_nbuf_free(nbuf);
2092 	} else {
2093 		vdev->osif_tx_free_ext((nbuf));
2094 	}
2095 }
2096 
2097 /**
2098  * dp_tx_mec_handler() - Tx MEC Notify Handler
2099  * @vdev: pointer to dp vdev handle
2100  * @status : Tx completion status from HTT descriptor
2101  *
2102  * Handles MEC notify event sent from fw to Host
2103  *
2104  * Return: none
2105  */
2106 #ifdef FEATURE_WDS
2107 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2108 {
2109 
2110 	struct dp_soc *soc;
2111 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2112 	struct dp_peer *peer;
2113 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2114 
2115 	soc = vdev->pdev->soc;
2116 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2117 	peer = TAILQ_FIRST(&vdev->peer_list);
2118 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2119 
2120 	if (!peer) {
2121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2122 				FL("peer is NULL"));
2123 		return;
2124 	}
2125 
2126 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2127 			"%s Tx MEC Handler\n",
2128 			__func__);
2129 
2130 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2131 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2132 					status[(DP_MAC_ADDR_LEN - 2) + i];
2133 
2134 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN) &&
2135 		!dp_peer_add_ast(soc, peer, mac_addr, dp_ast_type_mec)) {
2136 			soc->cdp_soc.ol_ops->peer_add_wds_entry(
2137 				vdev->osif_vdev,
2138 				mac_addr,
2139 				vdev->mac_addr.raw,
2140 				flags);
2141 	}
2142 }
2143 #else
2144 static void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2145 {
2146 }
2147 #endif
2148 
2149 /**
2150  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2151  * @tx_desc: software descriptor head pointer
2152  * @status : Tx completion status from HTT descriptor
2153  *
2154  * This function will process HTT Tx indication messages from Target
2155  *
2156  * Return: none
2157  */
2158 static
2159 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2160 {
2161 	uint8_t tx_status;
2162 	struct dp_pdev *pdev;
2163 	struct dp_vdev *vdev;
2164 	struct dp_soc *soc;
2165 	uint32_t *htt_status_word = (uint32_t *) status;
2166 
2167 	qdf_assert(tx_desc->pdev);
2168 
2169 	pdev = tx_desc->pdev;
2170 	vdev = tx_desc->vdev;
2171 	soc = pdev->soc;
2172 
2173 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2174 
2175 	switch (tx_status) {
2176 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2177 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2178 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2179 	{
2180 		dp_tx_comp_free_buf(soc, tx_desc);
2181 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2182 		break;
2183 	}
2184 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2185 	{
2186 		dp_tx_reinject_handler(tx_desc, status);
2187 		break;
2188 	}
2189 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2190 	{
2191 		dp_tx_inspect_handler(tx_desc, status);
2192 		break;
2193 	}
2194 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2195 	{
2196 		dp_tx_mec_handler(vdev, status);
2197 		break;
2198 	}
2199 	default:
2200 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2201 				"%s Invalid HTT tx_status %d\n",
2202 				__func__, tx_status);
2203 		break;
2204 	}
2205 }
2206 
2207 #ifdef MESH_MODE_SUPPORT
2208 /**
2209  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2210  *                                         in mesh meta header
2211  * @tx_desc: software descriptor head pointer
2212  * @ts: pointer to tx completion stats
2213  * Return: none
2214  */
2215 static
2216 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2217 		struct hal_tx_completion_status *ts)
2218 {
2219 	struct meta_hdr_s *mhdr;
2220 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2221 
2222 	if (!tx_desc->msdu_ext_desc) {
2223 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2224 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2225 				"netbuf %pK offset %d\n",
2226 				netbuf, tx_desc->pkt_offset);
2227 			return;
2228 		}
2229 	}
2230 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2231 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2232 			"netbuf %pK offset %d\n", netbuf,
2233 			sizeof(struct meta_hdr_s));
2234 		return;
2235 	}
2236 
2237 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2238 	mhdr->rssi = ts->ack_frame_rssi;
2239 	mhdr->channel = tx_desc->pdev->operating_channel;
2240 }
2241 
2242 #else
2243 static
2244 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2245 		struct hal_tx_completion_status *ts)
2246 {
2247 }
2248 
2249 #endif
2250 
2251 /**
2252  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2253  * @peer: Handle to DP peer
2254  * @ts: pointer to HAL Tx completion stats
2255  * @length: MSDU length
2256  *
2257  * Return: None
2258  */
2259 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2260 		struct hal_tx_completion_status *ts, uint32_t length)
2261 {
2262 	struct dp_pdev *pdev = peer->vdev->pdev;
2263 	struct dp_soc *soc = pdev->soc;
2264 	uint8_t mcs, pkt_type;
2265 
2266 	mcs = ts->mcs;
2267 	pkt_type = ts->pkt_type;
2268 
2269 
2270 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2271 		return;
2272 
2273 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2274 			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2275 
2276 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2277 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2278 
2279 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2280 			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2281 
2282 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2283 			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2284 
2285 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2286 		return;
2287 
2288 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2289 
2290 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2291 
2292 	if (!(soc->process_tx_status))
2293 		return;
2294 
2295 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
2296 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2297 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2298 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2299 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
2300 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2301 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2302 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2303 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
2304 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2305 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2306 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2307 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
2308 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2309 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2310 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2311 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
2312 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2313 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2314 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2315 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2316 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2317 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2318 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2319 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2320 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2321 	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2322 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2323 
2324 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2325 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
2326 				&peer->stats, ts->peer_id,
2327 				UPDATE_PEER_STATS);
2328 	}
2329 }
2330 
2331 /**
2332  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2333  * @tx_desc: software descriptor head pointer
2334  * @length: packet length
2335  *
2336  * Return: none
2337  */
2338 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2339 		uint32_t length)
2340 {
2341 	struct hal_tx_completion_status ts;
2342 	struct dp_soc *soc = NULL;
2343 	struct dp_vdev *vdev = tx_desc->vdev;
2344 	struct dp_peer *peer = NULL;
2345 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2346 
2347 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2348 				"-------------------- \n"
2349 				"Tx Completion Stats: \n"
2350 				"-------------------- \n"
2351 				"ack_frame_rssi = %d \n"
2352 				"first_msdu = %d \n"
2353 				"last_msdu = %d \n"
2354 				"msdu_part_of_amsdu = %d \n"
2355 				"rate_stats valid = %d \n"
2356 				"bw = %d \n"
2357 				"pkt_type = %d \n"
2358 				"stbc = %d \n"
2359 				"ldpc = %d \n"
2360 				"sgi = %d \n"
2361 				"mcs = %d \n"
2362 				"ofdma = %d \n"
2363 				"tones_in_ru = %d \n"
2364 				"tsf = %d \n"
2365 				"ppdu_id = %d \n"
2366 				"transmit_cnt = %d \n"
2367 				"tid = %d \n"
2368 				"peer_id = %d \n",
2369 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2370 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2371 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2372 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2373 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2374 				ts.peer_id);
2375 
2376 	if (!vdev) {
2377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2378 				"invalid vdev");
2379 		goto out;
2380 	}
2381 
2382 	soc = vdev->pdev->soc;
2383 
2384 	/* Update SoC level stats */
2385 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2386 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2387 
2388 	/* Update per-packet stats */
2389 	if (qdf_unlikely(vdev->mesh_vdev))
2390 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2391 
2392 	/* Update peer level stats */
2393 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2394 	if (!peer) {
2395 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2396 				"invalid peer");
2397 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2398 		goto out;
2399 	}
2400 
2401 	dp_tx_update_peer_stats(peer, &ts, length);
2402 
2403 out:
2404 	return;
2405 }
2406 
2407 /**
2408  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2409  * @soc: core txrx main context
2410  * @comp_head: software descriptor head pointer
2411  *
2412  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
2413  * and release the software descriptors after processing is complete
2414  *
2415  * Return: none
2416  */
2417 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2418 		struct dp_tx_desc_s *comp_head)
2419 {
2420 	struct dp_tx_desc_s *desc;
2421 	struct dp_tx_desc_s *next;
2422 	struct hal_tx_completion_status ts = {0};
2423 	uint32_t length;
2424 	struct dp_peer *peer;
2425 
2426 	DP_HIST_INIT();
2427 	desc = comp_head;
2428 
2429 	while (desc) {
2430 		hal_tx_comp_get_status(&desc->comp, &ts);
2431 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2432 		length = qdf_nbuf_len(desc->nbuf);
2433 
2434 		dp_tx_comp_process_tx_status(desc, length);
2435 
2436 		dp_tx_comp_free_buf(soc, desc);
2437 
2438 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2439 
2440 		next = desc->next;
2441 		dp_tx_desc_release(desc, desc->pool_id);
2442 		desc = next;
2443 	}
2444 	DP_TX_HIST_STATS_PER_PDEV();
2445 }
2446 
2447 /**
2448  * dp_tx_comp_handler() - Tx completion handler
2449  * @soc: core txrx main context
2450  * @ring_id: completion ring id
2451  * @quota: No. of packets/descriptors that can be serviced in one loop
2452  *
2453  * This function will collect hardware release ring element contents and
2454  * handle descriptor contents. Based on contents, free packet or handle error
2455  * conditions
2456  *
2457  * Return: none
2458  */
2459 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2460 {
2461 	void *tx_comp_hal_desc;
2462 	uint8_t buffer_src;
2463 	uint8_t pool_id;
2464 	uint32_t tx_desc_id;
2465 	struct dp_tx_desc_s *tx_desc = NULL;
2466 	struct dp_tx_desc_s *head_desc = NULL;
2467 	struct dp_tx_desc_s *tail_desc = NULL;
2468 	uint32_t num_processed;
2469 	uint32_t count;
2470 
2471 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2472 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2473 				"%s %d : HAL RING Access Failed -- %pK\n",
2474 				__func__, __LINE__, hal_srng);
2475 		return 0;
2476 	}
2477 
2478 	num_processed = 0;
2479 	count = 0;
2480 
2481 	/* Find head descriptor from completion ring */
2482 	while (qdf_likely(tx_comp_hal_desc =
2483 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2484 
2485 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2486 
2487 		/* If this buffer was not released by TQM or FW, then it is
2488 		 * not a Tx completion indication; assert */
2489 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2490 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2491 
2492 			QDF_TRACE(QDF_MODULE_ID_DP,
2493 					QDF_TRACE_LEVEL_FATAL,
2494 					"Tx comp release_src != TQM | FW");
2495 
2496 			qdf_assert_always(0);
2497 		}
2498 
2499 		/* Get descriptor id */
2500 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
2501 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2502 			DP_TX_DESC_ID_POOL_OS;
2503 
2504 		/* Pool ID is out of limit. Error */
2505 		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
2506 					soc->wlan_cfg_ctx)) {
2507 			QDF_TRACE(QDF_MODULE_ID_DP,
2508 					QDF_TRACE_LEVEL_FATAL,
2509 					"Tx Comp pool id %d not valid",
2510 					pool_id);
2511 
2512 			qdf_assert_always(0);
2513 		}
2514 
2515 		/* Find Tx descriptor */
2516 		tx_desc = dp_tx_desc_find(soc, pool_id,
2517 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
2518 				DP_TX_DESC_ID_PAGE_OS,
2519 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
2520 				DP_TX_DESC_ID_OFFSET_OS);
2521 
2522 		/* Pool id is not matching. Error */
2523 		if (tx_desc && (tx_desc->pool_id != pool_id)) {
2524 			QDF_TRACE(QDF_MODULE_ID_DP,
2525 					QDF_TRACE_LEVEL_FATAL,
2526 					"Tx Comp pool id %d not matched %d",
2527 					pool_id, tx_desc->pool_id);
2528 
2529 			qdf_assert_always(0);
2530 		}
2531 
2532 		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
2533 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
2534 			QDF_TRACE(QDF_MODULE_ID_DP,
2535 					QDF_TRACE_LEVEL_FATAL,
2536 					"Txdesc invalid, flags = %x, id = %d",
2537 					tx_desc->flags,	tx_desc_id);
2538 
2539 			qdf_assert_always(0);
2540 		}
2541 
2542 		/*
2543 		 * If the release source is FW, process the HTT status
2544 		 */
2545 		if (qdf_unlikely(buffer_src ==
2546 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2547 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
2548 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
2549 					htt_tx_status);
2550 			dp_tx_process_htt_completion(tx_desc,
2551 					htt_tx_status);
2552 		} else {
2553 
2554 			/* First ring descriptor on the cycle */
2555 			if (!head_desc) {
2556 				head_desc = tx_desc;
2557 				tail_desc = tx_desc;
2558 			}
2559 
2560 			tail_desc->next = tx_desc;
2561 			tx_desc->next = NULL;
2562 			tail_desc = tx_desc;
2563 
2564 			/* Collect hw completion contents */
2565 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
2566 					&tx_desc->comp, 1);
2567 
2568 		}
2569 
2570 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
2571 		/* Decrement PM usage count if the packet has been sent.*/
2572 		hif_pm_runtime_put(soc->hif_handle);
2573 
2574 		/*
2575 		 * If the processed packet count exceeds the given quota,
2576 		 * stop processing
2577 		 */
2578 		if ((num_processed >= quota))
2579 			break;
2580 
2581 		count++;
2582 	}
2583 
2584 	hal_srng_access_end(soc->hal_soc, hal_srng);
2585 
2586 	/* Process the reaped descriptors */
2587 	if (head_desc)
2588 		dp_tx_comp_process_desc(soc, head_desc);
2589 
2590 	return num_processed;
2591 }
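
/*
 * Illustrative usage sketch (not part of this file's build): a hypothetical
 * service routine draining one WBM Tx completion ring within a NAPI-style
 * budget. The routine name and the remaining-budget arithmetic below are
 * assumptions for illustration; dp_tx_comp_handler() itself only needs the
 * soc, the hal_srng of the completion ring and a quota, and returns the
 * work count it charged against that quota.
 *
 * static uint32_t dp_hypothetical_tx_comp_service(struct dp_soc *soc,
 *		void *tx_comp_hal_srng, uint32_t budget)
 * {
 *	uint32_t work_done;
 *
 *	work_done = dp_tx_comp_handler(soc, tx_comp_hal_srng, budget);
 *
 *	return (budget > work_done) ? (budget - work_done) : 0;
 * }
 */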
2592 
2593 #ifdef CONVERGED_TDLS_ENABLE
2594 /**
2595  * dp_tx_non_std() - Allow the control-path SW to send data frames
2596  *
2597  * @vdev_handle: vdev that should transmit the tx data frames
2598  * @tx_spec: what non-standard handling to apply to the tx data frames
2599  * @msdu_list: NULL-terminated list of tx MSDUs
2600  *
2601  * Return: NULL on success,
2602  *         nbuf when it fails to send
2603  */
2604 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
2605 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
2606 {
2607 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2608 
2609 	if (tx_spec & OL_TX_SPEC_NO_FREE)
2610 		vdev->is_tdls_frame = true;
2611 	return dp_tx_send(vdev_handle, msdu_list);
2612 }
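
/*
 * Illustrative usage sketch (not part of this file's build): a hypothetical
 * TDLS control-path sender using OL_TX_SPEC_NO_FREE so that the vdev marks
 * the frame as a TDLS frame and dp_tx_comp_free_buf() hands it to the
 * non-standard completion path instead of unmapping and freeing it directly.
 * The variable names and the choice to free an unsent frame here are
 * assumptions made for illustration only.
 *
 * qdf_nbuf_t unsent;
 *
 * unsent = dp_tx_non_std(vdev_handle, OL_TX_SPEC_NO_FREE, tdls_mgmt_nbuf);
 * if (unsent)
 *	qdf_nbuf_free(unsent);
 */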
2613 #endif
2614 
2615 /**
2616  * dp_tx_vdev_attach() - attach vdev to dp tx
2617  * @vdev: virtual device instance
2618  *
2619  * Return: QDF_STATUS_SUCCESS: success
2620  *         QDF_STATUS_E_RESOURCES: Error return
2621  */
2622 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
2623 {
2624 	/*
2625 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
2626 	 */
2627 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
2628 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
2629 
2630 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
2631 			vdev->vdev_id);
2632 
2633 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
2634 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
2635 
2636 	/*
2637 	 * Set HTT Extension Valid bit to 0 by default
2638 	 */
2639 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
2640 
2641 	dp_tx_vdev_update_search_flags(vdev);
2642 
2643 	return QDF_STATUS_SUCCESS;
2644 }
2645 
2646 /**
2647  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
2648  * @vdev: virtual device instance
2649  *
2650  * Return: void
2651  *
2652  */
2653 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
2654 {
2655 	/*
2656 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
2657 	 * for TDLS link
2658 	 *
2659 	 * Enable AddrY (SA based search) only for non-WDS STA and
2660 	 * ProxySTA VAP modes.
2661 	 *
2662 	 * In all other VAP modes, only DA based search should be
2663 	 * enabled
2664 	 */
2665 	if (vdev->opmode == wlan_op_mode_sta &&
2666 	    vdev->tdls_link_connected)
2667 		vdev->hal_desc_addr_search_flags =
2668 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
2669 	else if ((vdev->opmode == wlan_op_mode_sta &&
2670 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
2671 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
2672 	else
2673 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
2674 }
2675 
2676 /**
2677  * dp_tx_vdev_detach() - detach vdev from dp tx
2678  * @vdev: virtual device instance
2679  *
2680  * Return: QDF_STATUS_SUCCESS: success
2681  *         QDF_STATUS_E_RESOURCES: Error return
2682  */
2683 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
2684 {
2685 	return QDF_STATUS_SUCCESS;
2686 }
2687 
2688 /**
2689  * dp_tx_pdev_attach() - attach pdev to dp tx
2690  * @pdev: physical device instance
2691  *
2692  * Return: QDF_STATUS_SUCCESS: success
2693  *         QDF_STATUS_E_RESOURCES: Error return
2694  */
2695 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
2696 {
2697 	struct dp_soc *soc = pdev->soc;
2698 
2699 	/* Initialize Flow control counters */
2700 	qdf_atomic_init(&pdev->num_tx_exception);
2701 	qdf_atomic_init(&pdev->num_tx_outstanding);
2702 
2703 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2704 		/* Initialize descriptors in TCL Ring */
2705 		hal_tx_init_data_ring(soc->hal_soc,
2706 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
2707 	}
2708 
2709 	return QDF_STATUS_SUCCESS;
2710 }
2711 
2712 /**
2713  * dp_tx_pdev_detach() - detach pdev from dp tx
2714  * @pdev: physical device instance
2715  *
2716  * Return: QDF_STATUS_SUCCESS: success
2717  *         QDF_STATUS_E_RESOURCES: Error return
2718  */
2719 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
2720 {
2721 	/* What should be done here? */
2722 	return QDF_STATUS_SUCCESS;
2723 }
2724 
2725 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2726 /* Pools will be allocated dynamically */
2727 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
2728 					int num_desc)
2729 {
2730 	uint8_t i;
2731 
2732 	for (i = 0; i < num_pool; i++) {
2733 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
2734 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
2735 	}
2736 
2737 	return 0;
2738 }
2739 
2740 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
2741 {
2742 	uint8_t i;
2743 
2744 	for (i = 0; i < num_pool; i++)
2745 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
2746 }
2747 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
2748 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
2749 					int num_desc)
2750 {
2751 	uint8_t i;
2752 
2753 	/* Allocate software Tx descriptor pools */
2754 	for (i = 0; i < num_pool; i++) {
2755 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
2756 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2757 					"%s Tx Desc Pool alloc %d failed %pK\n",
2758 					__func__, i, soc);
2759 			return ENOMEM;
2760 		}
2761 	}
2762 	return 0;
2763 }
2764 
2765 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
2766 {
2767 	uint8_t i;
2768 
2769 	for (i = 0; i < num_pool; i++) {
2770 		if (dp_tx_desc_pool_free(soc, i)) {
2771 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2772 				"%s Tx Desc Pool Free failed\n", __func__);
2773 		}
2774 	}
2775 }
2776 
2777 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
2778 
2779 /**
2780  * dp_tx_soc_detach() - detach soc from dp tx
2781  * @soc: core txrx main context
2782  *
2783  * This function will detach dp tx from the main device context and
2784  * will free dp tx resources
2785  *
2786  * Return: QDF_STATUS_SUCCESS: success
2787  *         QDF_STATUS_E_RESOURCES: Error return
2788  */
2789 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
2790 {
2791 	uint8_t num_pool;
2792 	uint16_t num_desc;
2793 	uint16_t num_ext_desc;
2794 	uint8_t i;
2795 
2796 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2797 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2798 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2799 
2800 	dp_tx_flow_control_deinit(soc);
2801 	dp_tx_delete_static_pools(soc, num_pool);
2802 
2803 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2804 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
2805 			__func__, num_pool, num_desc);
2806 
2807 	for (i = 0; i < num_pool; i++) {
2808 		if (dp_tx_ext_desc_pool_free(soc, i)) {
2809 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2810 					"%s Tx Ext Desc Pool Free failed\n",
2811 					__func__);
2812 			return QDF_STATUS_E_RESOURCES;
2813 		}
2814 	}
2815 
2816 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2817 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
2818 			__func__, num_pool, num_ext_desc);
2819 
2820 	for (i = 0; i < num_pool; i++) {
2821 		dp_tx_tso_desc_pool_free(soc, i);
2822 	}
2823 
2824 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2825 			"%s TSO Desc Pool %d Free descs = %d\n",
2826 			__func__, num_pool, num_desc);
2827 
2828 
2829 	for (i = 0; i < num_pool; i++)
2830 		dp_tx_tso_num_seg_pool_free(soc, i);
2831 
2832 
2833 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2834 		"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
2835 		__func__, num_pool, num_desc);
2836 
2837 	return QDF_STATUS_SUCCESS;
2838 }
2839 
2840 /**
2841  * dp_tx_soc_attach() - attach soc to dp tx
2842  * @soc: core txrx main context
2843  *
2844  * This function will attach dp tx to the main device context,
2845  * allocate dp tx resources and initialize them
2846  *
2847  * Return: QDF_STATUS_SUCCESS: success
2848  *         QDF_STATUS_E_RESOURCES: Error return
2849  */
2850 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
2851 {
2852 	uint8_t i;
2853 	uint8_t num_pool;
2854 	uint32_t num_desc;
2855 	uint32_t num_ext_desc;
2856 
2857 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
2858 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
2859 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
2860 
2861 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
2862 		goto fail;
2863 
2864 	dp_tx_flow_control_init(soc);
2865 
2866 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2867 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
2868 			__func__, num_pool, num_desc);
2869 
2870 	/* Allocate extension tx descriptor pools */
2871 	for (i = 0; i < num_pool; i++) {
2872 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
2873 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2874 				"MSDU Ext Desc Pool alloc %d failed %pK\n",
2875 				i, soc);
2876 
2877 			goto fail;
2878 		}
2879 	}
2880 
2881 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2882 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
2883 			__func__, num_pool, num_ext_desc);
2884 
2885 	for (i = 0; i < num_pool; i++) {
2886 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
2887 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2888 				"TSO Desc Pool alloc %d failed %pK\n",
2889 				i, soc);
2890 
2891 			goto fail;
2892 		}
2893 	}
2894 
2895 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2896 			"%s TSO Desc Alloc %d, descs = %d\n",
2897 			__func__, num_pool, num_desc);
2898 
2899 	for (i = 0; i < num_pool; i++) {
2900 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
2901 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2902 				"TSO Num of seg Pool alloc %d failed %pK\n",
2903 				i, soc);
2904 
2905 			goto fail;
2906 		}
2907 	}
2908 
2909 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2910 			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
2911 			__func__, num_pool, num_desc);
2912 
2913 	/* Initialize descriptors in TCL Rings */
2914 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2915 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2916 			hal_tx_init_data_ring(soc->hal_soc,
2917 					soc->tcl_data_ring[i].hal_srng);
2918 		}
2919 	}
2920 
2921 	/*
2922 	 * todo - Add a runtime config option to enable this.
2923 	 */
2924 	/*
2925 	 * Due to multiple issues on NPR EMU, enable it selectively
2926 	 * only for NPR EMU, should be removed, once NPR platforms
2927 	 * are stable.
2928 	 */
2929 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
2930 
2931 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2932 			"%s HAL Tx init Success\n", __func__);
2933 
2934 	return QDF_STATUS_SUCCESS;
2935 
2936 fail:
2937 	/* Detach will take care of freeing only allocated resources */
2938 	dp_tx_soc_detach(soc);
2939 	return QDF_STATUS_E_RESOURCES;
2940 }
2941 
2942 /**
2943  * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
2944  * @pdev: pointer to DP PDEV structure
2945  * @seg_info_head: Pointer to the head of list
2946  *
2947  * Return: void
2948  */
2949 static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
2950 		struct dp_tx_seg_info_s *seg_info_head)
2951 {
2952 	struct dp_tx_me_buf_t *mc_uc_buf;
2953 	struct dp_tx_seg_info_s *seg_info_new = NULL;
2954 	qdf_nbuf_t nbuf = NULL;
2955 	uint64_t phy_addr;
2956 
2957 	while (seg_info_head) {
2958 		nbuf = seg_info_head->nbuf;
2959 		mc_uc_buf = (struct dp_tx_me_buf_t *)
2960 			seg_info_head->frags[0].vaddr;
2961 		phy_addr = seg_info_head->frags[0].paddr_hi;
2962 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
2963 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
2964 				phy_addr,
2965 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
2966 		dp_tx_me_free_buf(pdev, mc_uc_buf);
2967 		qdf_nbuf_free(nbuf);
2968 		seg_info_new = seg_info_head;
2969 		seg_info_head = seg_info_head->next;
2970 		qdf_mem_free(seg_info_new);
2971 	}
2972 }
2973 
2974 /**
2975  * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
2976  * @vdev_handle: DP VDEV handle
2977  * @nbuf: Multicast nbuf
2978  * @newmac: Table of the clients to which packets have to be sent
2979  * @new_mac_cnt: No of clients
2980  *
2981  * Return: no of converted packets
2982  */
2983 uint16_t
2984 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
2985 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
2986 {
2987 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
2988 	struct dp_pdev *pdev = vdev->pdev;
2989 	struct ether_header *eh;
2990 	uint8_t *data;
2991 	uint16_t len;
2992 
2993 	/* reference to frame dst addr */
2994 	uint8_t *dstmac;
2995 	/* copy of original frame src addr */
2996 	uint8_t srcmac[DP_MAC_ADDR_LEN];
2997 
2998 	/* local index into newmac */
2999 	uint8_t new_mac_idx = 0;
3000 	struct dp_tx_me_buf_t *mc_uc_buf;
3001 	qdf_nbuf_t  nbuf_clone;
3002 	struct dp_tx_msdu_info_s msdu_info;
3003 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3004 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3005 	struct dp_tx_seg_info_s *seg_info_new;
3006 	struct dp_tx_frag_info_s data_frag;
3007 	qdf_dma_addr_t paddr_data;
3008 	qdf_dma_addr_t paddr_mcbuf = 0;
3009 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3010 	QDF_STATUS status;
3011 
3012 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3013 
3014 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3015 
3016 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3017 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3018 
3019 	len = qdf_nbuf_len(nbuf);
3020 
3021 	data = qdf_nbuf_data(nbuf);
3022 
3023 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3024 			QDF_DMA_TO_DEVICE);
3025 
3026 	if (status) {
3027 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3028 				"Mapping failure Error:%d", status);
3029 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3030 		return 0;
3031 	}
3032 
3033 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3034 
3035 	/* preparing data fragment */
3036 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3037 	data_frag.paddr_lo = (uint32_t)paddr_data;
3038 	data_frag.paddr_hi = (((uint64_t) paddr_data)  >> 32);
3039 	data_frag.len = len - DP_MAC_ADDR_LEN;
3040 
3041 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3042 		dstmac = newmac[new_mac_idx];
3043 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3044 				"added mac addr (%pM)", dstmac);
3045 
3046 		/* Check for NULL Mac Address */
3047 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3048 			continue;
3049 
3050 		/* frame to self mac. skip */
3051 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3052 			continue;
3053 
3054 		/*
3055 		 * TODO: optimize to avoid malloc in per-packet path
3056 		 * For eg. seg_pool can be made part of vdev structure
3057 		 */
3058 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3059 
3060 		if (!seg_info_new) {
3061 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3062 					"alloc failed");
3063 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3064 			goto fail_seg_alloc;
3065 		}
3066 
3067 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3068 		if (mc_uc_buf == NULL)
3069 			goto fail_buf_alloc;
3070 
3071 		/*
3072 		 * TODO: Check if we need to clone the nbuf
3073 		 * Or can we just use the reference for all cases
3074 		 */
3075 		if (new_mac_idx < (new_mac_cnt - 1)) {
3076 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3077 			if (nbuf_clone == NULL) {
3078 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3079 				goto fail_clone;
3080 			}
3081 		} else {
3082 			/*
3083 			 * Update the ref
3084 			 * to account for frame sent without cloning
3085 			 */
3086 			qdf_nbuf_ref(nbuf);
3087 			nbuf_clone = nbuf;
3088 		}
3089 
3090 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3091 
3092 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3093 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3094 				&paddr_mcbuf);
3095 
3096 		if (status) {
3097 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3098 					"Mapping failure Error:%d", status);
3099 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3100 			goto fail_map;
3101 		}
3102 
3103 		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
3104 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3105 		seg_info_new->frags[0].paddr_hi =
3106 			((uint64_t) paddr_mcbuf >> 32);
3107 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3108 
3109 		seg_info_new->frags[1] = data_frag;
3110 		seg_info_new->nbuf = nbuf_clone;
3111 		seg_info_new->frag_cnt = 2;
3112 		seg_info_new->total_len = len;
3113 
3114 		seg_info_new->next = NULL;
3115 
3116 		if (seg_info_head == NULL)
3117 			seg_info_head = seg_info_new;
3118 		else
3119 			seg_info_tail->next = seg_info_new;
3120 
3121 		seg_info_tail = seg_info_new;
3122 	}
3123 
3124 	if (!seg_info_head)
3125 		return 0;
3126 
3127 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3128 	msdu_info.num_seg = new_mac_cnt;
3129 	msdu_info.frm_type = dp_tx_frm_me;
3130 
3131 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3132 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3133 
3134 	while (seg_info_head->next) {
3135 		seg_info_new = seg_info_head;
3136 		seg_info_head = seg_info_head->next;
3137 		qdf_mem_free(seg_info_new);
3138 	}
3139 	qdf_mem_free(seg_info_head);
3140 
3141 	return new_mac_cnt;
3142 
3143 fail_map:
3144 	qdf_nbuf_free(nbuf_clone);
3145 
3146 fail_clone:
3147 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3148 
3149 fail_buf_alloc:
3150 	qdf_mem_free(seg_info_new);
3151 
3152 fail_seg_alloc:
3153 	dp_tx_me_mem_free(pdev, seg_info_head);
3154 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3155 	return 0;
3156 }
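
/*
 * Illustrative usage sketch (not part of this file's build): a hypothetical
 * mcast enhancement caller handing a multicast frame and a table of client
 * MAC addresses to dp_tx_me_send_convert_ucast(). The client table and the
 * handling of a zero return below are assumptions for illustration; the
 * function returns the number of unicast copies queued, and 0 means no copy
 * was queued.
 *
 * uint8_t clients[2][DP_MAC_ADDR_LEN] = {
 *	{0x00, 0x03, 0x7f, 0x00, 0x00, 0x01},
 *	{0x00, 0x03, 0x7f, 0x00, 0x00, 0x02},
 * };
 * uint16_t sent;
 *
 * sent = dp_tx_me_send_convert_ucast(vdev_handle, mcast_nbuf, clients, 2);
 * if (sent == 0)
 *	qdf_nbuf_free(mcast_nbuf);
 */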
3157