xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_tx.h"
21 #include "dp_tx_desc.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "hal_tx.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_net_types.h"
28 #include <wlan_cfg.h>
29 #ifdef MESH_MODE_SUPPORT
30 #include "if_meta_hdr.h"
31 #endif
32 
33 #define DP_TX_QUEUE_MASK 0x3
34 
35 /* TODO Add support in TSO */
36 #define DP_DESC_NUM_FRAG(x) 0
37 
38 /* disable TQM_BYPASS */
39 #define TQM_BYPASS_WAR 0
40 
41 /* invalid peer id for reinject*/
42 #define DP_INVALID_PEER 0XFFFE
43 
44 /* Mapping between HAL encrypt type and cdp_sec_type */
45 #define MAX_CDP_SEC_TYPE 12
46 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
47 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
48 					HAL_TX_ENCRYPT_TYPE_WEP_128,
49 					HAL_TX_ENCRYPT_TYPE_WEP_104,
50 					HAL_TX_ENCRYPT_TYPE_WEP_40,
51 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
53 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
54 					HAL_TX_ENCRYPT_TYPE_WAPI,
55 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
56 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
58 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
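/*
 * Illustrative usage (sketch only, mirroring dp_tx_hw_enqueue() below): the
 * cdp_sec_type configured on the vdev, or supplied in exception path
 * metadata, indexes this table directly to program the HAL encrypt type:
 *
 *	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *			sec_type_map[sec_type]);
 */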
59 
60 /**
61  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
62  * @vdev: DP Virtual device handle
63  * @nbuf: Buffer pointer
64  * @queue: queue ids container for nbuf
65  *
66  * The TX queue is identified by two IDs: a software descriptor pool id and a
67  * DMA ring id. Depending on the TX feature set and hardware configuration,
68  * the combination of queue ids can differ.
69  * For example -
70  * With XPS enabled, all TX descriptor pools and DMA rings are assigned per CPU id.
71  * With no XPS (lock based resource protection), descriptor pool ids differ
72  * for each vdev, and the DMA ring id is the same as the single pdev id.
73  *
74  * Return: None
75  */
76 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
77 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
78 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
79 {
80 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
81 	queue->desc_pool_id = queue_offset;
82 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
83 
84 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
85 			"%s, pool_id:%d ring_id: %d",
86 			__func__, queue->desc_pool_id, queue->ring_id);
87 
88 	return;
89 }
90 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
91 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
92 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
93 {
94 	/* get flow id */
95 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
96 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
97 
98 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
99 			"%s, pool_id:%d ring_id: %d",
100 			__func__, queue->desc_pool_id, queue->ring_id);
101 
102 	return;
103 }
104 #endif
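/*
 * Illustrative example (not driver logic): with QCA_OL_TX_MULTIQ_SUPPORT and
 * DP_TX_QUEUE_MASK 0x3, an nbuf whose queue_mapping is 6 selects
 * desc_pool_id = (6 & 0x3) = 2 and ring_id = soc->tx_ring_map[2]. Without
 * multiqueue support, both ids are derived from the vdev/pdev instead.
 */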
105 
106 #if defined(FEATURE_TSO)
107 /**
108  * dp_tx_tso_unmap_segment() - Unmap TSO segment
109  *
110  * @soc - core txrx main context
111  * @tx_desc - Tx software descriptor
112  */
113 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
114 				    struct dp_tx_desc_s *tx_desc)
115 {
116 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
117 	if (qdf_unlikely(!tx_desc->tso_desc)) {
118 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
119 			  "%s %d TSO desc is NULL!",
120 			  __func__, __LINE__);
121 		qdf_assert(0);
122 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
123 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
124 			  "%s %d TSO num desc is NULL!",
125 			  __func__, __LINE__);
126 		qdf_assert(0);
127 	} else {
128 		bool is_last_seg;
129 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
130 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
131 
132 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
133 			is_last_seg = false;
134 		else
135 			is_last_seg = true;
136 		tso_num_desc->num_seg.tso_cmn_num_seg--;
137 		qdf_nbuf_unmap_tso_segment(soc->osdev,
138 					   tx_desc->tso_desc, is_last_seg);
139 	}
140 }
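/*
 * Example (illustrative): for a TSO nbuf split into three segments,
 * tso_cmn_num_seg starts at 3. The first two unmap calls pass
 * is_last_seg = false; the third call (tso_cmn_num_seg == 1) passes
 * is_last_seg = true so qdf_nbuf_unmap_tso_segment() can release the DMA
 * mapping of the parent nbuf.
 */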
141 
142 /**
143  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
144  *                            back to the freelist
145  *
146  * @soc - soc device handle
147  * @tx_desc - Tx software descriptor
148  */
149 static void dp_tx_tso_desc_release(struct dp_soc *soc,
150 				   struct dp_tx_desc_s *tx_desc)
151 {
152 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
153 	if (qdf_unlikely(!tx_desc->tso_desc)) {
154 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
155 			  "%s %d TSO desc is NULL!",
156 			  __func__, __LINE__);
157 		qdf_assert(0);
158 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
159 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
160 			  "%s %d TSO num desc is NULL!",
161 			  __func__, __LINE__);
162 		qdf_assert(0);
163 	} else {
164 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
165 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
166 
167 		/* Add the tso num segment into the free list */
168 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
169 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
170 					    tx_desc->tso_num_desc);
171 			tx_desc->tso_num_desc = NULL;
172 		}
173 
174 		/* Add the tso segment into the free list*/
175 		dp_tx_tso_desc_free(soc,
176 				    tx_desc->pool_id, tx_desc->tso_desc);
177 		tx_desc->tso_desc = NULL;
178 	}
179 }
180 #else
181 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
182 				    struct dp_tx_desc_s *tx_desc)
184 {
185 }
186 
187 static void dp_tx_tso_desc_release(struct dp_soc *soc,
188 				   struct dp_tx_desc_s *tx_desc)
189 {
190 }
191 #endif
192 /**
193  * dp_tx_desc_release() - Release Tx Descriptor
194  * @tx_desc: Tx Descriptor
195  * @desc_pool_id: Descriptor Pool ID
196  *
197  * Deallocate all resources attached to Tx descriptor and free the Tx
198  * descriptor.
199  *
200  * Return: None
201  */
202 static void
203 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
204 {
205 	struct dp_pdev *pdev = tx_desc->pdev;
206 	struct dp_soc *soc;
207 	uint8_t comp_status = 0;
208 
209 	qdf_assert(pdev);
210 
211 	soc = pdev->soc;
212 
213 	if (tx_desc->frm_type == dp_tx_frm_tso)
214 		dp_tx_tso_desc_release(soc, tx_desc);
215 
216 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
217 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
218 
219 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
220 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
221 
222 	qdf_atomic_dec(&pdev->num_tx_outstanding);
223 
224 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
225 		qdf_atomic_dec(&pdev->num_tx_exception);
226 
227 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
228 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
229 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
230 	else
231 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
232 
233 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
234 		"Tx Completion Release desc %d status %d outstanding %d",
235 		tx_desc->id, comp_status,
236 		qdf_atomic_read(&pdev->num_tx_outstanding));
237 
238 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
239 	return;
240 }
241 
242 /**
243  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
244  * @vdev: DP vdev Handle
245  * @nbuf: skb
246  *
247  * Prepares and fills HTT metadata in the frame pre-header for special frames
248  * that should be transmitted using varying transmit parameters.
249  * There are 2 VDEV modes that currently need this special metadata -
250  *  1) Mesh Mode
251  *  2) DSRC Mode
252  *
253  * Return: HTT metadata size
254  *
255  */
256 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
257 		uint32_t *meta_data)
258 {
259 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
260 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
261 
262 	uint8_t htt_desc_size;
263 
264 	/* Size rounded up to a multiple of 8 bytes */
265 	uint8_t htt_desc_size_aligned;
266 
267 	uint8_t *hdr = NULL;
268 
269 	/*
270 	 * Metadata - HTT MSDU Extension header
271 	 */
272 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
273 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
274 
275 	if (vdev->mesh_vdev) {
276 
277 		/* Fill and add HTT metaheader */
278 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
279 		if (hdr == NULL) {
280 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
281 					"Error in filling HTT metadata\n");
282 
283 			return 0;
284 		}
285 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
286 
287 	} else if (vdev->opmode == wlan_op_mode_ocb) {
288 		/* Todo - Add support for DSRC */
289 	}
290 
291 	return htt_desc_size_aligned;
292 }
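/*
 * Worked example of the 8-byte rounding above (size is hypothetical): if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were 34 bytes,
 * (34 + 7) & ~0x7 = 40, so the metadata pushed in front of the frame keeps
 * the buffer start 8-byte aligned.
 */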
293 
294 /**
295  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
296  * @tso_seg: TSO segment to process
297  * @ext_desc: Pointer to MSDU extension descriptor
298  *
299  * Return: void
300  */
301 #if defined(FEATURE_TSO)
302 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
303 		void *ext_desc)
304 {
305 	uint8_t num_frag;
306 	uint32_t tso_flags;
307 
308 	/*
309 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
310 	 * tcp_flag_mask
311 	 *
312 	 * Checksum enable flags are set in TCL descriptor and not in Extension
313 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
314 	 */
315 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
316 
317 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
318 
319 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
320 		tso_seg->tso_flags.ip_len);
321 
322 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
323 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
324 
325 
326 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
327 		uint32_t lo = 0;
328 		uint32_t hi = 0;
329 
330 		qdf_dmaaddr_to_32s(
331 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
332 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
333 			tso_seg->tso_frags[num_frag].length);
334 	}
335 
336 	return;
337 }
338 #else
339 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
340 		void *ext_desc)
341 {
342 	return;
343 }
344 #endif
345 
346 #if defined(FEATURE_TSO)
347 /**
348  * dp_tx_free_tso_seg() - Loop through the tso segments
349  *                        allocated and free them
350  *
351  * @soc: soc handle
352  * @free_seg: list of tso segments
353  * @msdu_info: msdu descriptor
354  *
355  * Return - void
356  */
357 static void dp_tx_free_tso_seg(struct dp_soc *soc,
358 	struct qdf_tso_seg_elem_t *free_seg,
359 	struct dp_tx_msdu_info_s *msdu_info)
360 {
361 	struct qdf_tso_seg_elem_t *next_seg;
362 
363 	while (free_seg) {
364 		next_seg = free_seg->next;
365 		dp_tx_tso_desc_free(soc,
366 			msdu_info->tx_queue.desc_pool_id,
367 			free_seg);
368 		free_seg = next_seg;
369 	}
370 }
371 
372 /**
373  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
374  *                            allocated and free them
375  *
376  * @soc:  soc handle
377  * @free_seg: list of tso num segments
378  * @msdu_info: msdu descriptor
379  * Return - void
380  */
381 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
382 	struct qdf_tso_num_seg_elem_t *free_seg,
383 	struct dp_tx_msdu_info_s *msdu_info)
384 {
385 	struct qdf_tso_num_seg_elem_t *next_seg;
386 
387 	while (free_seg) {
388 		next_seg = free_seg->next;
389 		dp_tso_num_seg_free(soc,
390 			msdu_info->tx_queue.desc_pool_id,
391 			free_seg);
392 		free_seg = next_seg;
393 	}
394 }
395 
396 /**
397  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
398  * @vdev: virtual device handle
399  * @msdu: network buffer
400  * @msdu_info: meta data associated with the msdu
401  *
402  * Return: QDF_STATUS_SUCCESS on success, error QDF_STATUS on failure
403  */
404 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
405 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
406 {
407 	struct qdf_tso_seg_elem_t *tso_seg;
408 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
409 	struct dp_soc *soc = vdev->pdev->soc;
410 	struct qdf_tso_info_t *tso_info;
411 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
412 
413 	tso_info = &msdu_info->u.tso_info;
414 	tso_info->curr_seg = NULL;
415 	tso_info->tso_seg_list = NULL;
416 	tso_info->num_segs = num_seg;
417 	msdu_info->frm_type = dp_tx_frm_tso;
418 	tso_info->tso_num_seg_list = NULL;
419 
420 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
421 
422 	while (num_seg) {
423 		tso_seg = dp_tx_tso_desc_alloc(
424 				soc, msdu_info->tx_queue.desc_pool_id);
425 		if (tso_seg) {
426 			tso_seg->next = tso_info->tso_seg_list;
427 			tso_info->tso_seg_list = tso_seg;
428 			num_seg--;
429 		} else {
430 			struct qdf_tso_seg_elem_t *free_seg =
431 				tso_info->tso_seg_list;
432 
433 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
434 
435 			return QDF_STATUS_E_NOMEM;
436 		}
437 	}
438 
439 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
440 
441 	tso_num_seg = dp_tso_num_seg_alloc(soc,
442 			msdu_info->tx_queue.desc_pool_id);
443 
444 	if (tso_num_seg) {
445 		tso_num_seg->next = tso_info->tso_num_seg_list;
446 		tso_info->tso_num_seg_list = tso_num_seg;
447 	} else {
448 		/* Bug: free tso_num_seg and tso_seg */
449 		/* Free the already allocated num of segments */
450 		struct qdf_tso_seg_elem_t *free_seg =
451 					tso_info->tso_seg_list;
452 
453 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
454 			__func__);
455 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
456 
457 		return QDF_STATUS_E_NOMEM;
458 	}
459 
460 	msdu_info->num_seg =
461 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
462 
463 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
464 			msdu_info->num_seg);
465 
466 	if (!(msdu_info->num_seg)) {
467 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
468 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
469 					msdu_info);
470 		return QDF_STATUS_E_INVAL;
471 	}
472 
473 	tso_info->curr_seg = tso_info->tso_seg_list;
474 
475 	return QDF_STATUS_SUCCESS;
476 }
477 #else
478 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
479 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
480 {
481 	return QDF_STATUS_E_NOMEM;
482 }
483 #endif
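/*
 * Illustrative flow (sizes are examples only): for a jumbo nbuf carrying a
 * 4380-byte TCP payload with an MSS of 1460, qdf_nbuf_get_tso_num_seg()
 * reports 3 segments; three qdf_tso_seg_elem_t entries plus one
 * qdf_tso_num_seg_elem_t are pulled from the pools, and
 * qdf_nbuf_get_tso_info() fills the per-segment flags and fragments that
 * dp_tx_prepare_tso_ext_desc() later programs into the MSDU extension
 * descriptor.
 */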
484 
485 /**
486  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
487  * @vdev: DP Vdev handle
488  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
489  * @desc_pool_id: Descriptor Pool ID
490  *
491  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
492  */
493 static
494 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
495 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
496 {
497 	uint8_t i;
498 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
499 	struct dp_tx_seg_info_s *seg_info;
500 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
501 	struct dp_soc *soc = vdev->pdev->soc;
502 
503 	/* Allocate an extension descriptor */
504 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
505 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
506 
507 	if (!msdu_ext_desc) {
508 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
509 		return NULL;
510 	}
511 
512 	if (msdu_info->exception_fw &&
513 			qdf_unlikely(vdev->mesh_vdev)) {
514 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
515 				&msdu_info->meta_data[0],
516 				sizeof(struct htt_tx_msdu_desc_ext2_t));
517 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
518 	}
519 
520 	switch (msdu_info->frm_type) {
521 	case dp_tx_frm_sg:
522 	case dp_tx_frm_me:
523 	case dp_tx_frm_raw:
524 		seg_info = msdu_info->u.sg_info.curr_seg;
525 		/* Update the buffer pointers in MSDU Extension Descriptor */
526 		for (i = 0; i < seg_info->frag_cnt; i++) {
527 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
528 				seg_info->frags[i].paddr_lo,
529 				seg_info->frags[i].paddr_hi,
530 				seg_info->frags[i].len);
531 		}
532 
533 		break;
534 
535 	case dp_tx_frm_tso:
536 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
537 				&cached_ext_desc[0]);
538 		break;
539 
540 
541 	default:
542 		break;
543 	}
544 
545 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
546 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
547 
548 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
549 			msdu_ext_desc->vaddr);
550 
551 	return msdu_ext_desc;
552 }
553 
554 /**
555  * dp_tx_trace_pkt() - Trace TX packet at DP layer
556  *
557  * @skb: skb to be traced
558  * @msdu_id: msdu_id of the packet
559  * @vdev_id: vdev_id of the packet
560  *
561  * Return: None
562  */
563 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
564 			    uint8_t vdev_id)
565 {
566 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
567 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
568 	DPTRACE(qdf_dp_trace_ptr(skb,
569 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
570 				 QDF_TRACE_DEFAULT_PDEV_ID,
571 				 qdf_nbuf_data_addr(skb),
572 				 sizeof(qdf_nbuf_data(skb)),
573 				 msdu_id, vdev_id));
574 
575 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
576 
577 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
578 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
579 				      msdu_id, QDF_TX));
580 }
581 
582 /**
583  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
584  * @vdev: DP vdev handle
585  * @nbuf: skb
586  * @desc_pool_id: Descriptor pool ID
587  * @msdu_info: MSDU information (carries the HTT metadata for the FW)
588  * @tx_exc_metadata: Handle that holds exception path metadata
589  * Allocate and prepare Tx descriptor with msdu information.
590  *
591  * Return: Pointer to Tx Descriptor on success,
592  *         NULL on failure
593  */
594 static
595 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
596 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
597 		struct dp_tx_msdu_info_s *msdu_info,
598 		struct cdp_tx_exception_metadata *tx_exc_metadata)
599 {
600 	uint8_t align_pad;
601 	uint8_t is_exception = 0;
602 	uint8_t htt_hdr_size;
603 	struct ether_header *eh;
604 	struct dp_tx_desc_s *tx_desc;
605 	struct dp_pdev *pdev = vdev->pdev;
606 	struct dp_soc *soc = pdev->soc;
607 
608 	/* Allocate software Tx descriptor */
609 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
610 	if (qdf_unlikely(!tx_desc)) {
611 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
612 		return NULL;
613 	}
614 
615 	/* Flow control/Congestion Control counters */
616 	qdf_atomic_inc(&pdev->num_tx_outstanding);
617 
618 	/* Initialize the SW tx descriptor */
619 	tx_desc->nbuf = nbuf;
620 	tx_desc->frm_type = dp_tx_frm_std;
621 	tx_desc->tx_encap_type = (tx_exc_metadata ?
622 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
623 	tx_desc->vdev = vdev;
624 	tx_desc->pdev = pdev;
625 	tx_desc->msdu_ext_desc = NULL;
626 	tx_desc->pkt_offset = 0;
627 
628 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
629 
630 	/*
631 	 * For special modes (vdev_type == ocb or mesh), data frames should be
632 	 * transmitted using varying transmit parameters (tx spec) which include
633 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
634 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
635 	 * These frames are sent as exception packets to firmware.
636 	 *
637 	 * HW requirement is that metadata should always point to a
638 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
639 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
640 	 *  to get 8-byte aligned start address along with align_pad added
641 	 *
642 	 *  |-----------------------------|
643 	 *  |                             |
644 	 *  |-----------------------------| <-----Buffer Pointer Address given
645 	 *  |                             |  ^    in HW descriptor (aligned)
646 	 *  |       HTT Metadata          |  |
647 	 *  |                             |  |
648 	 *  |                             |  | Packet Offset given in descriptor
649 	 *  |                             |  |
650 	 *  |-----------------------------|  |
651 	 *  |       Alignment Pad         |  v
652 	 *  |-----------------------------| <----- Actual buffer start address
653 	 *  |        SKB Data             |           (Unaligned)
654 	 *  |                             |
655 	 *  |                             |
656 	 *  |                             |
657 	 *  |                             |
658 	 *  |                             |
659 	 *  |-----------------------------|
660 	 */
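	/*
	 * Illustrative example: if qdf_nbuf_data(nbuf) ends in ...0x5,
	 * align_pad = 5 bytes are pushed first, then the 8-byte aligned HTT
	 * metadata, and tx_desc->pkt_offset = align_pad + htt_hdr_size tells
	 * HW where the original payload starts.
	 */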
661 	if (qdf_unlikely((msdu_info->exception_fw)) ||
662 				(vdev->opmode == wlan_op_mode_ocb)) {
663 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
664 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
665 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
666 					"qdf_nbuf_push_head failed\n");
667 			goto failure;
668 		}
669 
670 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
671 				msdu_info->meta_data);
672 		if (htt_hdr_size == 0)
673 			goto failure;
674 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
675 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
676 		is_exception = 1;
677 	}
678 
679 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
680 				qdf_nbuf_map(soc->osdev, nbuf,
681 					QDF_DMA_TO_DEVICE))) {
682 		/* Handle failure */
683 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
684 				"qdf_nbuf_map failed\n");
685 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
686 		goto failure;
687 	}
688 
689 	if (qdf_unlikely(vdev->nawds_enabled)) {
690 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
691 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
692 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
693 			is_exception = 1;
694 		}
695 	}
696 
697 #if !TQM_BYPASS_WAR
698 	if (is_exception || tx_exc_metadata)
699 #endif
700 	{
701 		/* Temporary WAR due to TQM VP issues */
702 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
703 		qdf_atomic_inc(&pdev->num_tx_exception);
704 	}
705 
706 	return tx_desc;
707 
708 failure:
709 	dp_tx_desc_release(tx_desc, desc_pool_id);
710 	return NULL;
711 }
712 
713 /**
714  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
715  * @vdev: DP vdev handle
716  * @nbuf: skb
717  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
718  * @desc_pool_id : Descriptor Pool ID
719  *
720  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
721  * information. For frames with fragments, allocate and prepare
722  * an MSDU extension descriptor
723  *
724  * Return: Pointer to Tx Descriptor on success,
725  *         NULL on failure
726  */
727 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
728 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
729 		uint8_t desc_pool_id)
730 {
731 	struct dp_tx_desc_s *tx_desc;
732 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
733 	struct dp_pdev *pdev = vdev->pdev;
734 	struct dp_soc *soc = pdev->soc;
735 
736 	/* Allocate software Tx descriptor */
737 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
738 	if (!tx_desc) {
739 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
740 		return NULL;
741 	}
742 
743 	/* Flow control/Congestion Control counters */
744 	qdf_atomic_inc(&pdev->num_tx_outstanding);
745 
746 	/* Initialize the SW tx descriptor */
747 	tx_desc->nbuf = nbuf;
748 	tx_desc->frm_type = msdu_info->frm_type;
749 	tx_desc->tx_encap_type = vdev->tx_encap_type;
750 	tx_desc->vdev = vdev;
751 	tx_desc->pdev = pdev;
752 	tx_desc->pkt_offset = 0;
753 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
754 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
755 
756 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
757 
758 	/* Handle scattered frames - TSO/SG/ME */
759 	/* Allocate and prepare an extension descriptor for scattered frames */
760 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
761 	if (!msdu_ext_desc) {
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
763 				"%s Tx Extension Descriptor Alloc Fail\n",
764 				__func__);
765 		goto failure;
766 	}
767 
768 #if TQM_BYPASS_WAR
769 	/* Temporary WAR due to TQM VP issues */
770 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
771 	qdf_atomic_inc(&pdev->num_tx_exception);
772 #endif
773 	if (qdf_unlikely(msdu_info->exception_fw))
774 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
775 
776 	tx_desc->msdu_ext_desc = msdu_ext_desc;
777 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
778 
779 	return tx_desc;
780 failure:
781 	dp_tx_desc_release(tx_desc, desc_pool_id);
782 	return NULL;
783 }
784 
785 /**
786  * dp_tx_prepare_raw() - Prepare RAW packet TX
787  * @vdev: DP vdev handle
788  * @nbuf: buffer pointer
789  * @seg_info: Pointer to Segment info Descriptor to be prepared
790  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
791  *     descriptor
792  *
793  * Return: nbuf on success, NULL on DMA mapping failure
794  */
795 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
796 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
797 {
798 	qdf_nbuf_t curr_nbuf = NULL;
799 	uint16_t total_len = 0;
800 	qdf_dma_addr_t paddr;
801 	int32_t i;
802 	int32_t mapped_buf_num = 0;
803 
804 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
805 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
806 
807 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
808 
809 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
810 	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
811 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
812 
813 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
814 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
815 
816 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
817 					QDF_DMA_TO_DEVICE)) {
818 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
819 				"%s dma map error \n", __func__);
820 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
821 			mapped_buf_num = i;
822 			goto error;
823 		}
824 
825 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
826 		seg_info->frags[i].paddr_lo = paddr;
827 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
828 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
829 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
830 		total_len += qdf_nbuf_len(curr_nbuf);
831 	}
832 
833 	seg_info->frag_cnt = i;
834 	seg_info->total_len = total_len;
835 	seg_info->next = NULL;
836 
837 	sg_info->curr_seg = seg_info;
838 
839 	msdu_info->frm_type = dp_tx_frm_raw;
840 	msdu_info->num_seg = 1;
841 
842 	return nbuf;
843 
844 error:
845 	i = 0;
846 	while (nbuf) {
847 		curr_nbuf = nbuf;
848 		if (i < mapped_buf_num) {
849 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
850 			i++;
851 		}
852 		nbuf = qdf_nbuf_next(nbuf);
853 		qdf_nbuf_free(curr_nbuf);
854 	}
855 	return NULL;
856 
857 }
858 
859 /**
860  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
861  * @soc: DP Soc Handle
862  * @vdev: DP vdev handle
863  * @tx_desc: Tx Descriptor Handle
864  * @tid: TID from HLOS for overriding default DSCP-TID mapping
865  * @fw_metadata: Metadata to send to Target Firmware along with frame
866  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
867  * @tx_exc_metadata: Handle that holds exception path meta data
868  *
869  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
870  *  from software Tx descriptor
871  *
872  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES on failure
873  */
874 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
875 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
876 				   uint16_t fw_metadata, uint8_t ring_id,
877 				   struct cdp_tx_exception_metadata
878 					*tx_exc_metadata)
879 {
880 	uint8_t type;
881 	uint16_t length;
882 	void *hal_tx_desc, *hal_tx_desc_cached;
883 	qdf_dma_addr_t dma_addr;
884 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
885 
886 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
887 			tx_exc_metadata->sec_type : vdev->sec_type);
888 
889 	/* Return Buffer Manager ID */
890 	uint8_t bm_id = ring_id;
891 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
892 
893 	hal_tx_desc_cached = (void *) cached_desc;
894 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
895 
896 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
897 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
898 		type = HAL_TX_BUF_TYPE_EXT_DESC;
899 		dma_addr = tx_desc->msdu_ext_desc->paddr;
900 	} else {
901 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
902 		type = HAL_TX_BUF_TYPE_BUFFER;
903 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
904 	}
905 
906 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
907 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
908 			dma_addr , bm_id, tx_desc->id, type);
909 
910 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
911 		return QDF_STATUS_E_RESOURCES;
912 
913 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
914 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
915 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
916 	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
917 					HAL_TX_DESC_DEFAULT_LMAC_ID);
918 	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
919 			vdev->dscp_tid_map_id);
920 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
921 			sec_type_map[sec_type]);
922 
923 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
924 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
925 			__func__, length, type, (uint64_t)dma_addr,
926 			tx_desc->pkt_offset, tx_desc->id);
927 
928 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
929 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
930 
931 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
932 			vdev->hal_desc_addr_search_flags);
933 
934 	/* verify checksum offload configuration*/
935 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
936 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
937 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
938 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
939 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
940 	}
941 
942 	if (tid != HTT_TX_EXT_TID_INVALID)
943 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
944 
945 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
946 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
947 
948 
949 	/* Sync cached descriptor with HW */
950 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
951 
952 	if (!hal_tx_desc) {
953 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
954 			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
955 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
956 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
957 		return QDF_STATUS_E_RESOURCES;
958 	}
959 
960 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
961 
962 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
963 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
964 
965 	/*
966 	 * If one packet is enqueued in HW, PM usage count needs to be
967 	 * incremented by one to prevent future runtime suspend. This
968 	 * should be tied with the success of enqueuing. It will be
969 	 * decremented after the packet has been sent.
970 	 */
971 	hif_pm_runtime_get_noresume(soc->hif_handle);
972 
973 	return QDF_STATUS_SUCCESS;
974 }
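/*
 * Worked example (values are hypothetical): a frame sent on the mesh/OCB
 * path with 3 bytes of alignment pad and 40 bytes of HTT metadata has
 * pkt_offset = 43; the TCL descriptor is then programmed with
 * type = HAL_TX_BUF_TYPE_BUFFER, buf_offset = 43 and
 * length = qdf_nbuf_len(nbuf) - 43, so the target skips the pre-header.
 */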
975 
976 
977 /**
978  * dp_cce_classify() - Classify the frame based on CCE rules
979  * @vdev: DP vdev handle
980  * @nbuf: skb
981  *
982  * Classify frames based on CCE rules
983  * Return: true if classified, else false
985  */
986 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
987 {
988 	struct ether_header *eh = NULL;
989 	uint16_t   ether_type;
990 	qdf_llc_t *llcHdr;
991 	qdf_nbuf_t nbuf_clone = NULL;
992 	qdf_dot3_qosframe_t *qos_wh = NULL;
993 
994 	/* for mesh packets don't do any classification */
995 	if (qdf_unlikely(vdev->mesh_vdev))
996 		return false;
997 
998 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
999 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1000 		ether_type = eh->ether_type;
1001 		llcHdr = (qdf_llc_t *)(nbuf->data +
1002 					sizeof(struct ether_header));
1003 	} else {
1004 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1005 		/* For encrypted packets don't do any classification */
1006 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1007 			return false;
1008 
1009 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1010 			if (qdf_unlikely(
1011 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1012 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1013 
1014 				ether_type = *(uint16_t *)(nbuf->data
1015 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1016 						+ sizeof(qdf_llc_t)
1017 						- sizeof(ether_type));
1018 				llcHdr = (qdf_llc_t *)(nbuf->data +
1019 						QDF_IEEE80211_4ADDR_HDR_LEN);
1020 			} else {
1021 				ether_type = *(uint16_t *)(nbuf->data
1022 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1023 						+ sizeof(qdf_llc_t)
1024 						- sizeof(ether_type));
1025 				llcHdr = (qdf_llc_t *)(nbuf->data +
1026 					QDF_IEEE80211_3ADDR_HDR_LEN);
1027 			}
1028 
1029 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1030 				&& (ether_type ==
1031 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1032 
1033 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1034 				return true;
1035 			}
1036 		}
1037 
1038 		return false;
1039 	}
1040 
1041 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1042 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1043 				sizeof(*llcHdr));
1044 		nbuf_clone = qdf_nbuf_clone(nbuf);
1045 		if (qdf_unlikely(nbuf_clone)) {
1046 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1047 
1048 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1049 				qdf_nbuf_pull_head(nbuf_clone,
1050 						sizeof(qdf_net_vlanhdr_t));
1051 			}
1052 		}
1053 	} else {
1054 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1055 			nbuf_clone = qdf_nbuf_clone(nbuf);
1056 			if (qdf_unlikely(nbuf_clone)) {
1057 				qdf_nbuf_pull_head(nbuf_clone,
1058 					sizeof(qdf_net_vlanhdr_t));
1059 			}
1060 		}
1061 	}
1062 
1063 	if (qdf_unlikely(nbuf_clone))
1064 		nbuf = nbuf_clone;
1065 
1066 
1067 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1068 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1069 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1070 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1071 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1072 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1073 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1074 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1075 		if (qdf_unlikely(nbuf_clone != NULL))
1076 			qdf_nbuf_free(nbuf_clone);
1077 		return true;
1078 	}
1079 
1080 	if (qdf_unlikely(nbuf_clone != NULL))
1081 		qdf_nbuf_free(nbuf_clone);
1082 
1083 	return false;
1084 }
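/*
 * Summary (for reference): when HW CCE is disabled, the classification above
 * marks EAPOL, ARP, WAPI, TDLS and DHCP (IPv4/IPv6) frames so that
 * dp_tx_send_msdu_single()/dp_tx_send_msdu_multiple() can steer them to the
 * VO TID and route them to the FW as exception packets.
 */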
1085 
1086 /**
1087  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1088  * @vdev: DP vdev handle
1089  * @nbuf: skb
1090  * @msdu_info: MSDU info in which the extracted TID is stored
 *
1091  * Extract the DSCP or PCP information from frame and map into TID value.
1092  * Software based TID classification is required when more than 2 DSCP-TID
1093  * mapping tables are needed.
1094  * Hardware supports 2 DSCP-TID mapping tables
1095  *
1096  * Return: void
1097  */
1098 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1099 		struct dp_tx_msdu_info_s *msdu_info)
1100 {
1101 	uint8_t tos = 0, dscp_tid_override = 0;
1102 	uint8_t *hdr_ptr, *L3datap;
1103 	uint8_t is_mcast = 0;
1104 	struct ether_header *eh = NULL;
1105 	qdf_ethervlan_header_t *evh = NULL;
1106 	uint16_t   ether_type;
1107 	qdf_llc_t *llcHdr;
1108 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1109 
1110 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1111 
1112 	if (vdev->dscp_tid_map_id <= 1)
1113 		return;
1114 
1115 	/* for mesh packets don't do any classification */
1116 	if (qdf_unlikely(vdev->mesh_vdev))
1117 		return;
1118 
1119 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1120 		eh = (struct ether_header *) nbuf->data;
1121 		hdr_ptr = eh->ether_dhost;
1122 		L3datap = hdr_ptr + sizeof(struct ether_header);
1123 	} else {
1124 		qdf_dot3_qosframe_t *qos_wh =
1125 			(qdf_dot3_qosframe_t *) nbuf->data;
1126 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1127 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1128 		return;
1129 	}
1130 
1131 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1132 	ether_type = eh->ether_type;
1133 
1134 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1135 	/*
1136 	 * Check if packet is dot3 or eth2 type.
1137 	 */
1138 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1139 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1140 				sizeof(*llcHdr));
1141 
1142 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1143 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1144 				sizeof(*llcHdr);
1145 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1146 					+ sizeof(*llcHdr) +
1147 					sizeof(qdf_net_vlanhdr_t));
1148 		} else {
1149 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1150 				sizeof(*llcHdr);
1151 		}
1152 	} else {
1153 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1154 			evh = (qdf_ethervlan_header_t *) eh;
1155 			ether_type = evh->ether_type;
1156 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * Find priority from IP TOS DSCP field
1162 	 */
1163 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1164 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1165 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1166 			/* Only for unicast frames */
1167 			if (!is_mcast) {
1168 				/* send it on VO queue */
1169 				msdu_info->tid = DP_VO_TID;
1170 			}
1171 		} else {
1172 			/*
1173 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1174 			 * from TOS byte.
1175 			 */
1176 			tos = ip->ip_tos;
1177 			dscp_tid_override = 1;
1178 
1179 		}
1180 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1181 		/* TODO
1182 		 * use flowlabel
1183 		 *igmpmld cases to be handled in phase 2
1184 		 */
1185 		unsigned long ver_pri_flowlabel;
1186 		unsigned long pri;
1187 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1188 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1189 			DP_IPV6_PRIORITY_SHIFT;
1190 		tos = pri;
1191 		dscp_tid_override = 1;
1192 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1193 		msdu_info->tid = DP_VO_TID;
1194 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1195 		/* Only for unicast frames */
1196 		if (!is_mcast) {
1197 			/* send ucast arp on VO queue */
1198 			msdu_info->tid = DP_VO_TID;
1199 		}
1200 	}
1201 
1202 	/*
1203 	 * Assign all MCAST packets to BE
1204 	 */
1205 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1206 		if (is_mcast) {
1207 			tos = 0;
1208 			dscp_tid_override = 1;
1209 		}
1210 	}
1211 
1212 	if (dscp_tid_override == 1) {
1213 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1214 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1215 	}
1216 	return;
1217 }
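/*
 * Illustrative example (assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3F): an IPv4 packet with TOS 0xB8 (DSCP 46, EF)
 * yields tos = (0xB8 >> 2) & 0x3F = 46, and the TID is taken from
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */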
1218 
1219 #ifdef CONVERGED_TDLS_ENABLE
1220 /**
1221  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1222  * @tx_desc: TX descriptor
1223  *
1224  * Return: None
1225  */
1226 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1227 {
1228 	if (tx_desc->vdev) {
1229 		if (tx_desc->vdev->is_tdls_frame) {
1230 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1231 			tx_desc->vdev->is_tdls_frame = false;
		}
1232 	}
1233 }
1234 
1235 /**
1236  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1237  * @tx_desc: TX descriptor
1238  * @vdev: datapath vdev handle
1239  *
1240  * Return: None
1241  */
1242 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1243 				  struct dp_vdev *vdev)
1244 {
1245 	struct hal_tx_completion_status ts = {0};
1246 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1247 
1248 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
1249 	if (vdev->tx_non_std_data_callback.func) {
1250 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1251 		vdev->tx_non_std_data_callback.func(
1252 				vdev->tx_non_std_data_callback.ctxt,
1253 				nbuf, ts.status);
1254 		return;
1255 	}
1256 }
1257 #endif
1258 
1259 /**
1260  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1261  * @vdev: DP vdev handle
1262  * @nbuf: skb
1263  * @msdu_info: MSDU info (TID, FW metadata and Tx queue to be used)
1266  * @peer_id: peer_id of the peer in case of NAWDS frames
1267  * @tx_exc_metadata: Handle that holds exception path metadata
1268  *
1269  * Return: NULL on success,
1270  *         nbuf when it fails to send
1271  */
1272 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1273 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1274 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1275 {
1276 	struct dp_pdev *pdev = vdev->pdev;
1277 	struct dp_soc *soc = pdev->soc;
1278 	struct dp_tx_desc_s *tx_desc;
1279 	QDF_STATUS status;
1280 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1281 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1282 	uint16_t htt_tcl_metadata = 0;
1283 	uint8_t tid = msdu_info->tid;
1284 
1285 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1286 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1287 			msdu_info, tx_exc_metadata);
1288 	if (!tx_desc) {
1289 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1290 			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
1291 			  __func__, vdev, tx_q->desc_pool_id);
1292 		return nbuf;
1293 	}
1294 
1295 	if (qdf_unlikely(soc->cce_disable)) {
1296 		if (dp_cce_classify(vdev, nbuf) == true) {
1297 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1298 			tid = DP_VO_TID;
1299 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1300 		}
1301 	}
1302 
1303 	dp_tx_update_tdls_flags(tx_desc);
1304 
1305 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1306 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1307 				"%s %d : HAL RING Access Failed -- %pK\n",
1308 				__func__, __LINE__, hal_srng);
1309 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1310 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1311 		goto fail_return;
1312 	}
1313 
1314 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1315 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1316 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1317 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1318 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1319 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1320 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1321 				peer_id);
1322 	} else
1323 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1324 
1325 
1326 	if (msdu_info->exception_fw) {
1327 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1328 	}
1329 
1330 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1331 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1332 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1333 
1334 	if (status != QDF_STATUS_SUCCESS) {
1335 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1336 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1337 			  __func__, tx_desc, tx_q->ring_id);
1338 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1339 		goto fail_return;
1340 	}
1341 
1342 	nbuf = NULL;
1343 
1344 fail_return:
1345 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1346 		hal_srng_access_end(soc->hal_soc, hal_srng);
1347 		hif_pm_runtime_put(soc->hif_handle);
1348 	} else {
1349 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1350 	}
1351 
1352 	return nbuf;
1353 }
1354 
1355 /**
1356  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1357  * @vdev: DP vdev handle
1358  * @nbuf: skb
1359  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1360  *
1361  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1362  *
1363  * Return: NULL on success,
1364  *         nbuf when it fails to send
1365  */
1366 #if QDF_LOCK_STATS
1367 static noinline
1368 #else
1369 static
1370 #endif
1371 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1372 				    struct dp_tx_msdu_info_s *msdu_info)
1373 {
1374 	uint8_t i;
1375 	struct dp_pdev *pdev = vdev->pdev;
1376 	struct dp_soc *soc = pdev->soc;
1377 	struct dp_tx_desc_s *tx_desc;
1378 	bool is_cce_classified = false;
1379 	QDF_STATUS status;
1380 	uint16_t htt_tcl_metadata = 0;
1381 
1382 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1383 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1384 
1385 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1386 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1387 				"%s %d : HAL RING Access Failed -- %pK\n",
1388 				__func__, __LINE__, hal_srng);
1389 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1390 		return nbuf;
1391 	}
1392 
1393 	if (qdf_unlikely(soc->cce_disable)) {
1394 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1395 		if (is_cce_classified) {
1396 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1397 			msdu_info->tid = DP_VO_TID;
1398 		}
1399 	}
1400 
1401 	if (msdu_info->frm_type == dp_tx_frm_me)
1402 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1403 
1404 	i = 0;
1405 	/* Print statement to track i and num_seg */
1406 	/*
1407 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1408 	 * descriptors using information in msdu_info
1409 	 */
1410 	while (i < msdu_info->num_seg) {
1411 		/*
1412 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1413 		 * descriptor
1414 		 */
1415 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1416 				tx_q->desc_pool_id);
1417 
1418 		if (!tx_desc) {
1419 			if (msdu_info->frm_type == dp_tx_frm_me) {
1420 				dp_tx_me_free_buf(pdev,
1421 					(void *)(msdu_info->u.sg_info
1422 						.curr_seg->frags[0].vaddr));
1423 			}
1424 			goto done;
1425 		}
1426 
1427 		if (msdu_info->frm_type == dp_tx_frm_me) {
1428 			tx_desc->me_buffer =
1429 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1430 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1431 		}
1432 
1433 		if (is_cce_classified)
1434 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1435 
1436 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1437 		if (msdu_info->exception_fw) {
1438 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1439 		}
1440 
1441 		/*
1442 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1443 		 */
1444 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1445 			htt_tcl_metadata, tx_q->ring_id, NULL);
1446 
1447 		if (status != QDF_STATUS_SUCCESS) {
1448 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1449 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
1450 				  __func__, tx_desc, tx_q->ring_id);
1451 
1452 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1453 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1454 
1455 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1456 			goto done;
1457 		}
1458 
1459 		/*
1460 		 * TODO
1461 		 * if tso_info structure can be modified to have curr_seg
1462 		 * as first element, following 2 blocks of code (for TSO and SG)
1463 		 * can be combined into 1
1464 		 */
1465 
1466 		/*
1467 		 * For frames with multiple segments (TSO, ME), jump to next
1468 		 * segment.
1469 		 */
1470 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1471 			if (msdu_info->u.tso_info.curr_seg->next) {
1472 				msdu_info->u.tso_info.curr_seg =
1473 					msdu_info->u.tso_info.curr_seg->next;
1474 
1475 				/*
1476 				 * If this is a jumbo nbuf, then increment the number of
1477 				 * nbuf users for each additional segment of the msdu.
1478 				 * This will ensure that the skb is freed only after
1479 				 * receiving tx completion for all segments of an nbuf
1480 				 */
1481 				qdf_nbuf_inc_users(nbuf);
1482 
1483 				/* Check with MCL if this is needed */
1484 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1485 			}
1486 		}
1487 
1488 		/*
1489 		 * For Multicast-Unicast converted packets,
1490 		 * each converted frame (for a client) is represented as
1491 		 * 1 segment
1492 		 */
1493 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1494 				(msdu_info->frm_type == dp_tx_frm_me)) {
1495 			if (msdu_info->u.sg_info.curr_seg->next) {
1496 				msdu_info->u.sg_info.curr_seg =
1497 					msdu_info->u.sg_info.curr_seg->next;
1498 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1499 			}
1500 		}
1501 		i++;
1502 	}
1503 
1504 	nbuf = NULL;
1505 
1506 done:
1507 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1508 		hal_srng_access_end(soc->hal_soc, hal_srng);
1509 		hif_pm_runtime_put(soc->hif_handle);
1510 	} else {
1511 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1512 	}
1513 
1514 	return nbuf;
1515 }
1516 
1517 /**
1518  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1519  *                     for SG frames
1520  * @vdev: DP vdev handle
1521  * @nbuf: skb
1522  * @seg_info: Pointer to Segment info Descriptor to be prepared
1523  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1524  *
1525  * Return: nbuf on success,
1526  *         NULL when it fails to prepare the SG info
1527  */
1528 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1529 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1530 {
1531 	uint32_t cur_frag, nr_frags;
1532 	qdf_dma_addr_t paddr;
1533 	struct dp_tx_sg_info_s *sg_info;
1534 
1535 	sg_info = &msdu_info->u.sg_info;
1536 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1537 
1538 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1539 				QDF_DMA_TO_DEVICE)) {
1540 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1541 				"dma map error\n");
1542 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1543 
1544 		qdf_nbuf_free(nbuf);
1545 		return NULL;
1546 	}
1547 
1548 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1549 	seg_info->frags[0].paddr_lo = paddr;
1550 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1551 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1552 	seg_info->frags[0].vaddr = (void *) nbuf;
1553 
1554 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1555 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1556 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1557 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1558 					"frag dma map error\n");
1559 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1560 			qdf_nbuf_free(nbuf);
1561 			return NULL;
1562 		}
1563 
1564 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1565 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1566 		seg_info->frags[cur_frag + 1].paddr_hi =
1567 			((uint64_t) paddr) >> 32;
1568 		seg_info->frags[cur_frag + 1].len =
1569 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1570 	}
1571 
1572 	seg_info->frag_cnt = (cur_frag + 1);
1573 	seg_info->total_len = qdf_nbuf_len(nbuf);
1574 	seg_info->next = NULL;
1575 
1576 	sg_info->curr_seg = seg_info;
1577 
1578 	msdu_info->frm_type = dp_tx_frm_sg;
1579 	msdu_info->num_seg = 1;
1580 
1581 	return nbuf;
1582 }
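/*
 * Illustrative layout (sketch): for an nbuf with a mapped linear area and
 * two page fragments, frags[0] describes the linear data and
 * frags[1]/frags[2] the page fragments, giving frag_cnt = 3; the single
 * seg_info is then consumed by dp_tx_prepare_ext_desc() to build the MSDU
 * extension descriptor.
 */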
1583 
1584 #ifdef MESH_MODE_SUPPORT
1585 
1586 /**
1587  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1588  *				and prepare msdu_info for mesh frames.
1589  * @vdev: DP vdev handle
1590  * @nbuf: skb
1591  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1592  *
1593  * Return: NULL on failure,
1594  *         nbuf when extracted successfully
1595  */
1596 static
1597 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1598 				struct dp_tx_msdu_info_s *msdu_info)
1599 {
1600 	struct meta_hdr_s *mhdr;
1601 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1602 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1603 
1604 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1605 
1606 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1607 		msdu_info->exception_fw = 0;
1608 		goto remove_meta_hdr;
1609 	}
1610 
1611 	msdu_info->exception_fw = 1;
1612 
1613 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1614 
1615 	meta_data->host_tx_desc_pool = 1;
1616 	meta_data->update_peer_cache = 1;
1617 	meta_data->learning_frame = 1;
1618 
1619 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1620 		meta_data->power = mhdr->power;
1621 
1622 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1623 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1624 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1625 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1626 
1627 		meta_data->dyn_bw = 1;
1628 
1629 		meta_data->valid_pwr = 1;
1630 		meta_data->valid_mcs_mask = 1;
1631 		meta_data->valid_nss_mask = 1;
1632 		meta_data->valid_preamble_type  = 1;
1633 		meta_data->valid_retries = 1;
1634 		meta_data->valid_bw_info = 1;
1635 	}
1636 
1637 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1638 		meta_data->encrypt_type = 0;
1639 		meta_data->valid_encrypt_type = 1;
1640 		meta_data->learning_frame = 0;
1641 	}
1642 
1643 	meta_data->valid_key_flags = 1;
1644 	meta_data->key_flags = (mhdr->keyix & 0x3);
1645 
1646 remove_meta_hdr:
1647 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1648 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1649 				"qdf_nbuf_pull_head failed\n");
1650 		qdf_nbuf_free(nbuf);
1651 		return NULL;
1652 	}
1653 
1654 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1655 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1656 	else
1657 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1658 
1659 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1660 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1661 			" tid %d to_fw %d\n",
1662 			__func__, msdu_info->meta_data[0],
1663 			msdu_info->meta_data[1],
1664 			msdu_info->meta_data[2],
1665 			msdu_info->meta_data[3],
1666 			msdu_info->meta_data[4],
1667 			msdu_info->meta_data[5],
1668 			msdu_info->tid, msdu_info->exception_fw);
1669 
1670 	return nbuf;
1671 }
1672 #else
1673 static
1674 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1675 				struct dp_tx_msdu_info_s *msdu_info)
1676 {
1677 	return nbuf;
1678 }
1679 
1680 #endif
1681 
1682 #ifdef DP_FEATURE_NAWDS_TX
1683 /**
1684  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1685  * @vdev: dp_vdev handle
1686  * @nbuf: skb
1687  * @msdu_info: MSDU info (TID, FW metadata and Tx queue to be used)
1688  *
1689  * Return: NULL on success, nbuf on failure
1693  */
1694 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1695 		struct dp_tx_msdu_info_s *msdu_info)
1696 {
1697 	struct dp_peer *peer = NULL;
1698 	struct dp_soc *soc = vdev->pdev->soc;
1699 	struct dp_ast_entry *ast_entry = NULL;
1700 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1701 	uint16_t peer_id = HTT_INVALID_PEER;
1702 
1703 	struct dp_peer *sa_peer = NULL;
1704 	qdf_nbuf_t nbuf_copy;
1705 
1706 	qdf_spin_lock_bh(&(soc->ast_lock));
1707 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1708 
1709 	if (ast_entry)
1710 		sa_peer = ast_entry->peer;
1711 
1712 	qdf_spin_unlock_bh(&(soc->ast_lock));
1713 
1714 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1715 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1716 				(peer->nawds_enabled)) {
1717 			if (sa_peer == peer) {
1718 				QDF_TRACE(QDF_MODULE_ID_DP,
1719 						QDF_TRACE_LEVEL_DEBUG,
1720 						" %s: broadcast multicast packet",
1721 						 __func__);
1722 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1723 				continue;
1724 			}
1725 
1726 			nbuf_copy = qdf_nbuf_copy(nbuf);
1727 			if (!nbuf_copy) {
1728 				QDF_TRACE(QDF_MODULE_ID_DP,
1729 						QDF_TRACE_LEVEL_ERROR,
1730 						"nbuf copy failed");
				continue;
1731 			}
1732 
1733 			peer_id = peer->peer_ids[0];
1734 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1735 					msdu_info, peer_id, NULL);
1736 			if (nbuf_copy != NULL) {
1737 				qdf_nbuf_free(nbuf_copy);
1738 				continue;
1739 			}
1740 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1741 						1, qdf_nbuf_len(nbuf));
1742 		}
1743 	}
1744 	if (peer_id == HTT_INVALID_PEER)
1745 		return nbuf;
1746 
1747 	return NULL;
1748 }
1749 #endif
1750 
1751 /**
1752  * dp_check_exc_metadata() - Checks if parameters are valid
1753  * @tx_exc: holds all exception path parameters
1754  *
1755  * Return: true when all the parameters are valid, else false
1756  *
1757  */
1758 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1759 {
1760 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1761 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1762 	    tx_exc->sec_type > cdp_num_sec_types) {
1763 		return false;
1764 	}
1765 
1766 	return true;
1767 }
1768 
1769 /**
1770  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1771  * @vap_dev: DP vdev handle
1772  * @nbuf: skb
1773  * @tx_exc_metadata: Handle that holds exception path meta data
1774  *
1775  * Entry point for Core Tx layer (DP_TX) invoked from
1776  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1777  *
1778  * Return: NULL on success,
1779  *         nbuf when it fails to send
1780  */
1781 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1782 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1783 {
1784 	struct ether_header *eh = NULL;
1785 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1786 	struct dp_tx_msdu_info_s msdu_info;
1787 
1788 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1789 
1790 	msdu_info.tid = tx_exc_metadata->tid;
1791 
1792 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1793 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1794 			"%s , skb %pM",
1795 			__func__, nbuf->data);
1796 
1797 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1798 
1799 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1801 			"Invalid parameters in exception path");
1802 		goto fail;
1803 	}
1804 
1805 	/* Basic sanity checks for unsupported packets */
1806 
1807 	/* MESH mode */
1808 	if (qdf_unlikely(vdev->mesh_vdev)) {
1809 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1810 			"Mesh mode is not supported in exception path");
1811 		goto fail;
1812 	}
1813 
1814 	/* TSO or SG */
1815 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1816 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1817 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1818 			  "TSO and SG are not supported in exception path");
1819 
1820 		goto fail;
1821 	}
1822 
1823 	/* RAW */
1824 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1825 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1826 			  "Raw frame is not supported in exception path");
1827 		goto fail;
1828 	}
1829 
1830 
1831 	/* Mcast enhancement*/
1832 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1833 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1834 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1835 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
1836 		}
1837 	}
1838 
1839 	/*
1840 	 * Get HW Queue to use for this frame.
1841 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1842 	 * dedicated for data and 1 for command.
1843 	 * "queue_id" maps to one hardware ring.
1844 	 *  With each ring, we also associate a unique Tx descriptor pool
1845 	 *  to minimize lock contention for these resources.
1846 	 */
1847 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1848 
1849 	/* Reset the control block */
1850 	qdf_nbuf_reset_ctxt(nbuf);
1851 
1852 	/*  Single linear frame */
1853 	/*
1854 	 * If nbuf is a simple linear frame, use send_single function to
1855 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1856 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1857 	 */
1858 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1859 			tx_exc_metadata->peer_id, tx_exc_metadata);
1860 
1861 	return nbuf;
1862 
1863 fail:
1864 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1865 			"pkt send failed");
1866 	return nbuf;
1867 }
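
/*
 * Illustrative usage sketch (not part of the driver): a caller in the
 * OSIF/HDD layer is expected to fill only the exception metadata fields
 * validated by dp_check_exc_metadata() and to free the nbuf if it is
 * handed back (a non-NULL return means the frame was not consumed).
 * vdev_handle, peer_id and nbuf below are placeholders for the caller's
 * own context:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.peer_id = peer_id;
 *	tx_exc.tid = HTT_INVALID_TID;
 *	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	tx_exc.sec_type = cdp_sec_type_none;
 *
 *	nbuf = dp_tx_send_exception(vdev_handle, nbuf, &tx_exc);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */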
1868 
1869 /**
1870  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1871  * @vap_dev: DP vdev handle
1872  * @nbuf: skb
1873  *
1874  * Entry point for Core Tx layer (DP_TX) invoked from
1875  * hard_start_xmit in OSIF/HDD
1876  *
1877  * Return: NULL on success,
1878  *         nbuf when it fails to send
1879  */
1880 #ifdef MESH_MODE_SUPPORT
1881 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1882 {
1883 	struct meta_hdr_s *mhdr;
1884 	qdf_nbuf_t nbuf_mesh = NULL;
1885 	qdf_nbuf_t nbuf_clone = NULL;
1886 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1887 	uint8_t no_enc_frame = 0;
1888 
1889 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1890 	if (nbuf_mesh == NULL) {
1891 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1892 				"qdf_nbuf_unshare failed\n");
1893 		return nbuf;
1894 	}
1895 	nbuf = nbuf_mesh;
1896 
1897 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1898 
1899 	if ((vdev->sec_type != cdp_sec_type_none) &&
1900 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1901 		no_enc_frame = 1;
1902 
1903 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1904 		       !no_enc_frame) {
1905 		nbuf_clone = qdf_nbuf_clone(nbuf);
1906 		if (nbuf_clone == NULL) {
1907 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1908 				"qdf_nbuf_clone failed\n");
1909 			return nbuf;
1910 		}
1911 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1912 	}
1913 
1914 	if (nbuf_clone) {
1915 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1916 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1917 		} else {
1918 			qdf_nbuf_free(nbuf_clone);
1919 		}
1920 	}
1921 
1922 	if (no_enc_frame)
1923 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1924 	else
1925 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1926 
1927 	nbuf = dp_tx_send(vap_dev, nbuf);
1928 	if ((nbuf == NULL) && no_enc_frame) {
1929 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1930 	}
1931 
1932 	return nbuf;
1933 }
1934 
1935 #else
1936 
1937 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1938 {
1939 	return dp_tx_send(vap_dev, nbuf);
1940 }
1941 
1942 #endif
1943 
1944 /**
1945  * dp_tx_send() - Transmit a frame on a given VAP
1946  * @vap_dev: DP vdev handle
1947  * @nbuf: skb
1948  *
1949  * Entry point for Core Tx layer (DP_TX) invoked from
1950  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1951  * cases
1952  *
1953  * Return: NULL on success,
1954  *         nbuf when it fails to send
1955  */
1956 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1957 {
1958 	struct ether_header *eh = NULL;
1959 	struct dp_tx_msdu_info_s msdu_info;
1960 	struct dp_tx_seg_info_s seg_info;
1961 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1962 	uint16_t peer_id = HTT_INVALID_PEER;
1963 	qdf_nbuf_t nbuf_mesh = NULL;
1964 
1965 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1966 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1967 
1968 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1969 
1970 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1971 			"%s , skb %pM",
1972 			__func__, nbuf->data);
1973 
1974 	/*
1975 	 * Set Default Host TID value to invalid TID
1976 	 * (TID override disabled)
1977 	 */
1978 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1979 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1980 
1981 	if (qdf_unlikely(vdev->mesh_vdev)) {
1982 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1983 								&msdu_info);
1984 		if (nbuf_mesh == NULL) {
1985 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1986 					"Extracting mesh metadata failed\n");
1987 			return nbuf;
1988 		}
1989 		nbuf = nbuf_mesh;
1990 	}
1991 
1992 	/*
1993 	 * Get HW Queue to use for this frame.
1994 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1995 	 * dedicated for data and 1 for command.
1996 	 * "queue_id" maps to one hardware ring.
1997 	 *  With each ring, we also associate a unique Tx descriptor pool
1998 	 *  to minimize lock contention for these resources.
1999 	 */
2000 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2001 
2002 	/*
2003 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2004 	 *  Table 1 - Default DSCP-TID mapping table
2005 	 *  Table 2 - 1 DSCP-TID override table
2006 	 *
2007 	 * If we need a different DSCP-TID mapping for this vap,
2008 	 * call tid_classify to extract DSCP/ToS from frame and
2009 	 * map to a TID and store in msdu_info. This is later used
2010 	 * to fill in TCL Input descriptor (per-packet TID override).
2011 	 */
2012 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2013 
2014 	/* Reset the control block */
2015 	qdf_nbuf_reset_ctxt(nbuf);
2016 
2017 	/*
2018 	 * Classify the frame and call corresponding
2019 	 * "prepare" function which extracts the segment (TSO)
2020 	 * and fragmentation information (for TSO, SG, ME, or Raw)
2021 	 * into MSDU_INFO structure which is later used to fill
2022 	 * SW and HW descriptors.
2023 	 */
2024 	if (qdf_nbuf_is_tso(nbuf)) {
2025 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2026 			  "%s TSO frame %pK\n", __func__, vdev);
2027 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2028 				qdf_nbuf_len(nbuf));
2029 
2030 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2031 			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
2032 			return nbuf;
2033 		}
2034 
2035 		goto send_multiple;
2036 	}
2037 
2038 	/* SG */
2039 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2040 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2041 
2042 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2043 			 "%s non-TSO SG frame %pK\n", __func__, vdev);
2044 
2045 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2046 				qdf_nbuf_len(nbuf));
2047 
2048 		goto send_multiple;
2049 	}
2050 
2051 #ifdef ATH_SUPPORT_IQUE
2052 	/* Mcast to Ucast Conversion*/
2053 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2054 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2055 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2056 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2057 				  "%s Mcast frm for ME %pK\n", __func__, vdev);
2058 
2059 			DP_STATS_INC_PKT(vdev,
2060 					tx_i.mcast_en.mcast_pkt, 1,
2061 					qdf_nbuf_len(nbuf));
2062 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2063 					QDF_STATUS_SUCCESS) {
2064 				return NULL;
2065 			}
2066 		}
2067 	}
2068 #endif
2069 
2070 	/* RAW */
2071 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2072 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2073 		if (nbuf == NULL)
2074 			return NULL;
2075 
2076 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2077 			  "%s Raw frame %pK\n", __func__, vdev);
2078 
2079 		goto send_multiple;
2080 
2081 	}
2082 
2083 	/*  Single linear frame */
2084 	/*
2085 	 * If nbuf is a simple linear frame, use send_single function to
2086 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2087 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2088 	 */
2089 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2090 
2091 	return nbuf;
2092 
2093 send_multiple:
2094 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2095 
2096 	return nbuf;
2097 }
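
/*
 * Illustrative caller sketch (not part of the driver): dp_tx_send()
 * consumes the nbuf on success and returns it on failure, so a typical
 * OSIF transmit hook only needs to free whatever comes back.
 * vdev_handle and nbuf are placeholders for the caller's context:
 *
 *	qdf_nbuf_t ret = dp_tx_send(vdev_handle, nbuf);
 *
 *	if (ret)
 *		qdf_nbuf_free(ret);
 */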
2098 
2099 /**
2100  * dp_tx_reinject_handler() - Tx Reinject Handler
2101  * @tx_desc: software descriptor head pointer
2102  * @status : Tx completion status from HTT descriptor
2103  *
2104  * This function reinjects frames back to Target.
2105  * Todo - Host queue needs to be added
2106  *
2107  * Return: none
2108  */
2109 static
2110 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2111 {
2112 	struct dp_vdev *vdev;
2113 	struct dp_peer *peer = NULL;
2114 	uint32_t peer_id = HTT_INVALID_PEER;
2115 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2116 	qdf_nbuf_t nbuf_copy = NULL;
2117 	struct dp_tx_msdu_info_s msdu_info;
2118 	struct dp_peer *sa_peer = NULL;
2119 	struct dp_ast_entry *ast_entry = NULL;
2120 	struct dp_soc *soc = NULL;
2121 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2122 #ifdef WDS_VENDOR_EXTENSION
2123 	int is_mcast = 0, is_ucast = 0;
2124 	int num_peers_3addr = 0;
2125 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2126 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2127 #endif
2128 
2129 	vdev = tx_desc->vdev;
2130 	soc = vdev->pdev->soc;
2131 
2132 	qdf_assert(vdev);
2133 
2134 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2135 
2136 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2137 
2138 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2139 			"%s Tx reinject path\n", __func__);
2140 
2141 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2142 			qdf_nbuf_len(tx_desc->nbuf));
2143 
2144 	qdf_spin_lock_bh(&(soc->ast_lock));
2145 
2146 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2147 
2148 	if (ast_entry)
2149 		sa_peer = ast_entry->peer;
2150 
2151 	qdf_spin_unlock_bh(&(soc->ast_lock));
2152 
2153 #ifdef WDS_VENDOR_EXTENSION
2154 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2155 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2156 	} else {
2157 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2158 	}
2159 	is_ucast = !is_mcast;
2160 
2161 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2162 		if (peer->bss_peer)
2163 			continue;
2164 
2165 		/* Detect wds peers that use 3-addr framing for mcast.
2166 		 * if there are any, the bss_peer is used to send
2167 		 * the mcast frame using 3-addr format. all wds enabled
2168 		 * peers that use 4-addr framing for mcast frames will
2169 		 * be duplicated and sent as 4-addr frames below.
2170 		 */
2171 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2172 			num_peers_3addr = 1;
2173 			break;
2174 		}
2175 	}
2176 #endif
2177 
2178 	if (qdf_unlikely(vdev->mesh_vdev)) {
2179 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2180 	} else {
2181 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2182 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2183 #ifdef WDS_VENDOR_EXTENSION
2184 			/*
2185 			 * . if 3-addr STA, then send on BSS Peer
2186 			 * . if Peer WDS enabled and accept 4-addr mcast,
2187 			 * send mcast on that peer only
2188 			 * . if Peer WDS enabled and accept 4-addr ucast,
2189 			 * send ucast on that peer only
2190 			 */
2191 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2192 			 (peer->wds_enabled &&
2193 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2194 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2195 #else
2196 			((peer->bss_peer &&
2197 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2198 				 peer->nawds_enabled)) {
2199 #endif
2200 				peer_id = DP_INVALID_PEER;
2201 
2202 				if (peer->nawds_enabled) {
2203 					peer_id = peer->peer_ids[0];
2204 					if (sa_peer == peer) {
2205 						QDF_TRACE(
2206 							QDF_MODULE_ID_DP,
2207 							QDF_TRACE_LEVEL_DEBUG,
2208 							" %s: multicast packet",
2209 							__func__);
2210 						DP_STATS_INC(peer,
2211 							tx.nawds_mcast_drop, 1);
2212 						continue;
2213 					}
2214 				}
2215 
2216 				nbuf_copy = qdf_nbuf_copy(nbuf);
2217 
2218 				if (!nbuf_copy) {
2219 					QDF_TRACE(QDF_MODULE_ID_DP,
2220 						QDF_TRACE_LEVEL_DEBUG,
2221 						FL("nbuf copy failed"));
2222 					break;
2223 				}
2224 
2225 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2226 						nbuf_copy,
2227 						&msdu_info,
2228 						peer_id,
2229 						NULL);
2230 
2231 				if (nbuf_copy) {
2232 					QDF_TRACE(QDF_MODULE_ID_DP,
2233 						QDF_TRACE_LEVEL_DEBUG,
2234 						FL("pkt send failed"));
2235 					qdf_nbuf_free(nbuf_copy);
2236 				} else {
2237 					if (peer_id != DP_INVALID_PEER)
2238 						DP_STATS_INC_PKT(peer,
2239 							tx.nawds_mcast,
2240 							1, qdf_nbuf_len(nbuf));
2241 				}
2242 			}
2243 		}
2244 	}
2245 
2246 	if (vdev->nawds_enabled) {
2247 		peer_id = DP_INVALID_PEER;
2248 
2249 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2250 					1, qdf_nbuf_len(nbuf));
2251 
2252 		nbuf = dp_tx_send_msdu_single(vdev,
2253 				nbuf,
2254 				&msdu_info,
2255 				peer_id, NULL);
2256 
2257 		if (nbuf) {
2258 			QDF_TRACE(QDF_MODULE_ID_DP,
2259 				QDF_TRACE_LEVEL_DEBUG,
2260 				FL("pkt send failed"));
2261 			qdf_nbuf_free(nbuf);
2262 		}
2263 	} else
2264 		qdf_nbuf_free(nbuf);
2265 
2266 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2267 }
2268 
2269 /**
2270  * dp_tx_inspect_handler() - Tx Inspect Handler
2271  * @tx_desc: software descriptor head pointer
2272  * @status : Tx completion status from HTT descriptor
2273  *
2274  * Handles Tx frames sent back to Host for inspection
2275  * (ProxyARP)
2276  *
2277  * Return: none
2278  */
2279 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2280 {
2281 
2282 	struct dp_soc *soc;
2283 	struct dp_pdev *pdev = tx_desc->pdev;
2284 
2285 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2286 			"%s Tx inspect path\n",
2287 			__func__);
2288 
2289 	qdf_assert(pdev);
2290 
2291 	soc = pdev->soc;
2292 
2293 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2294 			qdf_nbuf_len(tx_desc->nbuf));
2295 
2296 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2297 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2298 }
2299 
2300 #ifdef FEATURE_PERPKT_INFO
2301 /**
2302  * dp_get_completion_indication_for_stack() - send completion to stack
2303  * @soc :  dp_soc handle
2304  * @pdev:  dp_pdev handle
2305  * @peer_id: peer_id of the peer for which completion came
2306  * @ppdu_id: ppdu_id
2307  * @first_msdu: first msdu
2308  * @last_msdu: last msdu
2309  * @netbuf: Buffer pointer for free
2310  *
2311  * This function is used for indication whether buffer needs to be
2312  * This function is used to indicate whether the buffer needs to be
2313  * sent to the stack for freeing or not
2314  */
2315 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2316 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2317 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2318 {
2319 	struct tx_capture_hdr *ppdu_hdr;
2320 	struct dp_peer *peer = NULL;
2321 
2322 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2323 		return QDF_STATUS_E_NOSUPPORT;
2324 
2325 	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
2326 			dp_peer_find_by_id(soc, peer_id);
2327 
2328 	if (!peer) {
2329 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2330 				FL("Peer Invalid"));
2331 		return QDF_STATUS_E_INVAL;
2332 	}
2333 
2334 	if (pdev->mcopy_mode) {
2335 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2336 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2337 			return QDF_STATUS_E_INVAL;
2338 		}
2339 
2340 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2341 		pdev->m_copy_id.tx_peer_id = peer_id;
2342 	}
2343 
2344 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2345 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2346 				FL("No headroom"));
2347 		return QDF_STATUS_E_NOMEM;
2348 	}
2349 
2350 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2351 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2352 					IEEE80211_ADDR_LEN);
2353 	ppdu_hdr->ppdu_id = ppdu_id;
2354 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2355 			IEEE80211_ADDR_LEN);
2356 	ppdu_hdr->peer_id = peer_id;
2357 	ppdu_hdr->first_msdu = first_msdu;
2358 	ppdu_hdr->last_msdu = last_msdu;
2359 
2360 	return QDF_STATUS_SUCCESS;
2361 }
2362 
2363 
2364 /**
2365  * dp_send_completion_to_stack() - send completion to stack
2366  * @soc :  dp_soc handle
2367  * @pdev:  dp_pdev handle
2368  * @peer_id: peer_id of the peer for which completion came
2369  * @ppdu_id: ppdu_id
2370  * @netbuf: Buffer pointer for free
2371  *
2372  * This function is used to send a completion to the stack
2373  * so that it can free the buffer
2374  */
2375 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2376 					uint16_t peer_id, uint32_t ppdu_id,
2377 					qdf_nbuf_t netbuf)
2378 {
2379 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2380 				netbuf, peer_id,
2381 				WDI_NO_VAL, pdev->pdev_id);
2382 }
2383 #else
2384 static QDF_STATUS
2385 dp_get_completion_indication_for_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2386 		      uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
2387 		      uint8_t last_msdu, qdf_nbuf_t netbuf)
2388 {
2389 	return QDF_STATUS_E_NOSUPPORT;
2390 }
2391 
2392 static void
2393 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2394 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2395 {
2396 }
2397 #endif
2398 
2399 /**
2400  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2401  * @soc: Soc handle
2402  * @desc: software Tx descriptor to be processed
2403  *
2404  * Return: none
2405  */
2406 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2407 		struct dp_tx_desc_s *desc)
2408 {
2409 	struct dp_vdev *vdev = desc->vdev;
2410 	qdf_nbuf_t nbuf = desc->nbuf;
2411 
2412 	/* If it is TDLS mgmt, don't unmap or free the frame */
2413 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2414 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2415 
2416 	/* 0 : MSDU buffer, 1 : MLE */
2417 	if (desc->msdu_ext_desc) {
2418 		/* TSO free */
2419 		if (hal_tx_ext_desc_get_tso_enable(
2420 					desc->msdu_ext_desc->vaddr)) {
2421 			/* unmap each TSO segment before freeing the nbuf */
2422 			dp_tx_tso_unmap_segment(soc, desc);
2423 			qdf_nbuf_free(nbuf);
2424 			return;
2425 		}
2426 	}
2427 
2428 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2429 
2430 	if (qdf_likely(!vdev->mesh_vdev))
2431 		qdf_nbuf_free(nbuf);
2432 	else {
2433 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2434 			qdf_nbuf_free(nbuf);
2435 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2436 		} else
2437 			vdev->osif_tx_free_ext((nbuf));
2438 	}
2439 }
2440 
2441 /**
2442  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2443  * @vdev: pointer to dp vdev handle
2444  * @status : Tx completion status from HTT descriptor
2445  *
2446  * Handles MEC notify event sent from fw to Host
2447  *
2448  * Return: none
2449  */
2450 #ifdef FEATURE_WDS
2451 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2452 {
2453 
2454 	struct dp_soc *soc;
2455 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2456 	struct dp_peer *peer;
2457 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2458 
2459 	if (!vdev->wds_enabled)
2460 		return;
2461 
2462 	soc = vdev->pdev->soc;
2463 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2464 	peer = TAILQ_FIRST(&vdev->peer_list);
2465 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2466 
2467 	if (!peer) {
2468 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2469 				FL("peer is NULL"));
2470 		return;
2471 	}
2472 
2473 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2474 			"%s Tx MEC Handler\n",
2475 			__func__);
2476 
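	/*
	 * Reconstruct the MAC address carried in the HTT MEC notify status
	 * words: the address bytes start at offset (DP_MAC_ADDR_LEN - 2) in
	 * the status buffer and are copied out in reverse byte order.
	 */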
2477 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2478 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2479 					status[(DP_MAC_ADDR_LEN - 2) + i];
2480 
2481 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2482 		dp_peer_add_ast(soc,
2483 				peer,
2484 				mac_addr,
2485 				CDP_TXRX_AST_TYPE_MEC,
2486 				flags);
2487 }
2488 #endif
2489 
2490 /**
2491  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2492  * @tx_desc: software descriptor head pointer
2493  * @status : Tx completion status from HTT descriptor
2494  *
2495  * This function will process HTT Tx indication messages from Target
2496  *
2497  * Return: none
2498  */
2499 static
2500 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2501 {
2502 	uint8_t tx_status;
2503 	struct dp_pdev *pdev;
2504 	struct dp_vdev *vdev;
2505 	struct dp_soc *soc;
2506 	uint32_t *htt_status_word = (uint32_t *) status;
2507 
2508 	qdf_assert(tx_desc->pdev);
2509 
2510 	pdev = tx_desc->pdev;
2511 	vdev = tx_desc->vdev;
2512 	soc = pdev->soc;
2513 
2514 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2515 
2516 	switch (tx_status) {
2517 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2518 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2519 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2520 	{
2521 		dp_tx_comp_free_buf(soc, tx_desc);
2522 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2523 		break;
2524 	}
2525 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2526 	{
2527 		dp_tx_reinject_handler(tx_desc, status);
2528 		break;
2529 	}
2530 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2531 	{
2532 		dp_tx_inspect_handler(tx_desc, status);
2533 		break;
2534 	}
2535 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2536 	{
2537 		dp_tx_mec_handler(vdev, status);
2538 		break;
2539 	}
2540 	default:
2541 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2542 				"%s Invalid HTT tx_status %d\n",
2543 				__func__, tx_status);
2544 		break;
2545 	}
2546 }
2547 
2548 #ifdef MESH_MODE_SUPPORT
2549 /**
2550  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2551  *                                         in mesh meta header
2552  * @tx_desc: software descriptor head pointer
2553  * @ts: pointer to tx completion stats
2554  * Return: none
2555  */
2556 static
2557 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2558 		struct hal_tx_completion_status *ts)
2559 {
2560 	struct meta_hdr_s *mhdr;
2561 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2562 
2563 	if (!tx_desc->msdu_ext_desc) {
2564 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2565 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2566 				"netbuf %pK offset %d\n",
2567 				netbuf, tx_desc->pkt_offset);
2568 			return;
2569 		}
2570 	}
2571 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2572 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2573 			"netbuf %pK offset %d\n", netbuf,
2574 			sizeof(struct meta_hdr_s));
2575 		return;
2576 	}
2577 
2578 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2579 	mhdr->rssi = ts->ack_frame_rssi;
2580 	mhdr->channel = tx_desc->pdev->operating_channel;
2581 }
2582 
2583 #else
2584 static
2585 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2586 		struct hal_tx_completion_status *ts)
2587 {
2588 }
2589 
2590 #endif
2591 
2592 /**
2593  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2594  * @peer: Handle to DP peer
2595  * @ts: pointer to HAL Tx completion stats
2596  * @length: MSDU length
2597  *
2598  * Return: None
2599  */
2600 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2601 		struct hal_tx_completion_status *ts, uint32_t length)
2602 {
2603 	struct dp_pdev *pdev = peer->vdev->pdev;
2604 	struct dp_soc *soc = pdev->soc;
2605 	uint8_t mcs, pkt_type;
2606 
2607 	mcs = ts->mcs;
2608 	pkt_type = ts->pkt_type;
2609 
2610 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2611 		return;
2612 
2613 	if (peer->bss_peer) {
2614 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2615 	} else {
2616 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
2617 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2618 		}
2619 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2620 	}
2621 
2622 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2623 			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2624 
2625 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2626 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2627 
2628 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2629 			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2630 
2631 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2632 			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2633 
2634 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2635 			(ts->status == HAL_TX_TQM_RR_FW_REASON1));
2636 
2637 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2638 			(ts->status == HAL_TX_TQM_RR_FW_REASON2));
2639 
2640 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2641 			(ts->status == HAL_TX_TQM_RR_FW_REASON3));
2642 
2643 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2644 		return;
2645 
2646 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2647 
2648 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2649 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2650 
2651 	if (!(soc->process_tx_status))
2652 		return;
2653 
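	/*
	 * The counters below bucket the reported MCS into per-preamble-type
	 * histograms: an in-range MCS increments its own bucket, while an
	 * out-of-range MCS for that preamble type is accounted in the last
	 * bucket (MAX_MCS - 1).
	 */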
2654 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2655 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2656 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2657 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2658 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2659 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2660 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2661 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2662 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2663 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2664 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2665 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2666 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2667 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2668 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2669 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2670 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2671 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2672 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2673 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2674 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2675 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2676 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2677 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2678 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2679 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2680 	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2681 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2682 
2683 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2684 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
2685 				&peer->stats, ts->peer_id,
2686 				UPDATE_PEER_STATS);
2687 	}
2688 }
2689 
2690 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2691 /**
2692  * dp_tx_flow_pool_lock() - take flow pool lock
2693  * @soc: core txrx main context
2694  * @tx_desc: tx desc
2695  *
2696  * Return: None
2697  */
2698 static inline
2699 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2700 			  struct dp_tx_desc_s *tx_desc)
2701 {
2702 	struct dp_tx_desc_pool_s *pool;
2703 	uint8_t desc_pool_id;
2704 
2705 	desc_pool_id = tx_desc->pool_id;
2706 	pool = &soc->tx_desc[desc_pool_id];
2707 
2708 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2709 }
2710 
2711 /**
2712  * dp_tx_flow_pool_unlock() - release flow pool lock
2713  * @soc: core txrx main context
2714  * @tx_desc: tx desc
2715  *
2716  * Return: None
2717  */
2718 static inline
2719 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2720 			    struct dp_tx_desc_s *tx_desc)
2721 {
2722 	struct dp_tx_desc_pool_s *pool;
2723 	uint8_t desc_pool_id;
2724 
2725 	desc_pool_id = tx_desc->pool_id;
2726 	pool = &soc->tx_desc[desc_pool_id];
2727 
2728 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2729 }
2730 #else
2731 static inline
2732 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2733 {
2734 }
2735 
2736 static inline
2737 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2738 {
2739 }
2740 #endif
2741 
2742 /**
2743  * dp_tx_notify_completion() - Notify tx completion for this desc
2744  * @soc: core txrx main context
2745  * @tx_desc: tx desc
2746  * @netbuf:  buffer
2747  *
2748  * Return: none
2749  */
2750 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2751 					   struct dp_tx_desc_s *tx_desc,
2752 					   qdf_nbuf_t netbuf)
2753 {
2754 	void *osif_dev;
2755 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2756 
2757 	qdf_assert(tx_desc);
2758 
2759 	dp_tx_flow_pool_lock(soc, tx_desc);
2760 
2761 	if (!tx_desc->vdev ||
2762 	    !tx_desc->vdev->osif_vdev) {
2763 		dp_tx_flow_pool_unlock(soc, tx_desc);
2764 		return;
2765 	}
2766 
2767 	osif_dev = tx_desc->vdev->osif_vdev;
2768 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2769 	dp_tx_flow_pool_unlock(soc, tx_desc);
2770 
2771 	if (tx_compl_cbk)
2772 		tx_compl_cbk(netbuf, osif_dev);
2773 }
2774 
2775 /**
2776  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2777  * @tx_desc: software descriptor head pointer
2778  * @length: packet length
2779  *
2780  * Return: none
2781  */
2782 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2783 		uint32_t length)
2784 {
2785 	struct hal_tx_completion_status ts;
2786 	struct dp_soc *soc = NULL;
2787 	struct dp_vdev *vdev = tx_desc->vdev;
2788 	struct dp_peer *peer = NULL;
2789 	struct ether_header *eh =
2790 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2791 
2792 	hal_tx_comp_get_status(&tx_desc->comp, &ts);
2793 
2794 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2795 				"-------------------- \n"
2796 				"Tx Completion Stats: \n"
2797 				"-------------------- \n"
2798 				"ack_frame_rssi = %d \n"
2799 				"first_msdu = %d \n"
2800 				"last_msdu = %d \n"
2801 				"msdu_part_of_amsdu = %d \n"
2802 				"rate_stats valid = %d \n"
2803 				"bw = %d \n"
2804 				"pkt_type = %d \n"
2805 				"stbc = %d \n"
2806 				"ldpc = %d \n"
2807 				"sgi = %d \n"
2808 				"mcs = %d \n"
2809 				"ofdma = %d \n"
2810 				"tones_in_ru = %d \n"
2811 				"tsf = %d \n"
2812 				"ppdu_id = %d \n"
2813 				"transmit_cnt = %d \n"
2814 				"tid = %d \n"
2815 				"peer_id = %d \n",
2816 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2817 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2818 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2819 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2820 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2821 				ts.peer_id);
2822 
2823 	if (!vdev) {
2824 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2825 				"invalid vdev");
2826 		goto out;
2827 	}
2828 
2829 	soc = vdev->pdev->soc;
2830 
2831 	/* Update SoC level stats */
2832 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2833 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2834 
2835 	/* Update per-packet stats */
2836 	if (qdf_unlikely(vdev->mesh_vdev) &&
2837 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2838 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2839 
2840 	/* Update peer level stats */
2841 	peer = dp_peer_find_by_id(soc, ts.peer_id);
2842 	if (!peer) {
2843 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2844 				"invalid peer");
2845 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2846 		goto out;
2847 	}
2848 
2849 	if (qdf_likely(peer->vdev->tx_encap_type ==
2850 				htt_cmn_pkt_type_ethernet)) {
2851 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2852 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2853 	}
2854 
2855 	dp_tx_update_peer_stats(peer, &ts, length);
2856 
2857 out:
2858 	return;
2859 }
2860 
2861 /**
2862  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2863  * @soc: core txrx main context
2864  * @comp_head: software descriptor head pointer
2865  *
2866  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2867  * and release the software descriptors after processing is complete
2868  *
2869  * Return: none
2870  */
2871 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2872 		struct dp_tx_desc_s *comp_head)
2873 {
2874 	struct dp_tx_desc_s *desc;
2875 	struct dp_tx_desc_s *next;
2876 	struct hal_tx_completion_status ts = {0};
2877 	uint32_t length;
2878 	struct dp_peer *peer;
2879 
2880 	DP_HIST_INIT();
2881 	desc = comp_head;
2882 
2883 	while (desc) {
2884 		hal_tx_comp_get_status(&desc->comp, &ts);
2885 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2886 		length = qdf_nbuf_len(desc->nbuf);
2887 
2888 		/* check tx completion notification */
2889 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf))
2890 			dp_tx_notify_completion(soc, desc, desc->nbuf);
2891 
2892 		dp_tx_comp_process_tx_status(desc, length);
2893 
2894 		DPTRACE(qdf_dp_trace_ptr
2895 				(desc->nbuf,
2896 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2897 				 QDF_TRACE_DEFAULT_PDEV_ID,
2898 				 qdf_nbuf_data_addr(desc->nbuf),
2899 				 sizeof(qdf_nbuf_data(desc->nbuf)),
2900 				 desc->id, ts.status)
2901 			);
2902 
2903 		/* currently m_copy/tx_capture is not supported for scatter gather packets */
2904 		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
2905 					desc->pdev, ts.peer_id, ts.ppdu_id,
2906 					ts.first_msdu, ts.last_msdu,
2907 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
2908 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2909 						QDF_DMA_TO_DEVICE);
2910 
2911 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
2912 				ts.ppdu_id, desc->nbuf);
2913 		} else {
2914 			dp_tx_comp_free_buf(soc, desc);
2915 		}
2916 
2917 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2918 
2919 		next = desc->next;
2920 		dp_tx_desc_release(desc, desc->pool_id);
2921 		desc = next;
2922 	}
2923 	DP_TX_HIST_STATS_PER_PDEV();
2924 }
2925 
2926 /**
2927  * dp_tx_comp_handler() - Tx completion handler
2928  * @soc: core txrx main context
2929  * @hal_srng: HAL SRNG handle of the Tx completion ring
2930  * @quota: No. of packets/descriptors that can be serviced in one loop
2931  *
2932  * This function will collect hardware release ring element contents and
2933  * handle descriptor contents. Based on contents, free packet or handle error
2934  * conditions
2935  *
2936  * Return: Number of descriptors processed
2937  */
2938 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
2939 {
2940 	void *tx_comp_hal_desc;
2941 	uint8_t buffer_src;
2942 	uint8_t pool_id;
2943 	uint32_t tx_desc_id;
2944 	struct dp_tx_desc_s *tx_desc = NULL;
2945 	struct dp_tx_desc_s *head_desc = NULL;
2946 	struct dp_tx_desc_s *tail_desc = NULL;
2947 	uint32_t num_processed;
2948 	uint32_t count;
2949 
2950 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
2951 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2952 				"%s %d : HAL RING Access Failed -- %pK\n",
2953 				__func__, __LINE__, hal_srng);
2954 		return 0;
2955 	}
2956 
2957 	num_processed = 0;
2958 	count = 0;
2959 
2960 	/* Find head descriptor from completion ring */
2961 	while (qdf_likely(tx_comp_hal_desc =
2962 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
2963 
2964 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
2965 
2966 		/* If this buffer was not released by TQM or FW, then it is not
2967 		 * a Tx completion indication; assert */
2968 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
2969 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2970 
2971 			QDF_TRACE(QDF_MODULE_ID_DP,
2972 					QDF_TRACE_LEVEL_FATAL,
2973 					"Tx comp release_src != TQM | FW");
2974 
2975 			qdf_assert_always(0);
2976 		}
2977 
2978 		/* Get descriptor id */
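		/*
		 * The descriptor id (SW cookie) packs the pool id, page id
		 * and page offset of the software Tx descriptor; the
		 * mask/shift pairs below recover each field so the matching
		 * descriptor can be looked up in its pool.
		 */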
2979 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
2980 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
2981 			DP_TX_DESC_ID_POOL_OS;
2982 
2983 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
2984 			continue;
2985 
2986 		/* Find Tx descriptor */
2987 		tx_desc = dp_tx_desc_find(soc, pool_id,
2988 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
2989 				DP_TX_DESC_ID_PAGE_OS,
2990 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
2991 				DP_TX_DESC_ID_OFFSET_OS);
2992 
2993 		/*
2994 		 * If the release source is FW, process the HTT status
2995 		 */
2996 		if (qdf_unlikely(buffer_src ==
2997 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
2998 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
2999 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3000 					htt_tx_status);
3001 			dp_tx_process_htt_completion(tx_desc,
3002 					htt_tx_status);
3003 		} else {
3004 			/* Pool id is not matching. Error */
3005 			if (tx_desc->pool_id != pool_id) {
3006 				QDF_TRACE(QDF_MODULE_ID_DP,
3007 					QDF_TRACE_LEVEL_FATAL,
3008 					"Tx Comp pool id %d not matched %d",
3009 					pool_id, tx_desc->pool_id);
3010 
3011 				qdf_assert_always(0);
3012 			}
3013 
3014 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3015 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3016 				QDF_TRACE(QDF_MODULE_ID_DP,
3017 					QDF_TRACE_LEVEL_FATAL,
3018 					"Txdesc invalid, flgs = %x,id = %d",
3019 					tx_desc->flags,	tx_desc_id);
3020 				qdf_assert_always(0);
3021 			}
3022 
3023 			/* First ring descriptor on the cycle */
3024 			if (!head_desc) {
3025 				head_desc = tx_desc;
3026 				tail_desc = tx_desc;
3027 			}
3028 
3029 			tail_desc->next = tx_desc;
3030 			tx_desc->next = NULL;
3031 			tail_desc = tx_desc;
3032 
3033 			/* Collect hw completion contents */
3034 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3035 					&tx_desc->comp, 1);
3036 
3037 		}
3038 
3039 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3040 		/* Decrement PM usage count if the packet has been sent.*/
3041 		hif_pm_runtime_put(soc->hif_handle);
3042 
3043 		/*
3044 		 * If the processed packet count exceeds the given quota,
3045 		 * stop processing
3046 		 */
3047 		if ((num_processed >= quota))
3048 			break;
3049 
3050 		count++;
3051 	}
3052 
3053 	hal_srng_access_end(soc->hal_soc, hal_srng);
3054 
3055 	/* Process the reaped descriptors */
3056 	if (head_desc)
3057 		dp_tx_comp_process_desc(soc, head_desc);
3058 
3059 	return num_processed;
3060 }
3061 
3062 #ifdef CONVERGED_TDLS_ENABLE
3063 /**
3064  * dp_tx_non_std() - Allow the control-path SW to send data frames
3065  *
3066  * @data_vdev - which vdev should transmit the tx data frames
3067  * @vdev_handle: which vdev should transmit the tx data frames
3068  * @tx_spec: what non-standard handling to apply to the tx data frames
3069  * @msdu_list: NULL-terminated list of tx MSDUs
3070  * Return: NULL on success,
3071  *         nbuf when it fails to send
3072  */
3073 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3074 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3075 {
3076 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3077 
3078 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3079 		vdev->is_tdls_frame = true;
3080 	return dp_tx_send(vdev_handle, msdu_list);
3081 }
3082 #endif
3083 
3084 /**
3085  * dp_tx_vdev_attach() - attach vdev to dp tx
3086  * @vdev: virtual device instance
3087  *
3088  * Return: QDF_STATUS_SUCCESS: success
3089  *         QDF_STATUS_E_RESOURCES: Error return
3090  */
3091 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3092 {
3093 	/*
3094 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3095 	 */
3096 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3097 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3098 
3099 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3100 			vdev->vdev_id);
3101 
3102 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3103 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3104 
3105 	/*
3106 	 * Set HTT Extension Valid bit to 0 by default
3107 	 */
3108 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3109 
3110 	dp_tx_vdev_update_search_flags(vdev);
3111 
3112 	return QDF_STATUS_SUCCESS;
3113 }
3114 
3115 /**
3116  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3117  * @vdev: virtual device instance
3118  *
3119  * Return: void
3120  *
3121  */
3122 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3123 {
3124 	/*
3125 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
3126 	 * for TDLS link
3127 	 *
3128 	 * Enable AddrY (SA based search) only for non-WDS STA and
3129 	 * ProxySTA VAP modes.
3130 	 *
3131 	 * In all other VAP modes, only DA based search should be
3132 	 * enabled
3133 	 */
3134 	if (vdev->opmode == wlan_op_mode_sta &&
3135 	    vdev->tdls_link_connected)
3136 		vdev->hal_desc_addr_search_flags =
3137 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3138 	else if ((vdev->opmode == wlan_op_mode_sta &&
3139 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
3140 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3141 	else
3142 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3143 }
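
/*
 * Summary of the selection above (informational only):
 *	STA with a TDLS link            -> ADDRX | ADDRY
 *	STA, non-WDS or ProxySTA        -> ADDRY
 *	all other VAP modes             -> ADDRX
 */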
3144 
3145 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3146 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3147 {
3148 }
3149 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3150 
3151 /**
3152  * dp_tx_desc_flush() - release resources associated
3153  *                      to tx_desc
3154  * @vdev: virtual device instance
3155  *
3156  * This function will free all outstanding Tx buffers,
3157  * including ME buffers for which the free at completion
3158  * either did not happen or the completion was never received.
3159  */
3160 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3161 {
3162 	uint8_t i, num_pool;
3163 	uint32_t j;
3164 	uint32_t num_desc;
3165 	struct dp_soc *soc = vdev->pdev->soc;
3166 	struct dp_tx_desc_s *tx_desc = NULL;
3167 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3168 
3169 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3170 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3171 
3172 	for (i = 0; i < num_pool; i++) {
3173 		for (j = 0; j < num_desc; j++) {
3174 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3175 			if (tx_desc_pool &&
3176 				tx_desc_pool->desc_pages.cacheable_pages) {
3177 				tx_desc = dp_tx_desc_find(soc, i,
3178 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3179 					DP_TX_DESC_ID_PAGE_OS,
3180 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3181 					DP_TX_DESC_ID_OFFSET_OS);
3182 
3183 				if (tx_desc && (tx_desc->vdev == vdev) &&
3184 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3185 					dp_tx_comp_free_buf(soc, tx_desc);
3186 					dp_tx_desc_release(tx_desc, i);
3187 				}
3188 			}
3189 		}
3190 	}
3191 }
3192 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3193 
3194 /**
3195  * dp_tx_vdev_detach() - detach vdev from dp tx
3196  * @vdev: virtual device instance
3197  *
3198  * Return: QDF_STATUS_SUCCESS: success
3199  *         QDF_STATUS_E_RESOURCES: Error return
3200  */
3201 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3202 {
3203 	dp_tx_desc_flush(vdev);
3204 	return QDF_STATUS_SUCCESS;
3205 }
3206 
3207 /**
3208  * dp_tx_pdev_attach() - attach pdev to dp tx
3209  * @pdev: physical device instance
3210  *
3211  * Return: QDF_STATUS_SUCCESS: success
3212  *         QDF_STATUS_E_RESOURCES: Error return
3213  */
3214 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3215 {
3216 	struct dp_soc *soc = pdev->soc;
3217 
3218 	/* Initialize Flow control counters */
3219 	qdf_atomic_init(&pdev->num_tx_exception);
3220 	qdf_atomic_init(&pdev->num_tx_outstanding);
3221 
3222 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3223 		/* Initialize descriptors in TCL Ring */
3224 		hal_tx_init_data_ring(soc->hal_soc,
3225 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3226 	}
3227 
3228 	return QDF_STATUS_SUCCESS;
3229 }
3230 
3231 /**
3232  * dp_tx_pdev_detach() - detach pdev from dp tx
3233  * @pdev: physical device instance
3234  *
3235  * Return: QDF_STATUS_SUCCESS: success
3236  *         QDF_STATUS_E_RESOURCES: Error return
3237  */
3238 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3239 {
3240 	dp_tx_me_exit(pdev);
3241 	return QDF_STATUS_SUCCESS;
3242 }
3243 
3244 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3245 /* Pools will be allocated dynamically */
3246 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3247 					int num_desc)
3248 {
3249 	uint8_t i;
3250 
3251 	for (i = 0; i < num_pool; i++) {
3252 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3253 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3254 	}
3255 
3256 	return 0;
3257 }
3258 
3259 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3260 {
3261 	uint8_t i;
3262 
3263 	for (i = 0; i < num_pool; i++)
3264 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3265 }
3266 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3267 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3268 					int num_desc)
3269 {
3270 	uint8_t i;
3271 
3272 	/* Allocate software Tx descriptor pools */
3273 	for (i = 0; i < num_pool; i++) {
3274 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3275 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3276 					"%s Tx Desc Pool alloc %d failed %pK\n",
3277 					__func__, i, soc);
3278 			return ENOMEM;
3279 		}
3280 	}
3281 	return 0;
3282 }
3283 
3284 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3285 {
3286 	uint8_t i;
3287 
3288 	for (i = 0; i < num_pool; i++) {
3289 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3290 		if (dp_tx_desc_pool_free(soc, i)) {
3291 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3292 				"%s Tx Desc Pool Free failed\n", __func__);
3293 		}
3294 	}
3295 }
3296 
3297 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3298 
3299 /**
3300  * dp_tx_soc_detach() - detach soc from dp tx
3301  * @soc: core txrx main context
3302  *
3303  * This function will detach dp tx from the main device context
3304  * and free the dp tx resources
3305  *
3306  * Return: QDF_STATUS_SUCCESS: success
3307  *         QDF_STATUS_E_RESOURCES: Error return
3308  */
3309 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3310 {
3311 	uint8_t num_pool;
3312 	uint16_t num_desc;
3313 	uint16_t num_ext_desc;
3314 	uint8_t i;
3315 
3316 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3317 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3318 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3319 
3320 	dp_tx_flow_control_deinit(soc);
3321 	dp_tx_delete_static_pools(soc, num_pool);
3322 
3323 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3324 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
3325 			__func__, num_pool, num_desc);
3326 
3327 	for (i = 0; i < num_pool; i++) {
3328 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3329 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3330 					"%s Tx Ext Desc Pool Free failed\n",
3331 					__func__);
3332 			return QDF_STATUS_E_RESOURCES;
3333 		}
3334 	}
3335 
3336 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3337 			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
3338 			__func__, num_pool, num_ext_desc);
3339 
3340 	for (i = 0; i < num_pool; i++) {
3341 		dp_tx_tso_desc_pool_free(soc, i);
3342 	}
3343 
3344 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3345 			"%s TSO Desc Pool %d Free descs = %d\n",
3346 			__func__, num_pool, num_desc);
3347 
3348 
3349 	for (i = 0; i < num_pool; i++)
3350 		dp_tx_tso_num_seg_pool_free(soc, i);
3351 
3352 
3353 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3354 		"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
3355 		__func__, num_pool, num_desc);
3356 
3357 	return QDF_STATUS_SUCCESS;
3358 }
3359 
3360 /**
3361  * dp_tx_soc_attach() - attach soc to dp tx
3362  * @soc: core txrx main context
3363  *
3364  * This function will attach dp tx to the main device context,
3365  * allocate dp tx resources and initialize them
3366  *
3367  * Return: QDF_STATUS_SUCCESS: success
3368  *         QDF_STATUS_E_RESOURCES: Error return
3369  */
3370 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3371 {
3372 	uint8_t i;
3373 	uint8_t num_pool;
3374 	uint32_t num_desc;
3375 	uint32_t num_ext_desc;
3376 
3377 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3378 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3379 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3380 
3381 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3382 		goto fail;
3383 
3384 	dp_tx_flow_control_init(soc);
3385 
3386 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3387 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
3388 			__func__, num_pool, num_desc);
3389 
3390 	/* Allocate extension tx descriptor pools */
3391 	for (i = 0; i < num_pool; i++) {
3392 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3393 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3394 				"MSDU Ext Desc Pool alloc %d failed %pK\n",
3395 				i, soc);
3396 
3397 			goto fail;
3398 		}
3399 	}
3400 
3401 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3402 			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
3403 			__func__, num_pool, num_ext_desc);
3404 
3405 	for (i = 0; i < num_pool; i++) {
3406 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3407 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3408 				"TSO Desc Pool alloc %d failed %pK\n",
3409 				i, soc);
3410 
3411 			goto fail;
3412 		}
3413 	}
3414 
3415 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3416 			"%s TSO Desc Alloc %d, descs = %d\n",
3417 			__func__, num_pool, num_desc);
3418 
3419 	for (i = 0; i < num_pool; i++) {
3420 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3421 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3422 				"TSO Num of seg Pool alloc %d failed %pK\n",
3423 				i, soc);
3424 
3425 			goto fail;
3426 		}
3427 	}
3428 
3429 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3430 			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
3431 			__func__, num_pool, num_desc);
3432 
3433 	/* Initialize descriptors in TCL Rings */
3434 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3435 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3436 			hal_tx_init_data_ring(soc->hal_soc,
3437 					soc->tcl_data_ring[i].hal_srng);
3438 		}
3439 	}
3440 
3441 	/*
3442 	 * todo - Add a runtime config option to enable this.
3443 	 */
3444 	/*
3445 	 * Due to multiple issues on NPR EMU, enable it selectively
3446 	 * only for NPR EMU, should be removed, once NPR platforms
3447 	 * are stable.
3448 	 */
3449 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3450 
3451 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3452 			"%s HAL Tx init Success\n", __func__);
3453 
3454 	return QDF_STATUS_SUCCESS;
3455 
3456 fail:
3457 	/* Detach will take care of freeing only allocated resources */
3458 	dp_tx_soc_detach(soc);
3459 	return QDF_STATUS_E_RESOURCES;
3460 }
3461 
3462 /*
3463  * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
3464  * @pdev: pointer to DP PDEV structure
3465  * @seg_info_head: Pointer to the head of the segment list
3466  *
3467  * Return: void
3468  */
3469 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3470 		struct dp_tx_seg_info_s *seg_info_head)
3471 {
3472 	struct dp_tx_me_buf_t *mc_uc_buf;
3473 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3474 	qdf_nbuf_t nbuf = NULL;
3475 	uint64_t phy_addr;
3476 
3477 	while (seg_info_head) {
3478 		nbuf = seg_info_head->nbuf;
3479 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3480 			seg_info_head->frags[0].vaddr;
3481 		phy_addr = seg_info_head->frags[0].paddr_hi;
3482 		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3483 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3484 				phy_addr,
3485 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3486 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3487 		qdf_nbuf_free(nbuf);
3488 		seg_info_new = seg_info_head;
3489 		seg_info_head = seg_info_head->next;
3490 		qdf_mem_free(seg_info_new);
3491 	}
3492 }
3493 
3494 /**
3495  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3496  * @vdev: DP VDEV handle
3497  * @nbuf: Multicast nbuf
3498  * @newmac: Table of the clients to which packets have to be sent
3499  * @new_mac_cnt: No of clients
3500  *
3501  * return: no of converted packets
3502  */
3503 uint16_t
3504 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3505 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3506 {
3507 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3508 	struct dp_pdev *pdev = vdev->pdev;
3509 	struct ether_header *eh;
3510 	uint8_t *data;
3511 	uint16_t len;
3512 
3513 	/* reference to frame dst addr */
3514 	uint8_t *dstmac;
3515 	/* copy of original frame src addr */
3516 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3517 
3518 	/* local index into newmac */
3519 	uint8_t new_mac_idx = 0;
3520 	struct dp_tx_me_buf_t *mc_uc_buf;
3521 	qdf_nbuf_t  nbuf_clone;
3522 	struct dp_tx_msdu_info_s msdu_info;
3523 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3524 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3525 	struct dp_tx_seg_info_s *seg_info_new;
3526 	struct dp_tx_frag_info_s data_frag;
3527 	qdf_dma_addr_t paddr_data;
3528 	qdf_dma_addr_t paddr_mcbuf = 0;
3529 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3530 	QDF_STATUS status;
3531 
3532 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3533 
3534 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3535 
3536 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3537 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3538 
3539 	len = qdf_nbuf_len(nbuf);
3540 
3541 	data = qdf_nbuf_data(nbuf);
3542 
3543 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3544 			QDF_DMA_TO_DEVICE);
3545 
3546 	if (status) {
3547 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3548 				"Mapping failure Error:%d", status);
3549 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3550 		qdf_nbuf_free(nbuf);
3551 		return 1;
3552 	}
3553 
3554 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3555 
3556 	/* Prepare the shared data fragment: the original frame past the 6-byte DA */
3557 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3558 	data_frag.paddr_lo = (uint32_t)paddr_data;
3559 	data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32);
3560 	data_frag.len = len - DP_MAC_ADDR_LEN;
3561 
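	/*
	 * Stage one two-fragment segment per destination: fragment 0 is a
	 * freshly allocated 6-byte buffer holding the new unicast DA,
	 * fragment 1 is the shared payload fragment prepared above. The
	 * original nbuf is cloned for every copy except the last, which only
	 * takes an extra reference.
	 */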
3562 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3563 		dstmac = newmac[new_mac_idx];
3564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3565 				"added mac addr (%pM)", dstmac);
3566 
3567 		/* Check for NULL Mac Address */
3568 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3569 			continue;
3570 
3571 		/* frame to self mac. skip */
3572 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3573 			continue;
3574 
3575 		/*
3576 		 * TODO: optimize to avoid malloc in per-packet path
3577 		 * For eg. seg_pool can be made part of vdev structure
3578 		 */
3579 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3580 
3581 		if (!seg_info_new) {
3582 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3583 					"alloc failed");
3584 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3585 			goto fail_seg_alloc;
3586 		}
3587 
3588 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3589 		if (mc_uc_buf == NULL)
3590 			goto fail_buf_alloc;
3591 
3592 		/*
3593 		 * TODO: Check if we need to clone the nbuf
3594 		 * Or can we just use the reference for all cases
3595 		 */
3596 		if (new_mac_idx < (new_mac_cnt - 1)) {
3597 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3598 			if (nbuf_clone == NULL) {
3599 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3600 				goto fail_clone;
3601 			}
3602 		} else {
3603 			/*
3604 			 * Update the ref
3605 			 * to account for frame sent without cloning
3606 			 */
3607 			qdf_nbuf_ref(nbuf);
3608 			nbuf_clone = nbuf;
3609 		}
3610 
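		/*
		 * Copy the new unicast DA into the ME buffer and DMA-map it;
		 * its address is recorded as fragment 0 of this segment, so
		 * the reassembled frame is the new DA followed by the
		 * original payload.
		 */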
3611 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3612 
3613 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3614 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3615 				&paddr_mcbuf);
3616 
3617 		if (status) {
3618 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3619 					"Mapping failure Error:%d", status);
3620 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3621 			goto fail_map;
3622 		}
3623 
3624 		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3625 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3626 		seg_info_new->frags[0].paddr_hi =
3627 			((uint64_t) paddr_mcbuf >> 32);
3628 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3629 
3630 		seg_info_new->frags[1] = data_frag;
3631 		seg_info_new->nbuf = nbuf_clone;
3632 		seg_info_new->frag_cnt = 2;
3633 		seg_info_new->total_len = len;
3634 
3635 		seg_info_new->next = NULL;
3636 
3637 		if (seg_info_head == NULL)
3638 			seg_info_head = seg_info_new;
3639 		else
3640 			seg_info_tail->next = seg_info_new;
3641 
3642 		seg_info_tail = seg_info_new;
3643 	}
3644 
3645 	if (!seg_info_head)
3646 		goto free_return;
3648 
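	/*
	 * Hand the staged segment list to the regular multi-MSDU send path
	 * as a mcast-enhancement (ME) frame; one MSDU is queued per staged
	 * segment.
	 */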
3649 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3650 	msdu_info.num_seg = new_mac_cnt;
3651 	msdu_info.frm_type = dp_tx_frm_me;
3652 
3653 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3654 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3655 
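	/*
	 * The seg_info nodes were only needed to describe the send; release
	 * the local staging list now that dp_tx_send_msdu_multiple() has
	 * consumed it.
	 */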
3656 	while (seg_info_head->next) {
3657 		seg_info_new = seg_info_head;
3658 		seg_info_head = seg_info_head->next;
3659 		qdf_mem_free(seg_info_new);
3660 	}
3661 	qdf_mem_free(seg_info_head);
3662 
3663 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3664 	qdf_nbuf_free(nbuf);
3665 	return new_mac_cnt;
3666 
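/*
 * Error unwind: each label below releases what was acquired after the
 * previous failure point, ending with the original nbuf's DMA mapping and
 * reference.
 */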
3667 fail_map:
3668 	qdf_nbuf_free(nbuf_clone);
3669 
3670 fail_clone:
3671 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3672 
3673 fail_buf_alloc:
3674 	qdf_mem_free(seg_info_new);
3675 
3676 fail_seg_alloc:
3677 	dp_tx_me_mem_free(pdev, seg_info_head);
3678 
3679 free_return:
3680 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3681 	qdf_nbuf_free(nbuf);
3682 	return 1;
3683 }
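
/*
 * Usage sketch (illustrative only, not part of the datapath): a caller in the
 * mcast-enhancement control path that already holds the cdp_vdev handle and a
 * client table could convert one multicast frame roughly as below; 'clients'
 * and 'num_clients' are hypothetical placeholders for the ME client list.
 *
 *	uint8_t clients[8][DP_MAC_ADDR_LEN];
 *	uint8_t num_clients;	// number of valid entries in 'clients'
 *	uint16_t sent;
 *
 *	sent = dp_tx_me_send_convert_ucast(vdev_handle, mcast_nbuf,
 *					   clients, num_clients);
 *
 * The original mcast_nbuf is always consumed (unmapped and freed) by this
 * function, so the caller must not touch it after the call.
 */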
3684 
3685