xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
41 
42 /* invalid peer id for reinject */
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /* mapping from cdp_sec_type (index) to hal encrypt type */
46 #define MAX_CDP_SEC_TYPE 12
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
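
/*
 * Usage sketch (illustrative; it mirrors dp_tx_hw_enqueue() further below):
 * the table is indexed by a cdp_sec_type value and yields the HAL cipher
 * encoding that is programmed into the TCL descriptor, e.g.
 *
 *	enum cdp_sec_type sec_type = vdev->sec_type;
 *
 *	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *			sec_type_map[sec_type]);
 */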
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * The TX queue is identified by two ids: the software descriptor pool id
68  * and the DMA ring id. Depending on the Tx feature set and hardware
69  * configuration, the queue id combination can differ.
70  * For example -
71  * With XPS enabled, all TX descriptor pools and DMA rings are assigned
72  * per CPU id. With no XPS (lock based resource protection), descriptor
73  * pool ids differ per vdev while the DMA ring id equals the single pdev id.
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
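
/*
 * Caller-side sketch (hypothetical; the actual caller, dp_tx_send(), is not
 * part of this excerpt). The ids filled in here are consumed through
 * msdu_info->tx_queue by dp_tx_send_msdu_single()/_multiple() below:
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *	// desc_pool_id selects the SW Tx descriptor pool,
 *	// ring_id selects the TCL data ring used by dp_tx_hw_enqueue()
 */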
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc - core txrx main context
112  * @tx_desc - Tx software descriptor
113  */
114 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
115 				    struct dp_tx_desc_s *tx_desc)
116 {
117 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
118 	if (qdf_unlikely(!tx_desc->tso_desc)) {
119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
120 			  "%s %d TSO desc is NULL!",
121 			  __func__, __LINE__);
122 		qdf_assert(0);
123 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
124 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
125 			  "%s %d TSO num desc is NULL!",
126 			  __func__, __LINE__);
127 		qdf_assert(0);
128 	} else {
129 		bool is_last_seg;
130 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
131 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
132 
133 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
134 			is_last_seg = false;
135 		else
136 			is_last_seg = true;
137 		tso_num_desc->num_seg.tso_cmn_num_seg--;
138 		qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					   tx_desc->tso_desc, is_last_seg);
140 	}
141 }
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  *
147  * @soc - soc device handle
148  * @tx_desc - Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list */
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
183 				    struct dp_tx_desc_s *tx_desc)
184 
185 {
186 }
187 
188 static void dp_tx_tso_desc_release(struct dp_soc *soc,
189 				   struct dp_tx_desc_s *tx_desc)
190 {
191 }
192 #endif
193 /**
194  * dp_tx_desc_release() - Release Tx Descriptor
195  * @tx_desc : Tx Descriptor
196  * @desc_pool_id: Descriptor Pool ID
197  *
198  * Deallocate all resources attached to Tx descriptor and free the Tx
199  * descriptor.
200  *
201  * Return: None
202  */
203 static void
204 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
205 {
206 	struct dp_pdev *pdev = tx_desc->pdev;
207 	struct dp_soc *soc;
208 	uint8_t comp_status = 0;
209 
210 	qdf_assert(pdev);
211 
212 	soc = pdev->soc;
213 
214 	if (tx_desc->frm_type == dp_tx_frm_tso)
215 		dp_tx_tso_desc_release(soc, tx_desc);
216 
217 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
218 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
219 
220 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
221 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
222 
223 	qdf_atomic_dec(&pdev->num_tx_outstanding);
224 
225 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
226 		qdf_atomic_dec(&pdev->num_tx_exception);
227 
228 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
229 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
230 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
231 							     soc->hal_soc);
232 	else
233 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
234 
235 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
236 		"Tx Completion Release desc %d status %d outstanding %d",
237 		tx_desc->id, comp_status,
238 		qdf_atomic_read(&pdev->num_tx_outstanding));
239 
240 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
241 	return;
242 }
243 
244 /**
245  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
246  * @vdev: DP vdev Handle
247  * @nbuf: skb
248  * @meta_data: Prepared HTT metadata (htt_tx_msdu_desc_ext2_t) to be copied
249  * Prepares and fills HTT metadata in the frame pre-header for special frames
250  * that should be transmitted using varying transmit parameters.
251  * There are 2 VDEV modes that currently need this special metadata -
252  *  1) Mesh Mode
253  *  2) DSRC Mode
254  *
255  * Return: HTT metadata size
256  *
257  */
258 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
259 		uint32_t *meta_data)
260 {
261 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
262 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
263 
264 	uint8_t htt_desc_size;
265 
266 	/* Size rounded up to a multiple of 8 bytes */
267 	uint8_t htt_desc_size_aligned;
268 
269 	uint8_t *hdr = NULL;
270 
271 	/*
272 	 * Metadata - HTT MSDU Extension header
273 	 */
274 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
275 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
276 
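	/*
	 * Worked example (illustrative size only): if the extension
	 * descriptor were 36 bytes, (36 + 7) & ~0x7 = 40, i.e. the size is
	 * rounded up to the next multiple of 8 so the pushed metadata keeps
	 * the 8-byte alignment expected by the HW (see the layout diagram in
	 * dp_tx_prepare_desc_single()).
	 */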
277 	if (vdev->mesh_vdev) {
278 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
279 					htt_desc_size_aligned)) {
280 			DP_STATS_INC(vdev,
281 				     tx_i.dropped.headroom_insufficient, 1);
282 			return 0;
283 		}
284 		/* Fill and add HTT metaheader */
285 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
286 		if (hdr == NULL) {
287 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
288 					"Error in filling HTT metadata");
289 
290 			return 0;
291 		}
292 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
293 
294 	} else if (vdev->opmode == wlan_op_mode_ocb) {
295 		/* Todo - Add support for DSRC */
296 	}
297 
298 	return htt_desc_size_aligned;
299 }
300 
301 /**
302  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
303  * @tso_seg: TSO segment to process
304  * @ext_desc: Pointer to MSDU extension descriptor
305  *
306  * Return: void
307  */
308 #if defined(FEATURE_TSO)
309 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
310 		void *ext_desc)
311 {
312 	uint8_t num_frag;
313 	uint32_t tso_flags;
314 
315 	/*
316 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
317 	 * tcp_flag_mask
318 	 *
319 	 * Checksum enable flags are set in TCL descriptor and not in Extension
320 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
321 	 */
322 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
323 
324 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
325 
326 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
327 		tso_seg->tso_flags.ip_len);
328 
329 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
330 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
331 
332 
333 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
334 		uint32_t lo = 0;
335 		uint32_t hi = 0;
336 
337 		qdf_dmaaddr_to_32s(
338 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
339 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
340 			tso_seg->tso_frags[num_frag].length);
341 	}
342 
343 	return;
344 }
345 #else
346 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
347 		void *ext_desc)
348 {
349 	return;
350 }
351 #endif
352 
353 #if defined(FEATURE_TSO)
354 /**
355  * dp_tx_free_tso_seg() - Loop through the tso segments
356  *                        allocated and free them
357  *
358  * @soc: soc handle
359  * @free_seg: list of tso segments
360  * @msdu_info: msdu descriptor
361  *
362  * Return - void
363  */
364 static void dp_tx_free_tso_seg(struct dp_soc *soc,
365 	struct qdf_tso_seg_elem_t *free_seg,
366 	struct dp_tx_msdu_info_s *msdu_info)
367 {
368 	struct qdf_tso_seg_elem_t *next_seg;
369 
370 	while (free_seg) {
371 		next_seg = free_seg->next;
372 		dp_tx_tso_desc_free(soc,
373 			msdu_info->tx_queue.desc_pool_id,
374 			free_seg);
375 		free_seg = next_seg;
376 	}
377 }
378 
379 /**
380  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
381  *                            allocated and free them
382  *
383  * @soc:  soc handle
384  * @free_seg: list of tso segments
385  * @msdu_info: msdu descriptor
386  * Return - void
387  */
388 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
389 	struct qdf_tso_num_seg_elem_t *free_seg,
390 	struct dp_tx_msdu_info_s *msdu_info)
391 {
392 	struct qdf_tso_num_seg_elem_t *next_seg;
393 
394 	while (free_seg) {
395 		next_seg = free_seg->next;
396 		dp_tso_num_seg_free(soc,
397 			msdu_info->tx_queue.desc_pool_id,
398 			free_seg);
399 		free_seg = next_seg;
400 	}
401 }
402 
403 /**
404  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
405  * @vdev: virtual device handle
406  * @msdu: network buffer
407  * @msdu_info: meta data associated with the msdu
408  *
409  * Return: QDF_STATUS_SUCCESS on success, error status on failure
410  */
411 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
412 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
413 {
414 	struct qdf_tso_seg_elem_t *tso_seg;
415 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
416 	struct dp_soc *soc = vdev->pdev->soc;
417 	struct qdf_tso_info_t *tso_info;
418 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
419 
420 	tso_info = &msdu_info->u.tso_info;
421 	tso_info->curr_seg = NULL;
422 	tso_info->tso_seg_list = NULL;
423 	tso_info->num_segs = num_seg;
424 	msdu_info->frm_type = dp_tx_frm_tso;
425 	tso_info->tso_num_seg_list = NULL;
426 
427 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
428 
429 	while (num_seg) {
430 		tso_seg = dp_tx_tso_desc_alloc(
431 				soc, msdu_info->tx_queue.desc_pool_id);
432 		if (tso_seg) {
433 			tso_seg->next = tso_info->tso_seg_list;
434 			tso_info->tso_seg_list = tso_seg;
435 			num_seg--;
436 		} else {
437 			struct qdf_tso_seg_elem_t *free_seg =
438 				tso_info->tso_seg_list;
439 
440 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
441 
442 			return QDF_STATUS_E_NOMEM;
443 		}
444 	}
445 
446 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
447 
448 	tso_num_seg = dp_tso_num_seg_alloc(soc,
449 			msdu_info->tx_queue.desc_pool_id);
450 
451 	if (tso_num_seg) {
452 		tso_num_seg->next = tso_info->tso_num_seg_list;
453 		tso_info->tso_num_seg_list = tso_num_seg;
454 	} else {
455 		/* The num-seg element alloc failed: free the tso segments */
456 		/* that were already allocated for this packet */
457 		struct qdf_tso_seg_elem_t *free_seg =
458 					tso_info->tso_seg_list;
459 
460 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
461 			__func__);
462 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
463 
464 		return QDF_STATUS_E_NOMEM;
465 	}
466 
467 	msdu_info->num_seg =
468 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
469 
470 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
471 			msdu_info->num_seg);
472 
473 	if (!(msdu_info->num_seg)) {
474 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
475 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
476 					msdu_info);
477 		return QDF_STATUS_E_INVAL;
478 	}
479 
480 	tso_info->curr_seg = tso_info->tso_seg_list;
481 
482 	return QDF_STATUS_SUCCESS;
483 }
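
/*
 * Post-condition sketch (descriptive only): on success tso_info->curr_seg
 * points at the head of a linked list of num_seg segment elements and
 * tso_info->tso_num_seg_list holds the common per-packet element.
 * dp_tx_send_msdu_multiple() below advances curr_seg per transmitted segment
 * and calls qdf_nbuf_inc_users(nbuf) for every additional segment, so the
 * skb is freed only after Tx completions arrive for all segments.
 */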
484 #else
485 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
486 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
487 {
488 	return QDF_STATUS_E_NOMEM;
489 }
490 #endif
491 
492 /**
493  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
494  * @vdev: DP Vdev handle
495  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
496  * @desc_pool_id: Descriptor Pool ID
497  *
498  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
499  */
500 static
501 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
502 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
503 {
504 	uint8_t i;
505 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
506 	struct dp_tx_seg_info_s *seg_info;
507 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
508 	struct dp_soc *soc = vdev->pdev->soc;
509 
510 	/* Allocate an extension descriptor */
511 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
512 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
513 
514 	if (!msdu_ext_desc) {
515 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
516 		return NULL;
517 	}
518 
519 	if (msdu_info->exception_fw &&
520 			qdf_unlikely(vdev->mesh_vdev)) {
521 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
522 				&msdu_info->meta_data[0],
523 				sizeof(struct htt_tx_msdu_desc_ext2_t));
524 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
525 	}
526 
527 	switch (msdu_info->frm_type) {
528 	case dp_tx_frm_sg:
529 	case dp_tx_frm_me:
530 	case dp_tx_frm_raw:
531 		seg_info = msdu_info->u.sg_info.curr_seg;
532 		/* Update the buffer pointers in MSDU Extension Descriptor */
533 		for (i = 0; i < seg_info->frag_cnt; i++) {
534 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
535 				seg_info->frags[i].paddr_lo,
536 				seg_info->frags[i].paddr_hi,
537 				seg_info->frags[i].len);
538 		}
539 
540 		break;
541 
542 	case dp_tx_frm_tso:
543 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
544 				&cached_ext_desc[0]);
545 		break;
546 
547 
548 	default:
549 		break;
550 	}
551 
552 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
553 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
554 
555 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
556 			msdu_ext_desc->vaddr);
557 
558 	return msdu_ext_desc;
559 }
560 
561 /**
562  * dp_tx_trace_pkt() - Trace TX packet at DP layer
563  *
564  * @skb: skb to be traced
565  * @msdu_id: msdu_id of the packet
566  * @vdev_id: vdev_id of the packet
567  *
568  * Return: None
569  */
570 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
571 			    uint8_t vdev_id)
572 {
573 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
574 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
575 	DPTRACE(qdf_dp_trace_ptr(skb,
576 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
577 				 QDF_TRACE_DEFAULT_PDEV_ID,
578 				 qdf_nbuf_data_addr(skb),
579 				 sizeof(qdf_nbuf_data(skb)),
580 				 msdu_id, vdev_id));
581 
582 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
583 
584 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
585 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
586 				      msdu_id, QDF_TX));
587 }
588 
589 /**
590  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
591  * @vdev: DP vdev handle
592  * @nbuf: skb
593  * @desc_pool_id: Descriptor pool ID
594  * @msdu_info: MSDU information (metadata for the FW, Tx queue and TID)
595  * @tx_exc_metadata: Handle that holds exception path metadata
596  * Allocate and prepare Tx descriptor with msdu information.
597  *
598  * Return: Pointer to Tx Descriptor on success,
599  *         NULL on failure
600  */
601 static
602 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
603 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
604 		struct dp_tx_msdu_info_s *msdu_info,
605 		struct cdp_tx_exception_metadata *tx_exc_metadata)
606 {
607 	uint8_t align_pad;
608 	uint8_t is_exception = 0;
609 	uint8_t htt_hdr_size;
610 	struct ether_header *eh;
611 	struct dp_tx_desc_s *tx_desc;
612 	struct dp_pdev *pdev = vdev->pdev;
613 	struct dp_soc *soc = pdev->soc;
614 
615 	/* Allocate software Tx descriptor */
616 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
617 	if (qdf_unlikely(!tx_desc)) {
618 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
619 		return NULL;
620 	}
621 
622 	/* Flow control/Congestion Control counters */
623 	qdf_atomic_inc(&pdev->num_tx_outstanding);
624 
625 	/* Initialize the SW tx descriptor */
626 	tx_desc->nbuf = nbuf;
627 	tx_desc->frm_type = dp_tx_frm_std;
628 	tx_desc->tx_encap_type = (tx_exc_metadata ?
629 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
630 	tx_desc->vdev = vdev;
631 	tx_desc->pdev = pdev;
632 	tx_desc->msdu_ext_desc = NULL;
633 	tx_desc->pkt_offset = 0;
634 
635 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
636 
637 	/* Reset the control block */
638 	qdf_nbuf_reset_ctxt(nbuf);
639 
640 	/*
641 	 * For special modes (vdev_type == ocb or mesh), data frames should be
642 	 * transmitted using varying transmit parameters (tx spec) which include
643 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
644 	 * These are filled in the HTT MSDU descriptor and sent in the pre-header.
645 	 * These frames are sent as exception packets to firmware.
646 	 *
647 	 * The HW requires the metadata to start at an 8-byte aligned
648 	 * address, so an alignment pad is added at the start of the buffer.
649 	 * The HTT metadata size is kept a multiple of 8 bytes so that,
650 	 * together with align_pad, the start address remains 8-byte aligned.
651 	 *
652 	 *  |-----------------------------|
653 	 *  |                             |
654 	 *  |-----------------------------| <-----Buffer Pointer Address given
655 	 *  |                             |  ^    in HW descriptor (aligned)
656 	 *  |       HTT Metadata          |  |
657 	 *  |                             |  |
658 	 *  |                             |  | Packet Offset given in descriptor
659 	 *  |                             |  |
660 	 *  |-----------------------------|  |
661 	 *  |       Alignment Pad         |  v
662 	 *  |-----------------------------| <----- Actual buffer start address
663 	 *  |        SKB Data             |           (Unaligned)
664 	 *  |                             |
665 	 *  |                             |
666 	 *  |                             |
667 	 *  |                             |
668 	 *  |                             |
669 	 *  |-----------------------------|
670 	 */
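	/*
	 * Worked example (illustrative addresses): if qdf_nbuf_data() ends in
	 * 0x..5, align_pad = 5; pushing 5 bytes makes the head 8-byte aligned
	 * and pushing an aligned HTT metadata of, say, 40 bytes keeps it so.
	 * pkt_offset then becomes 5 + 40 = 45, telling the HW how far the
	 * original packet data sits from the buffer pointer it is given.
	 */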
671 	if (qdf_unlikely((msdu_info->exception_fw)) ||
672 				(vdev->opmode == wlan_op_mode_ocb)) {
673 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
674 
675 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
676 			DP_STATS_INC(vdev,
677 				     tx_i.dropped.headroom_insufficient, 1);
678 			goto failure;
679 		}
680 
681 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
682 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
683 					"qdf_nbuf_push_head failed");
684 			goto failure;
685 		}
686 
687 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
688 				msdu_info->meta_data);
689 		if (htt_hdr_size == 0)
690 			goto failure;
691 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
692 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
693 		is_exception = 1;
694 	}
695 
696 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
697 				qdf_nbuf_map(soc->osdev, nbuf,
698 					QDF_DMA_TO_DEVICE))) {
699 		/* Handle failure */
700 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
701 				"qdf_nbuf_map failed");
702 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
703 		goto failure;
704 	}
705 
706 	if (qdf_unlikely(vdev->nawds_enabled)) {
707 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
708 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
709 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
710 			is_exception = 1;
711 		}
712 	}
713 
714 #if !TQM_BYPASS_WAR
715 	if (is_exception || tx_exc_metadata)
716 #endif
717 	{
718 		/* Temporary WAR due to TQM VP issues */
719 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
720 		qdf_atomic_inc(&pdev->num_tx_exception);
721 	}
722 
723 	return tx_desc;
724 
725 failure:
726 	dp_tx_desc_release(tx_desc, desc_pool_id);
727 	return NULL;
728 }
729 
730 /**
731  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
732  * @vdev: DP vdev handle
733  * @nbuf: skb
734  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
735  * @desc_pool_id : Descriptor Pool ID
736  *
737  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
738  * information. For frames with fragments, allocate and prepare
739  * an MSDU extension descriptor.
740  *
741  * Return: Pointer to Tx Descriptor on success,
742  *         NULL on failure
743  */
744 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
745 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
746 		uint8_t desc_pool_id)
747 {
748 	struct dp_tx_desc_s *tx_desc;
749 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
750 	struct dp_pdev *pdev = vdev->pdev;
751 	struct dp_soc *soc = pdev->soc;
752 
753 	/* Allocate software Tx descriptor */
754 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
755 	if (!tx_desc) {
756 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
757 		return NULL;
758 	}
759 
760 	/* Flow control/Congestion Control counters */
761 	qdf_atomic_inc(&pdev->num_tx_outstanding);
762 
763 	/* Initialize the SW tx descriptor */
764 	tx_desc->nbuf = nbuf;
765 	tx_desc->frm_type = msdu_info->frm_type;
766 	tx_desc->tx_encap_type = vdev->tx_encap_type;
767 	tx_desc->vdev = vdev;
768 	tx_desc->pdev = pdev;
769 	tx_desc->pkt_offset = 0;
770 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
771 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
772 
773 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
774 
775 	/* Reset the control block */
776 	qdf_nbuf_reset_ctxt(nbuf);
777 
778 	/* Handle scattered frames - TSO/SG/ME */
779 	/* Allocate and prepare an extension descriptor for scattered frames */
780 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
781 	if (!msdu_ext_desc) {
782 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
783 				"%s Tx Extension Descriptor Alloc Fail",
784 				__func__);
785 		goto failure;
786 	}
787 
788 #if TQM_BYPASS_WAR
789 	/* Temporary WAR due to TQM VP issues */
790 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
791 	qdf_atomic_inc(&pdev->num_tx_exception);
792 #endif
793 	if (qdf_unlikely(msdu_info->exception_fw))
794 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
795 
796 	tx_desc->msdu_ext_desc = msdu_ext_desc;
797 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
798 
799 	return tx_desc;
800 failure:
801 	dp_tx_desc_release(tx_desc, desc_pool_id);
802 	return NULL;
803 }
804 
805 /**
806  * dp_tx_prepare_raw() - Prepare RAW packet TX
807  * @vdev: DP vdev handle
808  * @nbuf: buffer pointer
809  * @seg_info: Pointer to Segment info Descriptor to be prepared
810  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
811  *     descriptor
812  *
813  * Return: nbuf on success, NULL on DMA mapping failure
814  */
815 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
816 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
817 {
818 	qdf_nbuf_t curr_nbuf = NULL;
819 	uint16_t total_len = 0;
820 	qdf_dma_addr_t paddr;
821 	int32_t i;
822 	int32_t mapped_buf_num = 0;
823 
824 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
825 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
826 
827 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
828 
829 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
830 	if (vdev->raw_mode_war &&
831 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
832 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
833 
834 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
835 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
836 
837 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
838 					QDF_DMA_TO_DEVICE)) {
839 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
840 				"%s dma map error ", __func__);
841 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
842 			mapped_buf_num = i;
843 			goto error;
844 		}
845 
846 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
847 		seg_info->frags[i].paddr_lo = paddr;
848 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
849 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
850 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
851 		total_len += qdf_nbuf_len(curr_nbuf);
852 	}
853 
854 	seg_info->frag_cnt = i;
855 	seg_info->total_len = total_len;
856 	seg_info->next = NULL;
857 
858 	sg_info->curr_seg = seg_info;
859 
860 	msdu_info->frm_type = dp_tx_frm_raw;
861 	msdu_info->num_seg = 1;
862 
863 	return nbuf;
864 
865 error:
866 	i = 0;
867 	while (nbuf) {
868 		curr_nbuf = nbuf;
869 		if (i < mapped_buf_num) {
870 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
871 			i++;
872 		}
873 		nbuf = qdf_nbuf_next(nbuf);
874 		qdf_nbuf_free(curr_nbuf);
875 	}
876 	return NULL;
877 
878 }
879 
880 /**
881  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
882  * @soc: DP Soc Handle
883  * @vdev: DP vdev handle
884  * @tx_desc: Tx Descriptor Handle
885  * @tid: TID from HLOS for overriding default DSCP-TID mapping
886  * @fw_metadata: Metadata to send to Target Firmware along with frame
887  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
888  * @tx_exc_metadata: Handle that holds exception path meta data
889  *
890  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
891  *  from software Tx descriptor
892  *
893  * Return: QDF_STATUS_SUCCESS on success, error status on failure
894  */
895 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
896 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
897 				   uint16_t fw_metadata, uint8_t ring_id,
898 				   struct cdp_tx_exception_metadata
899 					*tx_exc_metadata)
900 {
901 	uint8_t type;
902 	uint16_t length;
903 	void *hal_tx_desc, *hal_tx_desc_cached;
904 	qdf_dma_addr_t dma_addr;
905 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
906 
907 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
908 			tx_exc_metadata->sec_type : vdev->sec_type);
909 
910 	/* Return Buffer Manager ID */
911 	uint8_t bm_id = ring_id;
912 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
913 
914 	hal_tx_desc_cached = (void *) cached_desc;
915 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
916 
917 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
918 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
919 		type = HAL_TX_BUF_TYPE_EXT_DESC;
920 		dma_addr = tx_desc->msdu_ext_desc->paddr;
921 	} else {
922 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
923 		type = HAL_TX_BUF_TYPE_BUFFER;
924 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
925 	}
926 
927 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
928 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
929 					dma_addr, bm_id, tx_desc->id,
930 					type, soc->hal_soc);
931 
932 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
933 		return QDF_STATUS_E_RESOURCES;
934 
935 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
936 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
937 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
938 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
939 				vdev->pdev->lmac_id);
940 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
941 				    vdev->search_type);
942 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
943 				     vdev->bss_ast_hash);
944 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
945 					  vdev->dscp_tid_map_id);
946 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
947 			sec_type_map[sec_type]);
948 
949 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
950 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
951 			__func__, length, type, (uint64_t)dma_addr,
952 			tx_desc->pkt_offset, tx_desc->id);
953 
954 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
955 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
956 
957 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
958 			vdev->hal_desc_addr_search_flags);
959 
960 	/* verify checksum offload configuration */
961 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
962 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
963 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
964 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
965 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
966 	}
967 
968 	if (tid != HTT_TX_EXT_TID_INVALID)
969 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
970 
971 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
972 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
973 
974 
975 	/* Sync cached descriptor with HW */
976 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
977 
978 	if (!hal_tx_desc) {
979 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
980 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
981 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
982 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
983 		return QDF_STATUS_E_RESOURCES;
984 	}
985 
986 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
987 
988 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
989 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
990 
991 	return QDF_STATUS_SUCCESS;
992 }
993 
994 
995 /**
996  * dp_cce_classify() - Classify the frame based on CCE rules
997  * @vdev: DP vdev handle
998  * @nbuf: skb
999  *
1000  * Classify frames based on CCE rules
1001  * Return: true if the frame is classified by CCE rules,
1002  *         false otherwise
1003  */
1004 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1005 {
1006 	struct ether_header *eh = NULL;
1007 	uint16_t   ether_type;
1008 	qdf_llc_t *llcHdr;
1009 	qdf_nbuf_t nbuf_clone = NULL;
1010 	qdf_dot3_qosframe_t *qos_wh = NULL;
1011 
1012 	/* for mesh packets don't do any classification */
1013 	if (qdf_unlikely(vdev->mesh_vdev))
1014 		return false;
1015 
1016 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1017 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1018 		ether_type = eh->ether_type;
1019 		llcHdr = (qdf_llc_t *)(nbuf->data +
1020 					sizeof(struct ether_header));
1021 	} else {
1022 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1023 		/* For encrypted packets don't do any classification */
1024 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1025 			return false;
1026 
1027 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1028 			if (qdf_unlikely(
1029 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1030 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1031 
1032 				ether_type = *(uint16_t *)(nbuf->data
1033 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1034 						+ sizeof(qdf_llc_t)
1035 						- sizeof(ether_type));
1036 				llcHdr = (qdf_llc_t *)(nbuf->data +
1037 						QDF_IEEE80211_4ADDR_HDR_LEN);
1038 			} else {
1039 				ether_type = *(uint16_t *)(nbuf->data
1040 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1041 						+ sizeof(qdf_llc_t)
1042 						- sizeof(ether_type));
1043 				llcHdr = (qdf_llc_t *)(nbuf->data +
1044 					QDF_IEEE80211_3ADDR_HDR_LEN);
1045 			}
1046 
1047 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1048 				&& (ether_type ==
1049 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1050 
1051 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1052 				return true;
1053 			}
1054 		}
1055 
1056 		return false;
1057 	}
1058 
1059 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1060 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1061 				sizeof(*llcHdr));
1062 		nbuf_clone = qdf_nbuf_clone(nbuf);
1063 		if (qdf_unlikely(nbuf_clone)) {
1064 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1065 
1066 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1067 				qdf_nbuf_pull_head(nbuf_clone,
1068 						sizeof(qdf_net_vlanhdr_t));
1069 			}
1070 		}
1071 	} else {
1072 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1073 			nbuf_clone = qdf_nbuf_clone(nbuf);
1074 			if (qdf_unlikely(nbuf_clone)) {
1075 				qdf_nbuf_pull_head(nbuf_clone,
1076 					sizeof(qdf_net_vlanhdr_t));
1077 			}
1078 		}
1079 	}
1080 
1081 	if (qdf_unlikely(nbuf_clone))
1082 		nbuf = nbuf_clone;
1083 
1084 
1085 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1086 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1087 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1088 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1089 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1090 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1091 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1092 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1093 		if (qdf_unlikely(nbuf_clone != NULL))
1094 			qdf_nbuf_free(nbuf_clone);
1095 		return true;
1096 	}
1097 
1098 	if (qdf_unlikely(nbuf_clone != NULL))
1099 		qdf_nbuf_free(nbuf_clone);
1100 
1101 	return false;
1102 }
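
/*
 * Consumer sketch (mirrors dp_tx_send_msdu_single()/_multiple() below): when
 * the HW CCE block is disabled (soc->cce_disable), a frame classified here is
 * forced onto DP_VO_TID and flagged for the firmware, e.g. in
 * dp_tx_send_msdu_single():
 *
 *	if (dp_cce_classify(vdev, nbuf) == true) {
 *		DP_STATS_INC(vdev, tx_i.cce_classified, 1);
 *		tid = DP_VO_TID;
 *		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
 *	}
 */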
1103 
1104 /**
1105  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1106  * @vdev: DP vdev handle
1107  * @nbuf: skb
1108  * @msdu_info: MSDU info handle to be updated with the classified TID
1109  * Extract the DSCP or PCP information from frame and map into TID value.
1110  * Software based TID classification is required when more than 2 DSCP-TID
1111  * mapping tables are needed.
1112  * Hardware supports 2 DSCP-TID mapping tables
1113  *
1114  * Return: void
1115  */
1116 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1117 		struct dp_tx_msdu_info_s *msdu_info)
1118 {
1119 	uint8_t tos = 0, dscp_tid_override = 0;
1120 	uint8_t *hdr_ptr, *L3datap;
1121 	uint8_t is_mcast = 0;
1122 	struct ether_header *eh = NULL;
1123 	qdf_ethervlan_header_t *evh = NULL;
1124 	uint16_t   ether_type;
1125 	qdf_llc_t *llcHdr;
1126 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1127 
1128 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1129 
1130 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1131 		return;
1132 
1133 	/* for mesh packets don't do any classification */
1134 	if (qdf_unlikely(vdev->mesh_vdev))
1135 		return;
1136 
1137 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1138 		eh = (struct ether_header *) nbuf->data;
1139 		hdr_ptr = eh->ether_dhost;
1140 		L3datap = hdr_ptr + sizeof(struct ether_header);
1141 	} else {
1142 		qdf_dot3_qosframe_t *qos_wh =
1143 			(qdf_dot3_qosframe_t *) nbuf->data;
1144 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1145 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1146 		return;
1147 	}
1148 
1149 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1150 	ether_type = eh->ether_type;
1151 
1152 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1153 	/*
1154 	 * Check if packet is dot3 or eth2 type.
1155 	 */
1156 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1157 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1158 				sizeof(*llcHdr));
1159 
1160 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1161 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1162 				sizeof(*llcHdr);
1163 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1164 					+ sizeof(*llcHdr) +
1165 					sizeof(qdf_net_vlanhdr_t));
1166 		} else {
1167 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1168 				sizeof(*llcHdr);
1169 		}
1170 	} else {
1171 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1172 			evh = (qdf_ethervlan_header_t *) eh;
1173 			ether_type = evh->ether_type;
1174 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1175 		}
1176 	}
1177 
1178 	/*
1179 	 * Find priority from IP TOS DSCP field
1180 	 */
1181 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1182 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1183 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1184 			/* Only for unicast frames */
1185 			if (!is_mcast) {
1186 				/* send it on VO queue */
1187 				msdu_info->tid = DP_VO_TID;
1188 			}
1189 		} else {
1190 			/*
1191 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1192 			 * from TOS byte.
1193 			 */
1194 			tos = ip->ip_tos;
1195 			dscp_tid_override = 1;
1196 
1197 		}
1198 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1199 		/* TODO
1200 		 * use flowlabel
1201 		 * IGMP/MLD cases to be handled in phase 2
1202 		 */
1203 		unsigned long ver_pri_flowlabel;
1204 		unsigned long pri;
1205 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1206 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1207 			DP_IPV6_PRIORITY_SHIFT;
1208 		tos = pri;
1209 		dscp_tid_override = 1;
1210 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1211 		msdu_info->tid = DP_VO_TID;
1212 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1213 		/* Only for unicast frames */
1214 		if (!is_mcast) {
1215 			/* send ucast arp on VO queue */
1216 			msdu_info->tid = DP_VO_TID;
1217 		}
1218 	}
1219 
1220 	/*
1221 	 * Assign all MCAST packets to BE
1222 	 */
1223 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1224 		if (is_mcast) {
1225 			tos = 0;
1226 			dscp_tid_override = 1;
1227 		}
1228 	}
1229 
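	/*
	 * Worked example (assuming the usual DP_IP_DSCP_SHIFT of 2 and
	 * DP_IP_DSCP_MASK of 0x3f): an IPv4 TOS byte of 0xB8 carries DSCP 46
	 * (EF), since (0xB8 >> 2) & 0x3f = 46; the per-pdev dscp_tid_map[]
	 * then translates that DSCP value into the TID for this frame.
	 */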
1230 	if (dscp_tid_override == 1) {
1231 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1232 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1233 	}
1234 	return;
1235 }
1236 
1237 #ifdef CONVERGED_TDLS_ENABLE
1238 /**
1239  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1240  * @tx_desc: TX descriptor
1241  *
1242  * Return: None
1243  */
1244 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1245 {
1246 	if (tx_desc->vdev) {
1247 		if (tx_desc->vdev->is_tdls_frame)
1248 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1249 			tx_desc->vdev->is_tdls_frame = false;
1250 		tx_desc->vdev->is_tdls_frame = false;
1251 }
1252 
1253 /**
1254  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1255  * @tx_desc: TX descriptor
1256  * @vdev: datapath vdev handle
1257  *
1258  * Return: None
1259  */
1260 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1261 				  struct dp_vdev *vdev)
1262 {
1263 	struct hal_tx_completion_status ts = {0};
1264 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1265 
1266 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1267 	if (vdev->tx_non_std_data_callback.func) {
1268 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1269 		vdev->tx_non_std_data_callback.func(
1270 				vdev->tx_non_std_data_callback.ctxt,
1271 				nbuf, ts.status);
1272 		return;
1273 	}
1274 }
1275 #endif
1276 
1277 /**
1278  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1279  * @vdev: DP vdev handle
1280  * @nbuf: skb
1281  * @msdu_info: MSDU information (TID from HLOS for overriding the default
1282  *             DSCP-TID mapping, metadata for the FW and the Tx queue to be
1283  *             used for this frame)
1284  * @peer_id: peer_id of the peer in case of NAWDS frames
1285  * @tx_exc_metadata: Handle that holds exception path metadata
1286  *
1287  * Return: NULL on success,
1288  *         nbuf when it fails to send
1289  */
1290 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1291 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1292 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1293 {
1294 	struct dp_pdev *pdev = vdev->pdev;
1295 	struct dp_soc *soc = pdev->soc;
1296 	struct dp_tx_desc_s *tx_desc;
1297 	QDF_STATUS status;
1298 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1299 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1300 	uint16_t htt_tcl_metadata = 0;
1301 	uint8_t tid = msdu_info->tid;
1302 
1303 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1304 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1305 			msdu_info, tx_exc_metadata);
1306 	if (!tx_desc) {
1307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1308 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1309 			  __func__, vdev, tx_q->desc_pool_id);
1310 		return nbuf;
1311 	}
1312 
1313 	if (qdf_unlikely(soc->cce_disable)) {
1314 		if (dp_cce_classify(vdev, nbuf) == true) {
1315 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1316 			tid = DP_VO_TID;
1317 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1318 		}
1319 	}
1320 
1321 	dp_tx_update_tdls_flags(tx_desc);
1322 
1323 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1324 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1325 				"%s %d : HAL RING Access Failed -- %pK",
1326 				__func__, __LINE__, hal_srng);
1327 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1328 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1329 		goto fail_return;
1330 	}
1331 
1332 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1333 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1334 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1335 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1336 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1337 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1338 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1339 				peer_id);
1340 	} else
1341 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1342 
1343 
1344 	if (msdu_info->exception_fw) {
1345 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1346 	}
1347 
1348 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1349 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1350 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1351 
1352 	if (status != QDF_STATUS_SUCCESS) {
1353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1354 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1355 			  __func__, tx_desc, tx_q->ring_id);
1356 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1357 		goto fail_return;
1358 	}
1359 
1360 	nbuf = NULL;
1361 
1362 fail_return:
1363 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1364 		hal_srng_access_end(soc->hal_soc, hal_srng);
1365 		hif_pm_runtime_put(soc->hif_handle);
1366 	} else {
1367 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1368 	}
1369 
1370 	return nbuf;
1371 }
1372 
1373 /**
1374  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1375  * @vdev: DP vdev handle
1376  * @nbuf: skb
1377  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1378  *
1379  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1380  *
1381  * Return: NULL on success,
1382  *         nbuf when it fails to send
1383  */
1384 #if QDF_LOCK_STATS
1385 static noinline
1386 #else
1387 static
1388 #endif
1389 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1390 				    struct dp_tx_msdu_info_s *msdu_info)
1391 {
1392 	uint8_t i;
1393 	struct dp_pdev *pdev = vdev->pdev;
1394 	struct dp_soc *soc = pdev->soc;
1395 	struct dp_tx_desc_s *tx_desc;
1396 	bool is_cce_classified = false;
1397 	QDF_STATUS status;
1398 	uint16_t htt_tcl_metadata = 0;
1399 
1400 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1401 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1402 
1403 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1404 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1405 				"%s %d : HAL RING Access Failed -- %pK",
1406 				__func__, __LINE__, hal_srng);
1407 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1408 		return nbuf;
1409 	}
1410 
1411 	if (qdf_unlikely(soc->cce_disable)) {
1412 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1413 		if (is_cce_classified) {
1414 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1415 			msdu_info->tid = DP_VO_TID;
1416 		}
1417 	}
1418 
1419 	if (msdu_info->frm_type == dp_tx_frm_me)
1420 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1421 
1422 	i = 0;
1423 	/* Print statement to track i and num_seg */
1424 	/*
1425 	 * For each segment (maps to 1 MSDU), prepare software and hardware
1426 	 * descriptors using information in msdu_info
1427 	 */
1428 	while (i < msdu_info->num_seg) {
1429 		/*
1430 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1431 		 * descriptor
1432 		 */
1433 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1434 				tx_q->desc_pool_id);
1435 
1436 		if (!tx_desc) {
1437 			if (msdu_info->frm_type == dp_tx_frm_me) {
1438 				dp_tx_me_free_buf(pdev,
1439 					(void *)(msdu_info->u.sg_info
1440 						.curr_seg->frags[0].vaddr));
1441 			}
1442 			goto done;
1443 		}
1444 
1445 		if (msdu_info->frm_type == dp_tx_frm_me) {
1446 			tx_desc->me_buffer =
1447 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1448 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1449 		}
1450 
1451 		if (is_cce_classified)
1452 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1453 
1454 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1455 		if (msdu_info->exception_fw) {
1456 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1457 		}
1458 
1459 		/*
1460 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1461 		 */
1462 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1463 			htt_tcl_metadata, tx_q->ring_id, NULL);
1464 
1465 		if (status != QDF_STATUS_SUCCESS) {
1466 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1467 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1468 				  __func__, tx_desc, tx_q->ring_id);
1469 
1470 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1471 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1472 
1473 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1474 			goto done;
1475 		}
1476 
1477 		/*
1478 		 * TODO
1479 		 * if tso_info structure can be modified to have curr_seg
1480 		 * as first element, following 2 blocks of code (for TSO and SG)
1481 		 * can be combined into 1
1482 		 */
1483 
1484 		/*
1485 		 * For frames with multiple segments (TSO, ME), jump to next
1486 		 * segment.
1487 		 */
1488 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1489 			if (msdu_info->u.tso_info.curr_seg->next) {
1490 				msdu_info->u.tso_info.curr_seg =
1491 					msdu_info->u.tso_info.curr_seg->next;
1492 
1493 				/*
1494 				 * If this is a jumbo nbuf, then increment the number of
1495 				 * nbuf users for each additional segment of the msdu.
1496 				 * This will ensure that the skb is freed only after
1497 				 * receiving tx completion for all segments of an nbuf
1498 				 */
1499 				qdf_nbuf_inc_users(nbuf);
1500 
1501 				/* Check with MCL if this is needed */
1502 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1503 			}
1504 		}
1505 
1506 		/*
1507 		 * For Multicast-Unicast converted packets,
1508 		 * each converted frame (for a client) is represented as
1509 		 * 1 segment
1510 		 */
1511 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1512 				(msdu_info->frm_type == dp_tx_frm_me)) {
1513 			if (msdu_info->u.sg_info.curr_seg->next) {
1514 				msdu_info->u.sg_info.curr_seg =
1515 					msdu_info->u.sg_info.curr_seg->next;
1516 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1517 			}
1518 		}
1519 		i++;
1520 	}
1521 
1522 	nbuf = NULL;
1523 
1524 done:
1525 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1526 		hal_srng_access_end(soc->hal_soc, hal_srng);
1527 		hif_pm_runtime_put(soc->hif_handle);
1528 	} else {
1529 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1530 	}
1531 
1532 	return nbuf;
1533 }
1534 
1535 /**
1536  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1537  *                     for SG frames
1538  * @vdev: DP vdev handle
1539  * @nbuf: skb
1540  * @seg_info: Pointer to Segment info Descriptor to be prepared
1541  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1542  *
1543  * Return: nbuf on success,
1544  *         NULL on DMA mapping failure
1545  */
1546 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1547 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1548 {
1549 	uint32_t cur_frag, nr_frags;
1550 	qdf_dma_addr_t paddr;
1551 	struct dp_tx_sg_info_s *sg_info;
1552 
1553 	sg_info = &msdu_info->u.sg_info;
1554 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1555 
1556 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1557 				QDF_DMA_TO_DEVICE)) {
1558 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1559 				"dma map error");
1560 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1561 
1562 		qdf_nbuf_free(nbuf);
1563 		return NULL;
1564 	}
1565 
1566 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1567 	seg_info->frags[0].paddr_lo = paddr;
1568 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1569 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1570 	seg_info->frags[0].vaddr = (void *) nbuf;
1571 
1572 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1573 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1574 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1575 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1576 					"frag dma map error");
1577 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1578 			qdf_nbuf_free(nbuf);
1579 			return NULL;
1580 		}
1581 
1582 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1583 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1584 		seg_info->frags[cur_frag + 1].paddr_hi =
1585 			((uint64_t) paddr) >> 32;
1586 		seg_info->frags[cur_frag + 1].len =
1587 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1588 	}
1589 
1590 	seg_info->frag_cnt = (cur_frag + 1);
1591 	seg_info->total_len = qdf_nbuf_len(nbuf);
1592 	seg_info->next = NULL;
1593 
1594 	sg_info->curr_seg = seg_info;
1595 
1596 	msdu_info->frm_type = dp_tx_frm_sg;
1597 	msdu_info->num_seg = 1;
1598 
1599 	return nbuf;
1600 }
1601 
1602 #ifdef MESH_MODE_SUPPORT
1603 
1604 /**
1605  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
1606  *				  and prepare msdu_info for mesh frames.
1607  * @vdev: DP vdev handle
1608  * @nbuf: skb
1609  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1610  *
1611  * Return: NULL on failure,
1612  *         nbuf when extracted successfully
1613  */
1614 static
1615 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1616 				struct dp_tx_msdu_info_s *msdu_info)
1617 {
1618 	struct meta_hdr_s *mhdr;
1619 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1620 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1621 
1622 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1623 
1624 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1625 		msdu_info->exception_fw = 0;
1626 		goto remove_meta_hdr;
1627 	}
1628 
1629 	msdu_info->exception_fw = 1;
1630 
1631 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1632 
1633 	meta_data->host_tx_desc_pool = 1;
1634 	meta_data->update_peer_cache = 1;
1635 	meta_data->learning_frame = 1;
1636 
1637 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1638 		meta_data->power = mhdr->power;
1639 
1640 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1641 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1642 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1643 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1644 
1645 		meta_data->dyn_bw = 1;
1646 
1647 		meta_data->valid_pwr = 1;
1648 		meta_data->valid_mcs_mask = 1;
1649 		meta_data->valid_nss_mask = 1;
1650 		meta_data->valid_preamble_type  = 1;
1651 		meta_data->valid_retries = 1;
1652 		meta_data->valid_bw_info = 1;
1653 	}
1654 
1655 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1656 		meta_data->encrypt_type = 0;
1657 		meta_data->valid_encrypt_type = 1;
1658 		meta_data->learning_frame = 0;
1659 	}
1660 
1661 	meta_data->valid_key_flags = 1;
1662 	meta_data->key_flags = (mhdr->keyix & 0x3);
1663 
1664 remove_meta_hdr:
1665 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1666 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1667 				"qdf_nbuf_pull_head failed");
1668 		qdf_nbuf_free(nbuf);
1669 		return NULL;
1670 	}
1671 
1672 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1673 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1674 	else
1675 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1676 
1677 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1678 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1679 			" tid %d to_fw %d",
1680 			__func__, msdu_info->meta_data[0],
1681 			msdu_info->meta_data[1],
1682 			msdu_info->meta_data[2],
1683 			msdu_info->meta_data[3],
1684 			msdu_info->meta_data[4],
1685 			msdu_info->meta_data[5],
1686 			msdu_info->tid, msdu_info->exception_fw);
1687 
1688 	return nbuf;
1689 }
1690 #else
1691 static
1692 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1693 				struct dp_tx_msdu_info_s *msdu_info)
1694 {
1695 	return nbuf;
1696 }
1697 
1698 #endif
1699 
1700 #ifdef DP_FEATURE_NAWDS_TX
1701 /**
1702  * dp_tx_prepare_nawds(): Transmit NAWDS frames
1703  * @vdev: dp_vdev handle
1704  * @nbuf: skb
1705  * @msdu_info: MSDU information (TID, Tx queue and metadata for this frame)
1706  *
1707  * Replicates the multicast frame to each NAWDS-enabled peer of the vdev
1708  * (skipping the peer the frame originated from) and transmits the copies
1709  * via dp_tx_send_msdu_single().
1710  * Return: NULL on success, nbuf on failure
1711  */
1712 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1713 		struct dp_tx_msdu_info_s *msdu_info)
1714 {
1715 	struct dp_peer *peer = NULL;
1716 	struct dp_soc *soc = vdev->pdev->soc;
1717 	struct dp_ast_entry *ast_entry = NULL;
1718 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1719 	uint16_t peer_id = HTT_INVALID_PEER;
1720 
1721 	struct dp_peer *sa_peer = NULL;
1722 	qdf_nbuf_t nbuf_copy;
1723 
1724 	qdf_spin_lock_bh(&(soc->ast_lock));
1725 	ast_entry = dp_peer_ast_hash_find_by_pdevid
1726 				(soc,
1727 				 (uint8_t *)(eh->ether_shost),
1728 				 vdev->pdev->pdev_id);
1729 
1730 	if (ast_entry)
1731 		sa_peer = ast_entry->peer;
1732 
1733 	qdf_spin_unlock_bh(&(soc->ast_lock));
1734 
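	/*
	 * Replicate the multicast frame to every NAWDS-enabled peer on this
	 * vdev, skipping the peer the frame originated from (sa_peer) so it
	 * is not echoed back.
	 */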
1735 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1736 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1737 				(peer->nawds_enabled)) {
1738 			if (sa_peer == peer) {
1739 				QDF_TRACE(QDF_MODULE_ID_DP,
1740 						QDF_TRACE_LEVEL_DEBUG,
1741 						" %s: broadcast multicast packet",
1742 						 __func__);
1743 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1744 				continue;
1745 			}
1746 
1747 			nbuf_copy = qdf_nbuf_copy(nbuf);
1748			if (!nbuf_copy) {
1749				QDF_TRACE(QDF_MODULE_ID_DP,
1750						QDF_TRACE_LEVEL_ERROR,
1751						"nbuf copy failed");
				continue;
1752			}
1753 
1754 			peer_id = peer->peer_ids[0];
1755 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1756 					msdu_info, peer_id, NULL);
1757 			if (nbuf_copy != NULL) {
1758 				qdf_nbuf_free(nbuf_copy);
1759 				continue;
1760 			}
1761 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1762 						1, qdf_nbuf_len(nbuf));
1763 		}
1764 	}
1765 	if (peer_id == HTT_INVALID_PEER)
1766 		return nbuf;
1767 
1768 	return NULL;
1769 }
1770 #endif
1771 
1772 /**
1773  * dp_check_exc_metadata() - Checks if parameters are valid
1774  * @tx_exc - holds all exception path parameters
1775  * @tx_exc: holds all exception path parameters
1776  *
1777  * Return: true when all the parameters are valid, else false
1778  */
1779 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1780 {
1781 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1782 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1783 	    tx_exc->sec_type > cdp_num_sec_types) {
1784 		return false;
1785 	}
1786 
1787 	return true;
1788 }
1789 
1790 /**
1791  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1792  * @vap_dev: DP vdev handle
1793  * @nbuf: skb
1794  * @tx_exc_metadata: Handle that holds exception path meta data
1795  *
1796  * Entry point for Core Tx layer (DP_TX) invoked from
1797  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1798  *
1799  * Return: NULL on success,
1800  *         nbuf when it fails to send
1801  */
1802 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1803 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1804 {
1805 	struct ether_header *eh = NULL;
1806 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1807 	struct dp_tx_msdu_info_s msdu_info;
1808 
1809 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1810 
1811 	msdu_info.tid = tx_exc_metadata->tid;
1812 
1813 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1814 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1815 			"%s , skb %pM",
1816 			__func__, nbuf->data);
1817 
1818 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1819 
1820 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1821 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1822 			"Invalid parameters in exception path");
1823 		goto fail;
1824 	}
1825 
1826 	/* Basic sanity checks for unsupported packets */
1827 
1828 	/* MESH mode */
1829 	if (qdf_unlikely(vdev->mesh_vdev)) {
1830 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1831 			"Mesh mode is not supported in exception path");
1832 		goto fail;
1833 	}
1834 
1835 	/* TSO or SG */
1836 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1837 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1838 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1839 			  "TSO and SG are not supported in exception path");
1840 
1841 		goto fail;
1842 	}
1843 
1844 	/* RAW */
1845 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1846 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1847 			  "Raw frame is not supported in exception path");
1848 		goto fail;
1849 	}
1850 
1851 
1852 	/* Mcast enhancement */
1853 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1854 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
1855 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
1856 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1857 					  "mcast_enhancement_en is set; ignoring it and sending the mcast packet to the FW");
1858 		}
1859 	}
1860 
1861 	/*
1862 	 * Get HW Queue to use for this frame.
1863 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1864 	 * dedicated for data and 1 for command.
1865 	 * "queue_id" maps to one hardware ring.
1866 	 *  With each ring, we also associate a unique Tx descriptor pool
1867 	 *  to minimize lock contention for these resources.
1868 	 */
1869 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1870 
1871 	/*  Single linear frame */
1872 	/*
1873 	 * If nbuf is a simple linear frame, use send_single function to
1874 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1875 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1876 	 */
1877 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1878 			tx_exc_metadata->peer_id, tx_exc_metadata);
1879 
1880 	return nbuf;
1881 
1882 fail:
1883 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1884 			"pkt send failed");
1885 	return nbuf;
1886 }
1887 
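/*
 * Illustrative sketch only (not part of this driver): a typical exception
 * path caller is expected to populate cdp_tx_exception_metadata before
 * handing the frame down. Only fields dereferenced in this file (peer_id,
 * tid, tx_encap_type, sec_type) are shown; any other setup is assumed.
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.peer_id = peer_id;                  // destination peer
 *	tx_exc.tid = HTT_INVALID_TID;              // no TID override
 *	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	tx_exc.sec_type = cdp_sec_type_none;
 *
 *	nbuf = dp_tx_send_exception(vdev_handle, nbuf, &tx_exc);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);               // send failed, drop it
 */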
1888 /**
1889  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1890  * @vap_dev: DP vdev handle
1891  * @nbuf: skb
1892  *
1893  * Entry point for Core Tx layer (DP_TX) invoked from
1894  * hard_start_xmit in OSIF/HDD
1895  *
1896  * Return: NULL on success,
1897  *         nbuf when it fails to send
1898  */
1899 #ifdef MESH_MODE_SUPPORT
1900 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1901 {
1902 	struct meta_hdr_s *mhdr;
1903 	qdf_nbuf_t nbuf_mesh = NULL;
1904 	qdf_nbuf_t nbuf_clone = NULL;
1905 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1906 	uint8_t no_enc_frame = 0;
1907 
1908 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1909 	if (nbuf_mesh == NULL) {
1910 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1911 				"qdf_nbuf_unshare failed");
1912 		return nbuf;
1913 	}
1914 	nbuf = nbuf_mesh;
1915 
1916 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1917 
1918 	if ((vdev->sec_type != cdp_sec_type_none) &&
1919 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1920 		no_enc_frame = 1;
1921 
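	/*
	 * When per-packet tx info is requested (METAHDR_FLAG_INFO_UPDATED)
	 * and this is not a no-encrypt frame, an extra clone tagged with
	 * CB_FTYPE_MESH_TX_INFO is sent through the FW exception path so the
	 * tx info can be reported, while the original frame goes out normally.
	 */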
1922 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1923 		       !no_enc_frame) {
1924 		nbuf_clone = qdf_nbuf_clone(nbuf);
1925 		if (nbuf_clone == NULL) {
1926 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1927 				"qdf_nbuf_clone failed");
1928 			return nbuf;
1929 		}
1930 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1931 	}
1932 
1933 	if (nbuf_clone) {
1934 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1935 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1936 		} else {
1937 			qdf_nbuf_free(nbuf_clone);
1938 		}
1939 	}
1940 
1941 	if (no_enc_frame)
1942 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1943 	else
1944 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1945 
1946 	nbuf = dp_tx_send(vap_dev, nbuf);
1947 	if ((nbuf == NULL) && no_enc_frame) {
1948 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1949 	}
1950 
1951 	return nbuf;
1952 }
1953 
1954 #else
1955 
1956 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1957 {
1958 	return dp_tx_send(vap_dev, nbuf);
1959 }
1960 
1961 #endif
1962 
1963 /**
1964  * dp_tx_send() - Transmit a frame on a given VAP
1965  * @vap_dev: DP vdev handle
1966  * @nbuf: skb
1967  *
1968  * Entry point for Core Tx layer (DP_TX) invoked from
1969  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1970  * cases
1971  *
1972  * Return: NULL on success,
1973  *         nbuf when it fails to send
1974  */
1975 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1976 {
1977 	struct ether_header *eh = NULL;
1978 	struct dp_tx_msdu_info_s msdu_info;
1979 	struct dp_tx_seg_info_s seg_info;
1980 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1981 	uint16_t peer_id = HTT_INVALID_PEER;
1982 	qdf_nbuf_t nbuf_mesh = NULL;
1983 
1984 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1985 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1986 
1987 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1988 
1989 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1990 			"%s , skb %pM",
1991 			__func__, nbuf->data);
1992 
1993 	/*
1994 	 * Set Default Host TID value to invalid TID
1995 	 * (TID override disabled)
1996 	 */
1997 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1998 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1999 
2000 	if (qdf_unlikely(vdev->mesh_vdev)) {
2001 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2002 								&msdu_info);
2003 		if (nbuf_mesh == NULL) {
2004 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2005 					"Extracting mesh metadata failed");
2006 			return nbuf;
2007 		}
2008 		nbuf = nbuf_mesh;
2009 	}
2010 
2011 	/*
2012 	 * Get HW Queue to use for this frame.
2013 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
2014 	 * dedicated for data and 1 for command.
2015 	 * "queue_id" maps to one hardware ring.
2016 	 *  With each ring, we also associate a unique Tx descriptor pool
2017 	 *  to minimize lock contention for these resources.
2018 	 */
2019 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2020 
2021 	/*
2022 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2023 	 *  Table 1 - Default DSCP-TID mapping table
2024 	 *  Table 2 - 1 DSCP-TID override table
2025 	 *
2026 	 * If we need a different DSCP-TID mapping for this vap,
2027 	 * call tid_classify to extract DSCP/ToS from frame and
2028 	 * map to a TID and store in msdu_info. This is later used
2029 	 * to fill in TCL Input descriptor (per-packet TID override).
2030 	 */
2031 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2032 
2033 	/*
2034 	 * Classify the frame and call corresponding
2035 	 * "prepare" function which extracts the segment (TSO)
2036 	 * and fragmentation information (for TSO, SG, ME, or Raw)
2037 	 * into MSDU_INFO structure which is later used to fill
2038 	 * SW and HW descriptors.
2039 	 */
2040 	if (qdf_nbuf_is_tso(nbuf)) {
2041 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2042 			  "%s TSO frame %pK", __func__, vdev);
2043 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2044 				qdf_nbuf_len(nbuf));
2045 
2046 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2047 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2048 					 qdf_nbuf_len(nbuf));
2049 			return nbuf;
2050 		}
2051 
2052 		goto send_multiple;
2053 	}
2054 
2055 	/* SG */
2056 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2057 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2058 
2059 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2060 			 "%s non-TSO SG frame %pK", __func__, vdev);
2061 
2062 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2063 				qdf_nbuf_len(nbuf));
2064 
2065 		goto send_multiple;
2066 	}
2067 
2068 #ifdef ATH_SUPPORT_IQUE
2069 	/* Mcast to Ucast Conversion*/
2070 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2071 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2072 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2073 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2074 				  "%s Mcast frm for ME %pK", __func__, vdev);
2075 
2076 			DP_STATS_INC_PKT(vdev,
2077 					tx_i.mcast_en.mcast_pkt, 1,
2078 					qdf_nbuf_len(nbuf));
2079 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2080 					QDF_STATUS_SUCCESS) {
2081 				return NULL;
2082 			}
2083 		}
2084 	}
2085 #endif
2086 
2087 	/* RAW */
2088 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2089 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2090 		if (nbuf == NULL)
2091 			return NULL;
2092 
2093 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2094 			  "%s Raw frame %pK", __func__, vdev);
2095 
2096 		goto send_multiple;
2097 
2098 	}
2099 
2100 	/*  Single linear frame */
2101 	/*
2102 	 * If nbuf is a simple linear frame, use send_single function to
2103 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2104 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2105 	 */
2106 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2107 
2108 	return nbuf;
2109 
2110 send_multiple:
2111 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2112 
2113 	return nbuf;
2114 }
2115 
2116 /**
2117  * dp_tx_reinject_handler() - Tx Reinject Handler
2118  * @tx_desc: software descriptor head pointer
2119  * @status : Tx completion status from HTT descriptor
2120  *
2121  * This function reinjects frames back to Target.
2122  * Todo - Host queue needs to be added
2123  *
2124  * Return: none
2125  */
2126 static
2127 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2128 {
2129 	struct dp_vdev *vdev;
2130 	struct dp_peer *peer = NULL;
2131 	uint32_t peer_id = HTT_INVALID_PEER;
2132 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2133 	qdf_nbuf_t nbuf_copy = NULL;
2134 	struct dp_tx_msdu_info_s msdu_info;
2135 	struct dp_peer *sa_peer = NULL;
2136 	struct dp_ast_entry *ast_entry = NULL;
2137 	struct dp_soc *soc = NULL;
2138 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2139 #ifdef WDS_VENDOR_EXTENSION
2140 	int is_mcast = 0, is_ucast = 0;
2141 	int num_peers_3addr = 0;
2142 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2143 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2144 #endif
2145 
2146 	vdev = tx_desc->vdev;
2147 	soc = vdev->pdev->soc;
2148 
2149 	qdf_assert(vdev);
2150 
2151 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2152 
2153 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2154 
2155 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2156 			"%s Tx reinject path", __func__);
2157 
2158 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2159 			qdf_nbuf_len(tx_desc->nbuf));
2160 
2161 	qdf_spin_lock_bh(&(soc->ast_lock));
2162 
2163 	ast_entry = dp_peer_ast_hash_find_by_pdevid
2164 				(soc,
2165 				 (uint8_t *)(eh->ether_shost),
2166 				 vdev->pdev->pdev_id);
2167 
2168 	if (ast_entry)
2169 		sa_peer = ast_entry->peer;
2170 
2171 	qdf_spin_unlock_bh(&(soc->ast_lock));
2172 
2173 #ifdef WDS_VENDOR_EXTENSION
2174 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2175 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2176 	} else {
2177 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2178 	}
2179 	is_ucast = !is_mcast;
2180 
2181 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2182 		if (peer->bss_peer)
2183 			continue;
2184 
2185 		/* Detect wds peers that use 3-addr framing for mcast.
2186 		 * if there are any, the bss_peer is used to send
2187 		 * the mcast frame using 3-addr format. All wds enabled
2188 		 * peers that use 4-addr framing for mcast frames will
2189 		 * be duplicated and sent as 4-addr frames below.
2190 		 */
2191 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2192 			num_peers_3addr = 1;
2193 			break;
2194 		}
2195 	}
2196 #endif
2197 
2198 	if (qdf_unlikely(vdev->mesh_vdev)) {
2199 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2200 	} else {
2201 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2202 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2203 #ifdef WDS_VENDOR_EXTENSION
2204 			/*
2205 			 * . if 3-addr STA, then send on BSS Peer
2206 			 * . if Peer WDS enabled and accept 4-addr mcast,
2207 			 * send mcast on that peer only
2208 			 * . if Peer WDS enabled and accept 4-addr ucast,
2209 			 * send ucast on that peer only
2210 			 */
2211 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2212 			 (peer->wds_enabled &&
2213 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2214 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2215 #else
2216 			((peer->bss_peer &&
2217 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2218 				 peer->nawds_enabled)) {
2219 #endif
2220 				peer_id = DP_INVALID_PEER;
2221 
2222 				if (peer->nawds_enabled) {
2223 					peer_id = peer->peer_ids[0];
2224 					if (sa_peer == peer) {
2225 						QDF_TRACE(
2226 							QDF_MODULE_ID_DP,
2227 							QDF_TRACE_LEVEL_DEBUG,
2228 							" %s: multicast packet",
2229 							__func__);
2230 						DP_STATS_INC(peer,
2231 							tx.nawds_mcast_drop, 1);
2232 						continue;
2233 					}
2234 				}
2235 
2236 				nbuf_copy = qdf_nbuf_copy(nbuf);
2237 
2238 				if (!nbuf_copy) {
2239 					QDF_TRACE(QDF_MODULE_ID_DP,
2240 						QDF_TRACE_LEVEL_DEBUG,
2241 						FL("nbuf copy failed"));
2242 					break;
2243 				}
2244 
2245 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2246 						nbuf_copy,
2247 						&msdu_info,
2248 						peer_id,
2249 						NULL);
2250 
2251 				if (nbuf_copy) {
2252 					QDF_TRACE(QDF_MODULE_ID_DP,
2253 						QDF_TRACE_LEVEL_DEBUG,
2254 						FL("pkt send failed"));
2255 					qdf_nbuf_free(nbuf_copy);
2256 				} else {
2257 					if (peer_id != DP_INVALID_PEER)
2258 						DP_STATS_INC_PKT(peer,
2259 							tx.nawds_mcast,
2260 							1, qdf_nbuf_len(nbuf));
2261 				}
2262 			}
2263 		}
2264 	}
2265 
2266 	if (vdev->nawds_enabled) {
2267 		peer_id = DP_INVALID_PEER;
2268 
2269 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2270 					1, qdf_nbuf_len(nbuf));
2271 
2272 		nbuf = dp_tx_send_msdu_single(vdev,
2273 				nbuf,
2274 				&msdu_info,
2275 				peer_id, NULL);
2276 
2277 		if (nbuf) {
2278 			QDF_TRACE(QDF_MODULE_ID_DP,
2279 				QDF_TRACE_LEVEL_DEBUG,
2280 				FL("pkt send failed"));
2281 			qdf_nbuf_free(nbuf);
2282 		}
2283 	} else
2284 		qdf_nbuf_free(nbuf);
2285 
2286 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2287 }
2288 
2289 /**
2290  * dp_tx_inspect_handler() - Tx Inspect Handler
2291  * @tx_desc: software descriptor head pointer
2292  * @status : Tx completion status from HTT descriptor
2293  *
2294  * Handles Tx frames sent back to Host for inspection
2295  * (ProxyARP)
2296  *
2297  * Return: none
2298  */
2299 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2300 {
2301 
2302 	struct dp_soc *soc;
2303 	struct dp_pdev *pdev = tx_desc->pdev;
2304 
2305 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2306 			"%s Tx inspect path",
2307 			__func__);
2308 
2309 	qdf_assert(pdev);
2310 
2311 	soc = pdev->soc;
2312 
2313 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2314 			qdf_nbuf_len(tx_desc->nbuf));
2315 
2316 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2317 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2318 }
2319 
2320 #ifdef FEATURE_PERPKT_INFO
2321 /**
2322  * dp_get_completion_indication_for_stack() - send completion to stack
2323  * @soc : dp_soc handle
2324  * @pdev: dp_pdev handle
2325  * @peer: dp peer handle
2326  * @ts: transmit completion status structure
2327  * @netbuf: Buffer pointer for free
2328  *
2329  * This function indicates whether the buffer needs to be sent to the
2330  * stack for freeing or not
2331  */
2332 QDF_STATUS
2333 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2334 				       struct dp_pdev *pdev,
2335 				       struct dp_peer *peer,
2336 				       struct hal_tx_completion_status *ts,
2337 				       qdf_nbuf_t netbuf)
2338 {
2339 	struct tx_capture_hdr *ppdu_hdr;
2340 	uint16_t peer_id = ts->peer_id;
2341 	uint32_t ppdu_id = ts->ppdu_id;
2342 	uint8_t first_msdu = ts->first_msdu;
2343 	uint8_t last_msdu = ts->last_msdu;
2344 
2345 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2346 		return QDF_STATUS_E_NOSUPPORT;
2347 
2348 	if (!peer) {
2349 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2350 				FL("Peer Invalid"));
2351 		return QDF_STATUS_E_INVAL;
2352 	}
2353 
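	/* In m_copy mode, skip the MSDU if it belongs to the same PPDU and
	 * peer as the last one delivered to the stack
	 */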
2354 	if (pdev->mcopy_mode) {
2355 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2356 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2357 			return QDF_STATUS_E_INVAL;
2358 		}
2359 
2360 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2361 		pdev->m_copy_id.tx_peer_id = peer_id;
2362 	}
2363 
2364 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2365 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2366 				FL("No headroom"));
2367 		return QDF_STATUS_E_NOMEM;
2368 	}
2369 
2370 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2371 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2372 		     IEEE80211_ADDR_LEN);
2373 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2374 		     IEEE80211_ADDR_LEN);
2375 	ppdu_hdr->ppdu_id = ppdu_id;
2376 	ppdu_hdr->peer_id = peer_id;
2377 	ppdu_hdr->first_msdu = first_msdu;
2378 	ppdu_hdr->last_msdu = last_msdu;
2379 
2380 	return QDF_STATUS_SUCCESS;
2381 }
2382 
2383 
2384 /**
2385  * dp_send_completion_to_stack() - send completion to stack
2386  * @soc :  dp_soc handle
2387  * @pdev:  dp_pdev handle
2388  * @peer_id: peer_id of the peer for which completion came
2389  * @ppdu_id: ppdu_id
2390  * @netbuf: Buffer pointer for free
2391  *
2392  * This function is used to send completion to stack
2393  * to free buffer
2394 */
2395 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2396 					uint16_t peer_id, uint32_t ppdu_id,
2397 					qdf_nbuf_t netbuf)
2398 {
2399 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2400 				netbuf, peer_id,
2401 				WDI_NO_VAL, pdev->pdev_id);
2402 }
2403 #else
2404 static QDF_STATUS
2405 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2406 				       struct dp_pdev *pdev,
2407 				       struct dp_peer *peer,
2408 				       struct hal_tx_completion_status *ts,
2409 				       qdf_nbuf_t netbuf)
2410 {
2411 	return QDF_STATUS_E_NOSUPPORT;
2412 }
2413 
2414 static void
2415 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2416 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2417 {
2418 }
2419 #endif
2420 
2421 /**
2422  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2423  * @soc: Soc handle
2424  * @desc: software Tx descriptor to be processed
2425  *
2426  * Return: none
2427  */
2428 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2429 		struct dp_tx_desc_s *desc)
2430 {
2431 	struct dp_vdev *vdev = desc->vdev;
2432 	qdf_nbuf_t nbuf = desc->nbuf;
2433 
2434 	/* If it is TDLS mgmt, don't unmap or free the frame */
2435 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2436 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2437 
2438 	/* 0 : MSDU buffer, 1 : MLE */
2439 	if (desc->msdu_ext_desc) {
2440 		/* TSO free */
2441 		if (hal_tx_ext_desc_get_tso_enable(
2442 					desc->msdu_ext_desc->vaddr)) {
2443 			/* unmap each TSO segment before freeing the nbuf */
2444 			dp_tx_tso_unmap_segment(soc, desc);
2445 			qdf_nbuf_free(nbuf);
2446 			return;
2447 		}
2448 	}
2449 
2450 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2451 
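	/* For mesh vdevs, frames that did not go through the FW exception
	 * path are returned to the OS shim via osif_tx_free_ext()
	 */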
2452 	if (qdf_likely(!vdev->mesh_vdev))
2453 		qdf_nbuf_free(nbuf);
2454 	else {
2455 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2456 			qdf_nbuf_free(nbuf);
2457 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2458 		} else
2459 			vdev->osif_tx_free_ext((nbuf));
2460 	}
2461 }
2462 
2463 /**
2464  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2465  * @vdev: pointer to dp dev handler
2466  * @status : Tx completion status from HTT descriptor
2467  *
2468  * Handles MEC notify event sent from fw to Host
2469  *
2470  * Return: none
2471  */
2472 #ifdef FEATURE_WDS
2473 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2474 {
2475 
2476 	struct dp_soc *soc;
2477 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2478 	struct dp_peer *peer;
2479 
2480 	if (!vdev->wds_enabled)
2481 		return;
2482 
2483 	/* MEC required only in STA mode */
2484 	if (vdev->opmode != wlan_op_mode_sta)
2485 		return;
2486 
2487 	soc = vdev->pdev->soc;
2488 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2489 	peer = TAILQ_FIRST(&vdev->peer_list);
2490 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2491 
2492 	if (!peer) {
2493 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2494 				FL("peer is NULL"));
2495 		return;
2496 	}
2497 
2498 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2499 			"%s Tx MEC Handler",
2500 			__func__);
2501 
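	/* Add a MEC AST entry only when the MEC address reported by FW is not
	 * this vdev's own MAC address
	 */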
2502 	/* note: mac address is in (status + sizeof(uint32_t)) */
2503 	if (qdf_mem_cmp((status + sizeof(uint32_t)),
2504 			vdev->mac_addr.raw,
2505 			DP_MAC_ADDR_LEN))
2506 		dp_peer_add_ast(soc,
2507 				peer,
2508 				(status + sizeof(uint32_t)),
2509 				CDP_TXRX_AST_TYPE_MEC,
2510 				flags);
2511 }
2512 #endif
2513 
2514 #ifdef MESH_MODE_SUPPORT
2515 /**
2516  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2517  *                                         in mesh meta header
2518  * @tx_desc: software descriptor head pointer
2519  * @ts: pointer to tx completion stats
2520  * Return: none
2521  */
2522 static
2523 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2524 		struct hal_tx_completion_status *ts)
2525 {
2526 	struct meta_hdr_s *mhdr;
2527 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2528 
2529 	if (!tx_desc->msdu_ext_desc) {
2530 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2531 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2532 				"netbuf %pK offset %d",
2533 				netbuf, tx_desc->pkt_offset);
2534 			return;
2535 		}
2536 	}
2537 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2538 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2539 			"netbuf %pK offset %d", netbuf,
2540 			sizeof(struct meta_hdr_s));
2541 		return;
2542 	}
2543 
2544 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2545 	mhdr->rssi = ts->ack_frame_rssi;
2546 	mhdr->channel = tx_desc->pdev->operating_channel;
2547 }
2548 
2549 #else
2550 static
2551 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2552 		struct hal_tx_completion_status *ts)
2553 {
2554 }
2555 
2556 #endif
2557 
2558 /**
2559  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2560  * @peer: Handle to DP peer
2561  * @ts: pointer to HAL Tx completion stats
2562  *
2563  * Return: None
2564  */
2565 static inline void
2566 dp_tx_update_peer_stats(struct dp_peer *peer,
2567 			struct hal_tx_completion_status *ts)
2568 {
2569 	struct dp_pdev *pdev = peer->vdev->pdev;
2570 	struct dp_soc *soc = pdev->soc;
2571 	uint8_t mcs, pkt_type;
2572 
2573 	mcs = ts->mcs;
2574 	pkt_type = ts->pkt_type;
2575 
2576 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
2577 		dp_err("Release source is not from TQM");
2578 		return;
2579 	}
2580 
2581 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2582 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2583 
2584 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2585 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2586 
2587 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2588 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2589 
2590 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2591 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2592 
2593 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2594 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2595 
2596 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2597 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2598 
2599 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2600 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2601 
2602 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
2603 		dp_err("Tx completion has no valid acknowledgment");
2604 		return;
2605 	}
2606 
2607 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2608 
2609 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2610 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2611 
2612 	/*
2613 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
2614 	 * Return from here if HTT PPDU events are enabled.
2615 	 */
2616 	if (!(soc->process_tx_status))
2617 		return;
2618 
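	/* Out-of-range MCS values for a given preamble are accumulated in the
	 * last (MAX_MCS - 1) bucket
	 */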
2619 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2620 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2621 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2622 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2623 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2624 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2625 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2626 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2627 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2628 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2629 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2630 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2631 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2632 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2633 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2634 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2635 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2636 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2637 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2638 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2639 
2640 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2641 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2642 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2643 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2644 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2645 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2646 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2647 
2648 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2649 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
2650 				&peer->stats, ts->peer_id,
2651 				UPDATE_PEER_STATS);
2652 	}
2653 }
2654 
2655 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2656 /**
2657  * dp_tx_flow_pool_lock() - take flow pool lock
2658  * @soc: core txrx main context
2659  * @tx_desc: tx desc
2660  *
2661  * Return: None
2662  */
2663 static inline
2664 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2665 			  struct dp_tx_desc_s *tx_desc)
2666 {
2667 	struct dp_tx_desc_pool_s *pool;
2668 	uint8_t desc_pool_id;
2669 
2670 	desc_pool_id = tx_desc->pool_id;
2671 	pool = &soc->tx_desc[desc_pool_id];
2672 
2673 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2674 }
2675 
2676 /**
2677  * dp_tx_flow_pool_unlock() - release flow pool lock
2678  * @soc: core txrx main context
2679  * @tx_desc: tx desc
2680  *
2681  * Return: None
2682  */
2683 static inline
2684 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2685 			    struct dp_tx_desc_s *tx_desc)
2686 {
2687 	struct dp_tx_desc_pool_s *pool;
2688 	uint8_t desc_pool_id;
2689 
2690 	desc_pool_id = tx_desc->pool_id;
2691 	pool = &soc->tx_desc[desc_pool_id];
2692 
2693 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2694 }
2695 #else
2696 static inline
2697 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2698 {
2699 }
2700 
2701 static inline
2702 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2703 {
2704 }
2705 #endif
2706 
2707 /**
2708  * dp_tx_notify_completion() - Notify tx completion for this desc
2709  * @soc: core txrx main context
2710  * @tx_desc: tx desc
2711  * @netbuf:  buffer
2712  *
2713  * Return: none
2714  */
2715 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2716 					   struct dp_tx_desc_s *tx_desc,
2717 					   qdf_nbuf_t netbuf)
2718 {
2719 	void *osif_dev;
2720 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2721 
2722 	qdf_assert(tx_desc);
2723 
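	/* Hold the flow pool lock so the vdev behind tx_desc cannot be freed
	 * while the OSIF handle and completion callback are fetched
	 */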
2724 	dp_tx_flow_pool_lock(soc, tx_desc);
2725 
2726 	if (!tx_desc->vdev ||
2727 	    !tx_desc->vdev->osif_vdev) {
2728 		dp_tx_flow_pool_unlock(soc, tx_desc);
2729 		return;
2730 	}
2731 
2732 	osif_dev = tx_desc->vdev->osif_vdev;
2733 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2734 	dp_tx_flow_pool_unlock(soc, tx_desc);
2735 
2736 	if (tx_compl_cbk)
2737 		tx_compl_cbk(netbuf, osif_dev);
2738 }
2739 
2740 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
2741  * @pdev: pdev handle
2742  * @tid: tid value
2743  * @txdesc_ts: timestamp from txdesc
2744  * @ppdu_id: ppdu id
2745  *
2746  * Return: none
2747  */
2748 #ifdef FEATURE_PERPKT_INFO
2749 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2750 					       uint8_t tid,
2751 					       uint64_t txdesc_ts,
2752 					       uint32_t ppdu_id)
2753 {
2754 	uint64_t delta_ms;
2755 	struct cdp_tx_sojourn_stats *sojourn_stats;
2756 
2757 	if (pdev->enhanced_stats_en == 0)
2758 		return;
2759 
2760 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2761 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2762 
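	/*
	 * A new ppdu_id closes the previous PPDU window: flush the accumulated
	 * sojourn stats to WDI subscribers and start a fresh window.
	 */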
2763 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2764 		if (!pdev->sojourn_buf)
2765 			return;
2766 
2767 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2768 					qdf_nbuf_data(pdev->sojourn_buf);
2769 
2770 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2771 			     sizeof(struct cdp_tx_sojourn_stats));
2772 
2773 		qdf_mem_zero(&pdev->sojourn_stats,
2774 			     sizeof(struct cdp_tx_sojourn_stats));
2775 
2776 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2777 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2778 				     WDI_NO_VAL, pdev->pdev_id);
2779 
2780 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2781 	}
2782 
2783 	if (tid == HTT_INVALID_TID)
2784 		return;
2785 
2786 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2787 				txdesc_ts;
2788 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2789 			    delta_ms);
2790 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2791 	pdev->sojourn_stats.num_msdus[tid]++;
2792 }
2793 #else
2794 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2795 					       uint8_t tid,
2796 					       uint64_t txdesc_ts,
2797 					       uint32_t ppdu_id)
2798 {
2799 }
2800 #endif
2801 
2802 /**
2803  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
2804  * @soc: DP Soc handle
2805  * @tx_desc: software Tx descriptor
2806  * @ts : Tx completion status from HAL/HTT descriptor
2807  *
2808  * Return: none
2809  */
2810 static inline void
2811 dp_tx_comp_process_desc(struct dp_soc *soc,
2812 			struct dp_tx_desc_s *desc,
2813 			struct hal_tx_completion_status *ts,
2814 			struct dp_peer *peer)
2815 {
2816 	/*
2817 	 * m_copy/tx_capture modes are not supported for
2818 	 * scatter gather packets
2819 	 */
2820 	if (!(desc->msdu_ext_desc) &&
2821 	    (dp_get_completion_indication_for_stack(soc, desc->pdev,
2822 						    peer, ts, desc->nbuf)
2823 			== QDF_STATUS_SUCCESS)) {
2824 		qdf_nbuf_unmap(soc->osdev, desc->nbuf,
2825 			       QDF_DMA_TO_DEVICE);
2826 
2827 		dp_send_completion_to_stack(soc, desc->pdev, ts->peer_id,
2828 					    ts->ppdu_id, desc->nbuf);
2829 	} else {
2830 		dp_tx_comp_free_buf(soc, desc);
2831 	}
2832 }
2833 
2834 /**
2835  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2836  * @tx_desc: software descriptor head pointer
2837  * @ts: Tx completion status
2838  * @peer: peer handle
2839  *
2840  * Return: none
2841  */
2842 static inline
2843 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2844 				  struct hal_tx_completion_status *ts,
2845 				  struct dp_peer *peer)
2846 {
2847 	uint32_t length;
2848 	struct dp_soc *soc = NULL;
2849 	struct dp_vdev *vdev = tx_desc->vdev;
2850 	struct ether_header *eh =
2851 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2852 
2853 	if (!vdev) {
2854 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2855 				"invalid vdev");
2856 		goto out;
2857 	}
2858 
2859 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2860 				"-------------------- \n"
2861 				"Tx Completion Stats: \n"
2862 				"-------------------- \n"
2863 				"ack_frame_rssi = %d \n"
2864 				"first_msdu = %d \n"
2865 				"last_msdu = %d \n"
2866 				"msdu_part_of_amsdu = %d \n"
2867 				"rate_stats valid = %d \n"
2868 				"bw = %d \n"
2869 				"pkt_type = %d \n"
2870 				"stbc = %d \n"
2871 				"ldpc = %d \n"
2872 				"sgi = %d \n"
2873 				"mcs = %d \n"
2874 				"ofdma = %d \n"
2875 				"tones_in_ru = %d \n"
2876 				"tsf = %d \n"
2877 				"ppdu_id = %d \n"
2878 				"transmit_cnt = %d \n"
2879 				"tid = %d \n"
2880 				"peer_id = %d\n",
2881 				ts->ack_frame_rssi, ts->first_msdu,
2882 				ts->last_msdu, ts->msdu_part_of_amsdu,
2883 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
2884 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
2885 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
2886 				ts->transmit_cnt, ts->tid, ts->peer_id);
2887 
2888 	soc = vdev->pdev->soc;
2889 
2890 	/* Update SoC level stats */
2891 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2892 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2893 
2894 	/* Update per-packet stats for mesh mode */
2895 	if (qdf_unlikely(vdev->mesh_vdev) &&
2896 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2897 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
2898 
2899 	length = qdf_nbuf_len(tx_desc->nbuf);
2900 	/* Update peer level stats */
2901 	if (!peer) {
2902 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2903 				"invalid peer");
2904 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2905 		goto out;
2906 	}
2907 
2908 	if (qdf_likely(!peer->bss_peer)) {
2909 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2910 
2911 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
2912 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
2913 	} else {
2914 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
2915 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2916 
2917 			if ((peer->vdev->tx_encap_type ==
2918 				htt_cmn_pkt_type_ethernet) &&
2919 				IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
2920 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2921 			}
2922 		}
2923 	}
2924 
2925 	dp_tx_update_peer_stats(peer, ts);
2926 
2927 out:
2928 	return;
2929 }
2930 /**
2931  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
2932  * @soc: core txrx main context
2933  * @comp_head: software descriptor head pointer
2934  *
2935  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2936  * and release the software descriptors after processing is complete
2937  *
2938  * Return: none
2939  */
2940 static void
2941 dp_tx_comp_process_desc_list(struct dp_soc *soc,
2942 			     struct dp_tx_desc_s *comp_head)
2943 {
2944 	struct dp_tx_desc_s *desc;
2945 	struct dp_tx_desc_s *next;
2946 	struct hal_tx_completion_status ts = {0};
2947 	struct dp_peer *peer;
2948 
2949 	DP_HIST_INIT();
2950 	desc = comp_head;
2951 
2952 	while (desc) {
2953 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
2954 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2955 		dp_tx_comp_process_tx_status(desc, &ts, peer);
2956 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
2957 
2958 		if (peer)
2959 			dp_peer_unref_del_find_by_id(peer);
2960 
2961 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
2962 
2963 		next = desc->next;
2964 
2965 		dp_tx_desc_release(desc, desc->pool_id);
2966 		desc = next;
2967 	}
2968 
2969 	DP_TX_HIST_STATS_PER_PDEV();
2970 }
2971 
2972 /**
2973  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2974  * @tx_desc: software descriptor head pointer
2975  * @status : Tx completion status from HTT descriptor
2976  *
2977  * This function will process HTT Tx indication messages from Target
2978  *
2979  * Return: none
2980  */
2981 static
2982 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2983 {
2984 	uint8_t tx_status;
2985 	struct dp_pdev *pdev;
2986 	struct dp_vdev *vdev;
2987 	struct dp_soc *soc;
2988 	struct hal_tx_completion_status ts = {0};
2989 	uint32_t *htt_desc = (uint32_t *)status;
2990 	struct dp_peer *peer;
2991 	qdf_nbuf_t nbuf;
2992 
2993 	qdf_assert(tx_desc->pdev);
2994 
2995 	pdev = tx_desc->pdev;
2996 	vdev = tx_desc->vdev;
2997 	soc = pdev->soc;
2998 	nbuf = tx_desc->nbuf;
2999 
3000 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3001 
3002 	switch (tx_status) {
3003 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3004 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3005 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3006 	{
3007 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3008 			ts.peer_id =
3009 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3010 						htt_desc[2]);
3011 			ts.tid =
3012 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3013 						htt_desc[2]);
3014 		} else {
3015 			ts.peer_id = HTT_INVALID_PEER;
3016 			ts.tid = HTT_INVALID_TID;
3017 		}
3018 		ts.ppdu_id =
3019 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3020 					htt_desc[1]);
3021 		ts.ack_frame_rssi =
3022 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3023 					htt_desc[1]);
3024 
3025 		ts.first_msdu = 1;
3026 		ts.last_msdu = 1;
3027 
3028 		if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
3029 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
3030 
3031 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3032 		dp_tx_comp_process_tx_status(tx_desc, &ts, peer);
3033 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3034 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3035 
3036 		break;
3037 	}
3038 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3039 	{
3040 		dp_tx_reinject_handler(tx_desc, status);
3041 		break;
3042 	}
3043 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3044 	{
3045 		dp_tx_inspect_handler(tx_desc, status);
3046 		break;
3047 	}
3048 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3049 	{
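		/* The frame's source MAC (ether_shost) is copied into the
		 * status words so dp_tx_mec_handler() can read it at
		 * status + sizeof(uint32_t)
		 */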
3050 		qdf_mem_copy(((uint8_t *)&htt_desc[1]),
3051 			     &nbuf->data[DP_MAC_ADDR_LEN],
3052 			     DP_MAC_ADDR_LEN);
3053 		dp_tx_mec_handler(vdev, status);
3054 		break;
3055 	}
3056 	default:
3057 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3058 			  "%s Invalid HTT tx_status %d\n",
3059 			  __func__, tx_status);
3060 		break;
3061 	}
3062 }
3063 
3064 /**
3065  * dp_tx_comp_handler() - Tx completion handler
3066  * @soc: core txrx main context
3067  * @ring_id: completion ring id
3068  * @quota: No. of packets/descriptors that can be serviced in one loop
3069  *
3070  * This function will collect hardware release ring element contents and
3071  * handle descriptor contents. Based on contents, free packet or handle error
3072  * conditions
3073  *
3074  * Return: none
3075  */
3076 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3077 {
3078 	void *tx_comp_hal_desc;
3079 	uint8_t buffer_src;
3080 	uint8_t pool_id;
3081 	uint32_t tx_desc_id;
3082 	struct dp_tx_desc_s *tx_desc = NULL;
3083 	struct dp_tx_desc_s *head_desc = NULL;
3084 	struct dp_tx_desc_s *tail_desc = NULL;
3085 	uint32_t num_processed;
3086 	uint32_t count;
3087 
3088 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3089 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3090 				"%s %d : HAL RING Access Failed -- %pK",
3091 				__func__, __LINE__, hal_srng);
3092 		return 0;
3093 	}
3094 
3095 	num_processed = 0;
3096 	count = 0;
3097 
3098 	/* Find head descriptor from completion ring */
3099 	while (qdf_likely(tx_comp_hal_desc =
3100 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3101 
3102 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3103 
3104 		/* If this buffer was not released by TQM or FW, then it is not
3105 		 * a Tx completion indication; assert */
3106 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3107 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3108 
3109 			QDF_TRACE(QDF_MODULE_ID_DP,
3110 					QDF_TRACE_LEVEL_FATAL,
3111 					"Tx comp release_src != TQM | FW");
3112 
3113 			qdf_assert_always(0);
3114 		}
3115 
3116 		/* Get descriptor id */
3117 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3118 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3119 			DP_TX_DESC_ID_POOL_OS;
3120 
3121 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3122 			continue;
3123 
3124 		/* Find Tx descriptor */
3125 		tx_desc = dp_tx_desc_find(soc, pool_id,
3126 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3127 				DP_TX_DESC_ID_PAGE_OS,
3128 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3129 				DP_TX_DESC_ID_OFFSET_OS);
3130 
3131 		/*
3132 		 * If the descriptor is already freed in vdev_detach,
3133 		 * continue to next descriptor
3134 		 */
3135 		if (!tx_desc->vdev) {
3136 			QDF_TRACE(QDF_MODULE_ID_DP,
3137 				  QDF_TRACE_LEVEL_INFO,
3138 				  "Descriptor freed in vdev_detach %d",
3139 				  tx_desc_id);
3140 
3141 			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3142 			count++;
3143 			continue;
3144 		}
3145 
3146 		/*
3147 		 * If the release source is FW, process the HTT status
3148 		 */
3149 		if (qdf_unlikely(buffer_src ==
3150 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3151 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3152 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3153 					htt_tx_status);
3154 			dp_tx_process_htt_completion(tx_desc,
3155 					htt_tx_status);
3156 		} else {
3157 			/* Pool id is not matching. Error */
3158 			if (tx_desc->pool_id != pool_id) {
3159 				QDF_TRACE(QDF_MODULE_ID_DP,
3160 					QDF_TRACE_LEVEL_FATAL,
3161 					"Tx Comp pool id %d not matched %d",
3162 					pool_id, tx_desc->pool_id);
3163 
3164 				qdf_assert_always(0);
3165 			}
3166 
3167 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3168 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3169 				QDF_TRACE(QDF_MODULE_ID_DP,
3170 					QDF_TRACE_LEVEL_FATAL,
3171 					"Txdesc invalid, flgs = %x,id = %d",
3172 					tx_desc->flags,	tx_desc_id);
3173 				qdf_assert_always(0);
3174 			}
3175 
3176 			/* First ring descriptor on the cycle */
3177 			if (!head_desc) {
3178 				head_desc = tx_desc;
3179 				tail_desc = tx_desc;
3180 			}
3181 
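			/* Chain this descriptor into the batch handed to
			 * dp_tx_comp_process_desc_list() after ring access ends
			 */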
3182 			tail_desc->next = tx_desc;
3183 			tx_desc->next = NULL;
3184 			tail_desc = tx_desc;
3185 
3186 			/* Collect hw completion contents */
3187 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3188 					&tx_desc->comp, 1);
3189 
3190 		}
3191 
3192 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3193 
3194 		/*
3195 		 * Stop processing once the processed packet count reaches
3196 		 * the given quota
3197 		 */
3198 		if ((num_processed >= quota))
3199 			break;
3200 
3201 		count++;
3202 	}
3203 
3204 	hal_srng_access_end(soc->hal_soc, hal_srng);
3205 
3206 	/* Process the reaped descriptors */
3207 	if (head_desc)
3208 		dp_tx_comp_process_desc_list(soc, head_desc);
3209 
3210 	return num_processed;
3211 }
3212 
3213 #ifdef CONVERGED_TDLS_ENABLE
3214 /**
3215  * dp_tx_non_std() - Allow the control-path SW to send data frames
3216  *
3217  * @vdev_handle: which vdev should transmit the tx data frames
3218  * @tx_spec: what non-standard handling to apply to the tx data frames
3219  * @msdu_list: NULL-terminated list of tx MSDUs
3220  *
3221  * Return: NULL on success,
3222  *         nbuf when it fails to send
3223  */
3224 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3225 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3226 {
3227 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3228 
3229 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3230 		vdev->is_tdls_frame = true;
3231 	return dp_tx_send(vdev_handle, msdu_list);
3232 }
3233 #endif
3234 
3235 /**
3236  * dp_tx_vdev_attach() - attach vdev to dp tx
3237  * @vdev: virtual device instance
3238  *
3239  * Return: QDF_STATUS_SUCCESS: success
3240  *         QDF_STATUS_E_RESOURCES: Error return
3241  */
3242 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3243 {
3244 	/*
3245 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3246 	 */
3247 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3248 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3249 
3250 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3251 			vdev->vdev_id);
3252 
3253 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3254 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3255 
3256 	/*
3257 	 * Set HTT Extension Valid bit to 0 by default
3258 	 */
3259 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3260 
3261 	dp_tx_vdev_update_search_flags(vdev);
3262 
3263 	return QDF_STATUS_SUCCESS;
3264 }
3265 
3266 /**
3267  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3268  * @vdev: virtual device instance
3269  *
3270  * Return: void
3271  *
3272  */
3273 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3274 {
3275 	struct dp_soc *soc = vdev->pdev->soc;
3276 
3277 	/*
3278 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3279 	 * for TDLS link
3280 	 *
3281 	 * Enable AddrY (SA based search) only for non-WDS STA and
3282 	 * ProxySTA VAP modes.
3283 	 *
3284 	 * In all other VAP modes, only DA based search should be
3285 	 * enabled
3286 	 */
3287 	if (vdev->opmode == wlan_op_mode_sta &&
3288 	    vdev->tdls_link_connected)
3289 		vdev->hal_desc_addr_search_flags =
3290 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3291 	else if ((vdev->opmode == wlan_op_mode_sta &&
3292 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
3293 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3294 	else
3295 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3296 
3297 	/* Set search type only when peer map v2 messaging is enabled
3298 	 * as we will have the search index (AST hash) only when v2 is
3299 	 * enabled
3300 	 */
3301 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3302 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3303 	else
3304 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3305 }
3306 
3307 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3308 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3309 {
3310 }
3311 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3312 
3313 /**
3314  * dp_tx_desc_flush() - release resources associated to tx_desc
3315  * @vdev: virtual device instance
3316  *
3317  * This function will free all outstanding Tx buffers,
3318  * including ME buffers for which either the free during
3319  * completion did not happen or the completion was not
3320  * received.
3321  */
3322 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3323 {
3324 	uint8_t i, num_pool;
3325 	uint32_t j;
3326 	uint32_t num_desc;
3327 	struct dp_soc *soc = vdev->pdev->soc;
3328 	struct dp_tx_desc_s *tx_desc = NULL;
3329 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3330 
3331 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3332 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3333 
3334 	for (i = 0; i < num_pool; i++) {
3335 		for (j = 0; j < num_desc; j++) {
3336 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3337 			if (tx_desc_pool &&
3338 				tx_desc_pool->desc_pages.cacheable_pages) {
3339 				tx_desc = dp_tx_desc_find(soc, i,
3340 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3341 					DP_TX_DESC_ID_PAGE_OS,
3342 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3343 					DP_TX_DESC_ID_OFFSET_OS);
3344 
3345 				if (tx_desc && (tx_desc->vdev == vdev) &&
3346 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3347 					dp_tx_comp_free_buf(soc, tx_desc);
3348 					dp_tx_desc_release(tx_desc, i);
3349 				}
3350 			}
3351 		}
3352 	}
3353 }
3354 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3355 
3356 /**
3357  * dp_tx_vdev_detach() - detach vdev from dp tx
3358  * @vdev: virtual device instance
3359  *
3360  * Return: QDF_STATUS_SUCCESS: success
3361  *         QDF_STATUS_E_RESOURCES: Error return
3362  */
3363 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3364 {
3365 	dp_tx_desc_flush(vdev);
3366 	return QDF_STATUS_SUCCESS;
3367 }
3368 
3369 /**
3370  * dp_tx_pdev_attach() - attach pdev to dp tx
3371  * @pdev: physical device instance
3372  *
3373  * Return: QDF_STATUS_SUCCESS: success
3374  *         QDF_STATUS_E_RESOURCES: Error return
3375  */
3376 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3377 {
3378 	struct dp_soc *soc = pdev->soc;
3379 
3380 	/* Initialize Flow control counters */
3381 	qdf_atomic_init(&pdev->num_tx_exception);
3382 	qdf_atomic_init(&pdev->num_tx_outstanding);
3383 
3384 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3385 		/* Initialize descriptors in TCL Ring */
3386 		hal_tx_init_data_ring(soc->hal_soc,
3387 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3388 	}
3389 
3390 	return QDF_STATUS_SUCCESS;
3391 }
3392 
3393 /**
3394  * dp_tx_pdev_detach() - detach pdev from dp tx
3395  * @pdev: physical device instance
3396  *
3397  * Return: QDF_STATUS_SUCCESS: success
3398  *         QDF_STATUS_E_RESOURCES: Error return
3399  */
3400 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3401 {
3402 	dp_tx_me_exit(pdev);
3403 	return QDF_STATUS_SUCCESS;
3404 }
3405 
3406 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3407 /* Pools will be allocated dynamically */
3408 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3409 					int num_desc)
3410 {
3411 	uint8_t i;
3412 
3413 	for (i = 0; i < num_pool; i++) {
3414 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3415 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3416 	}
3417 
3418 	return 0;
3419 }
3420 
3421 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3422 {
3423 	uint8_t i;
3424 
3425 	for (i = 0; i < num_pool; i++)
3426 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3427 }
3428 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3429 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3430 					int num_desc)
3431 {
3432 	uint8_t i;
3433 
3434 	/* Allocate software Tx descriptor pools */
3435 	for (i = 0; i < num_pool; i++) {
3436 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3437 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3438 					"%s Tx Desc Pool alloc %d failed %pK",
3439 					__func__, i, soc);
3440 			return ENOMEM;
3441 		}
3442 	}
3443 	return 0;
3444 }
3445 
3446 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3447 {
3448 	uint8_t i;
3449 
3450 	for (i = 0; i < num_pool; i++) {
3451 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3452 		if (dp_tx_desc_pool_free(soc, i)) {
3453 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3454 				"%s Tx Desc Pool Free failed", __func__);
3455 		}
3456 	}
3457 }
3458 
3459 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3460 
3461 /**
3462  * dp_tx_soc_detach() - detach soc from dp tx
3463  * @soc: core txrx main context
3464  *
3465  * This function will detach dp tx into main device context
3466  * will free dp tx resource and initialize resources
3467  *
3468  * Return: QDF_STATUS_SUCCESS: success
3469  *         QDF_STATUS_E_RESOURCES: Error return
3470  */
3471 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3472 {
3473 	uint8_t num_pool;
3474 	uint16_t num_desc;
3475 	uint16_t num_ext_desc;
3476 	uint8_t i;
3477 
3478 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3479 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3480 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3481 
3482 	dp_tx_flow_control_deinit(soc);
3483 	dp_tx_delete_static_pools(soc, num_pool);
3484 
3485 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3486 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3487 			__func__, num_pool, num_desc);
3488 
3489 	for (i = 0; i < num_pool; i++) {
3490 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3491 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3492 					"%s Tx Ext Desc Pool Free failed",
3493 					__func__);
3494 			return QDF_STATUS_E_RESOURCES;
3495 		}
3496 	}
3497 
3498 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3499 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3500 			__func__, num_pool, num_ext_desc);
3501 
3502 	for (i = 0; i < num_pool; i++) {
3503 		dp_tx_tso_desc_pool_free(soc, i);
3504 	}
3505 
3506 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3507 			"%s TSO Desc Pool %d Free descs = %d",
3508 			__func__, num_pool, num_desc);
3509 
3510 
3511 	for (i = 0; i < num_pool; i++)
3512 		dp_tx_tso_num_seg_pool_free(soc, i);
3513 
3514 
3515 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3516 		"%s TSO Num of seg Desc Pool %d Free descs = %d",
3517 		__func__, num_pool, num_desc);
3518 
3519 	return QDF_STATUS_SUCCESS;
3520 }
3521 
3522 /**
3523  * dp_tx_soc_attach() - attach soc to dp tx
3524  * @soc: core txrx main context
3525  *
3526  * This function will attach dp tx into main device context
3527  * will allocate dp tx resource and initialize resources
3528  *
3529  * Return: QDF_STATUS_SUCCESS: success
3530  *         QDF_STATUS_E_RESOURCES: Error return
3531  */
3532 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3533 {
3534 	uint8_t i;
3535 	uint8_t num_pool;
3536 	uint32_t num_desc;
3537 	uint32_t num_ext_desc;
3538 
3539 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3540 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3541 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3542 
3543 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3544 		goto fail;
3545 
3546 	dp_tx_flow_control_init(soc);
3547 
3548 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3549 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3550 			__func__, num_pool, num_desc);
3551 
3552 	/* Allocate extension tx descriptor pools */
3553 	for (i = 0; i < num_pool; i++) {
3554 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3555 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3556 				"MSDU Ext Desc Pool alloc %d failed %pK",
3557 				i, soc);
3558 
3559 			goto fail;
3560 		}
3561 	}
3562 
3563 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3564 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3565 			__func__, num_pool, num_ext_desc);
3566 
3567 	for (i = 0; i < num_pool; i++) {
3568 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3569 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3570 				"TSO Desc Pool alloc %d failed %pK",
3571 				i, soc);
3572 
3573 			goto fail;
3574 		}
3575 	}
3576 
3577 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3578 			"%s TSO Desc Alloc %d, descs = %d",
3579 			__func__, num_pool, num_desc);
3580 
3581 	for (i = 0; i < num_pool; i++) {
3582 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3583 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3584 				"TSO Num of seg Pool alloc %d failed %pK",
3585 				i, soc);
3586 
3587 			goto fail;
3588 		}
3589 	}
3590 
3591 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3592 			"%s TSO Num of seg pool Alloc %d, descs = %d",
3593 			__func__, num_pool, num_desc);
3594 
3595 	/* Initialize descriptors in TCL Rings */
3596 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3597 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3598 			hal_tx_init_data_ring(soc->hal_soc,
3599 					soc->tcl_data_ring[i].hal_srng);
3600 		}
3601 	}
3602 
3603 	/*
3604 	 * TODO: Add a runtime config option to enable this.
3605 	 */
3606 	/*
3607 	 * Due to multiple issues on NPR EMU, enable this selectively
3608 	 * only for NPR EMU; it should be removed once NPR platforms
3609 	 * are stable.
3610 	 */
3611 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3612 
3613 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3614 			"%s HAL Tx init Success", __func__);
3615 
3616 	return QDF_STATUS_SUCCESS;
3617 
3618 fail:
3619 	/* Detach will take care of freeing only allocated resources */
3620 	dp_tx_soc_detach(soc);
3621 	return QDF_STATUS_E_RESOURCES;
3622 }
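/*
 * Illustrative sketch (not part of the driver): dp_tx_soc_attach() relies on
 * a single "goto fail" path that simply calls dp_tx_soc_detach(), so the
 * teardown must tolerate a partially initialized state and free only what
 * was actually allocated. The hypothetical sketch_pool_table_* helpers below
 * reproduce that pattern with plain calloc()/free(); all names and sizes
 * here are assumptions made for illustration only.
 */
#include <stdlib.h>
#include <string.h>

#define SKETCH_NUM_POOLS 4

struct sketch_pool_table {
	void *pool[SKETCH_NUM_POOLS];
};

/* Safe on a partially initialized table: free() ignores NULL entries */
static void sketch_pool_table_deinit(struct sketch_pool_table *t)
{
	int i;

	for (i = 0; i < SKETCH_NUM_POOLS; i++) {
		free(t->pool[i]);
		t->pool[i] = NULL;
	}
}

static int sketch_pool_table_init(struct sketch_pool_table *t, size_t sz)
{
	int i;

	memset(t, 0, sizeof(*t));

	for (i = 0; i < SKETCH_NUM_POOLS; i++) {
		t->pool[i] = calloc(1, sz);
		if (!t->pool[i])
			goto fail;
	}
	return 0;

fail:
	/* Deinit frees only the pools that were successfully allocated */
	sketch_pool_table_deinit(t);
	return -1;
}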
3623 
3624 /**
3625  * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
3626  * @pdev: pointer to DP PDEV structure
3627  * @seg_info_head: Pointer to the head of the segment info list
3628  *
3629  * Return: void
3630  */
3631 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3632 		struct dp_tx_seg_info_s *seg_info_head)
3633 {
3634 	struct dp_tx_me_buf_t *mc_uc_buf;
3635 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3636 	qdf_nbuf_t nbuf = NULL;
3637 	uint64_t phy_addr;
3638 
3639 	while (seg_info_head) {
3640 		nbuf = seg_info_head->nbuf;
3641 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3642 			seg_info_head->frags[0].vaddr;
3643 		phy_addr = seg_info_head->frags[0].paddr_hi;
3644 		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3645 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3646 				phy_addr,
3647 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3648 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3649 		qdf_nbuf_free(nbuf);
3650 		seg_info_new = seg_info_head;
3651 		seg_info_head = seg_info_head->next;
3652 		qdf_mem_free(seg_info_new);
3653 	}
3654 }
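/*
 * Illustrative sketch (not part of the driver): the fragment descriptors
 * freed above carry the DMA address as a 32-bit low/high pair, and
 * dp_tx_me_mem_free() rebuilds the 64-bit address as (hi << 32) | lo before
 * unmapping. A minimal, standalone round-trip of that packing; the
 * sketch_frag type and helpers are hypothetical names used only here.
 */
#include <stdint.h>

struct sketch_frag {
	uint32_t paddr_lo;
	uint32_t paddr_hi;
};

static void sketch_frag_set_paddr(struct sketch_frag *f, uint64_t paddr)
{
	f->paddr_lo = (uint32_t)paddr;          /* low 32 bits */
	f->paddr_hi = (uint32_t)(paddr >> 32);  /* high 32 bits */
}

static uint64_t sketch_frag_get_paddr(const struct sketch_frag *f)
{
	return ((uint64_t)f->paddr_hi << 32) | f->paddr_lo;
}
/*
 * Usage: sketch_frag_set_paddr(&f, dma_addr) when the fragment is filled in,
 * and sketch_frag_get_paddr(&f) when the address is needed again for unmap.
 */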
3655 
3656 /**
3657  * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
3658  * @vdev_handle: DP VDEV handle
3659  * @nbuf: Multicast nbuf
3660  * @newmac: Table of the clients to which packets have to be sent
3661  * @new_mac_cnt: Number of clients
3662  *
3663  * Return: number of converted packets
3664  */
3665 uint16_t
3666 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3667 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3668 {
3669 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3670 	struct dp_pdev *pdev = vdev->pdev;
3671 	struct ether_header *eh;
3672 	uint8_t *data;
3673 	uint16_t len;
3674 
3675 	/* reference to frame dst addr */
3676 	uint8_t *dstmac;
3677 	/* copy of original frame src addr */
3678 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3679 
3680 	/* local index into newmac */
3681 	uint8_t new_mac_idx = 0;
3682 	struct dp_tx_me_buf_t *mc_uc_buf;
3683 	qdf_nbuf_t  nbuf_clone;
3684 	struct dp_tx_msdu_info_s msdu_info;
3685 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3686 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3687 	struct dp_tx_seg_info_s *seg_info_new;
3688 	struct dp_tx_frag_info_s data_frag;
3689 	qdf_dma_addr_t paddr_data;
3690 	qdf_dma_addr_t paddr_mcbuf = 0;
3691 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3692 	QDF_STATUS status;
3693 
3694 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3695 
3696 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3697 
3698 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3699 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3700 
3701 	len = qdf_nbuf_len(nbuf);
3702 
3703 	data = qdf_nbuf_data(nbuf);
3704 
3705 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3706 			QDF_DMA_TO_DEVICE);
3707 
3708 	if (status) {
3709 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3710 				"Mapping failure Error:%d", status);
3711 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3712 		qdf_nbuf_free(nbuf);
3713 		return 1;
3714 	}
3715 
3716 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3717 
3718 	/* preparing data fragment */
3719 	data_frag.vaddr = data + IEEE80211_ADDR_LEN;
3720 	data_frag.paddr_lo = (uint32_t)paddr_data;
3721 	data_frag.paddr_hi = (((uint64_t)paddr_data) >> 32);
3722 	data_frag.len = len - DP_MAC_ADDR_LEN;
3723 
3724 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3725 		dstmac = newmac[new_mac_idx];
3726 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3727 				"added mac addr (%pM)", dstmac);
3728 
3729 		/* Check for NULL Mac Address */
3730 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3731 			continue;
3732 
3733 		/* frame to self mac. skip */
3734 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3735 			continue;
3736 
3737 		/*
3738 		 * TODO: optimize to avoid malloc in per-packet path
3739 		 * For eg. seg_pool can be made part of vdev structure
3740 		 */
3741 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3742 
3743 		if (!seg_info_new) {
3744 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3745 					"alloc failed");
3746 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3747 			goto fail_seg_alloc;
3748 		}
3749 
3750 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3751 		if (mc_uc_buf == NULL)
3752 			goto fail_buf_alloc;
3753 
3754 		/*
3755 		 * TODO: Check if we need to clone the nbuf
3756 		 * Or can we just use the reference for all cases
3757 		 */
3758 		if (new_mac_idx < (new_mac_cnt - 1)) {
3759 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3760 			if (nbuf_clone == NULL) {
3761 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3762 				goto fail_clone;
3763 			}
3764 		} else {
3765 			/*
3766 			 * Update the ref
3767 			 * to account for frame sent without cloning
3768 			 */
3769 			qdf_nbuf_ref(nbuf);
3770 			nbuf_clone = nbuf;
3771 		}
3772 
3773 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3774 
3775 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3776 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3777 				&paddr_mcbuf);
3778 
3779 		if (status) {
3780 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3781 					"Mapping failure Error:%d", status);
3782 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3783 			goto fail_map;
3784 		}
3785 
3786 		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3787 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3788 		seg_info_new->frags[0].paddr_hi =
3789 			((uint64_t) paddr_mcbuf >> 32);
3790 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3791 
3792 		seg_info_new->frags[1] = data_frag;
3793 		seg_info_new->nbuf = nbuf_clone;
3794 		seg_info_new->frag_cnt = 2;
3795 		seg_info_new->total_len = len;
3796 
3797 		seg_info_new->next = NULL;
3798 
3799 		if (seg_info_head == NULL)
3800 			seg_info_head = seg_info_new;
3801 		else
3802 			seg_info_tail->next = seg_info_new;
3803 
3804 		seg_info_tail = seg_info_new;
3805 	}
3806 
3807 	if (!seg_info_head) {
3808 		goto free_return;
3809 	}
3810 
3811 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3812 	msdu_info.num_seg = new_mac_cnt;
3813 	msdu_info.frm_type = dp_tx_frm_me;
3814 
3815 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3816 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3817 
3818 	while (seg_info_head->next) {
3819 		seg_info_new = seg_info_head;
3820 		seg_info_head = seg_info_head->next;
3821 		qdf_mem_free(seg_info_new);
3822 	}
3823 	qdf_mem_free(seg_info_head);
3824 
3825 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3826 	qdf_nbuf_free(nbuf);
3827 	return new_mac_cnt;
3828 
3829 fail_map:
3830 	qdf_nbuf_free(nbuf_clone);
3831 
3832 fail_clone:
3833 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3834 
3835 fail_buf_alloc:
3836 	qdf_mem_free(seg_info_new);
3837 
3838 fail_seg_alloc:
3839 	dp_tx_me_mem_free(pdev, seg_info_head);
3840 
3841 free_return:
3842 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3843 	qdf_nbuf_free(nbuf);
3844 	return 1;
3845 }
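/*
 * Illustrative sketch (not part of the driver): for every destination client,
 * dp_tx_me_send_convert_ucast() builds a segment of two fragments - a private
 * 6-byte buffer holding the client's unicast DA, followed by the original
 * frame starting right after its own DA, so the payload is mapped once and
 * shared by all copies. The struct and helper below are hypothetical,
 * host-memory-only stand-ins for that layout.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_MAC_LEN 6

struct sketch_me_seg {
	/* frag 0: per-client destination MAC (private copy) */
	uint8_t dstmac[SKETCH_MAC_LEN];
	/* frag 1: shared view of the original frame with its DA stripped */
	const uint8_t *rest_of_frame;
	uint32_t rest_len;
};

static int sketch_build_me_seg(struct sketch_me_seg *seg,
			       const uint8_t *frame, uint32_t frame_len,
			       const uint8_t newmac[SKETCH_MAC_LEN])
{
	if (frame_len <= SKETCH_MAC_LEN)
		return -1;

	memcpy(seg->dstmac, newmac, SKETCH_MAC_LEN);
	seg->rest_of_frame = frame + SKETCH_MAC_LEN;
	seg->rest_len = frame_len - SKETCH_MAC_LEN;
	return 0;
}
/*
 * Design note: the driver clones the nbuf for all but the last client and
 * only takes an extra reference for the final one, so the original buffer is
 * consumed by the last transmission instead of being copied again.
 */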
3846 
3847