xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "hal_hw_headers.h"
21 #include "dp_tx.h"
22 #include "dp_tx_desc.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "hal_tx.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_net_types.h"
29 #include <wlan_cfg.h>
30 #ifdef MESH_MODE_SUPPORT
31 #include "if_meta_hdr.h"
32 #endif
33 
34 #define DP_TX_QUEUE_MASK 0x3
35 
36 /* TODO Add support in TSO */
37 #define DP_DESC_NUM_FRAG(x) 0
38 
39 /* disable TQM_BYPASS */
40 #define TQM_BYPASS_WAR 0
41 
42 /* Invalid peer id for reinject */
43 #define DP_INVALID_PEER 0XFFFE
44 
45 /* Mapping between hal encrypt type and cdp_sec_type */
46 #define MAX_CDP_SEC_TYPE 12
47 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
48 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
49 					HAL_TX_ENCRYPT_TYPE_WEP_128,
50 					HAL_TX_ENCRYPT_TYPE_WEP_104,
51 					HAL_TX_ENCRYPT_TYPE_WEP_40,
52 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
53 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
54 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
55 					HAL_TX_ENCRYPT_TYPE_WAPI,
56 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
57 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
58 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
59 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
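/*
 * Illustrative note (not part of the original source): sec_type_map[] is
 * indexed by the cdp_sec_type value taken from the vdev or from the exception
 * metadata, so a lookup such as sec_type_map[cdp_sec_type_tkip] is expected
 * to yield HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC, assuming the cdp_sec_type enum
 * starts at cdp_sec_type_none = 0 and follows the same ordering as this table.
 */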
60 
61 /**
62  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
63  * @vdev: DP Virtual device handle
64  * @nbuf: Buffer pointer
65  * @queue: queue ids container for nbuf
66  *
67  * A TX queue is identified by two ids: the software descriptor pool id and
68  * the DMA ring id. Depending on the Tx feature set and hardware
69  * configuration, the combination of queue ids can differ.
70  * For example -
71  * With XPS enabled, all TX descriptor pools and DMA rings are assigned per CPU id.
72  * With no XPS (lock based resource protection), descriptor pool ids differ per
73  * vdev, while the DMA ring id is the same as the single pdev id.
74  *
75  * Return: None
76  */
77 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
78 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
79 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
80 {
81 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
82 	queue->desc_pool_id = queue_offset;
83 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
84 
85 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
86 			"%s, pool_id:%d ring_id: %d",
87 			__func__, queue->desc_pool_id, queue->ring_id);
88 
89 	return;
90 }
91 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
92 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
93 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
94 {
95 	/* get flow id */
96 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
97 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
98 
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
100 			"%s, pool_id:%d ring_id: %d",
101 			__func__, queue->desc_pool_id, queue->ring_id);
102 
103 	return;
104 }
105 #endif
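/*
 * Example (illustrative): with QCA_OL_TX_MULTIQ_SUPPORT and
 * DP_TX_QUEUE_MASK = 0x3, an skb whose queue_mapping is 6 selects
 * queue_offset = 6 & 0x3 = 2, i.e. descriptor pool 2 and the TCL ring id
 * stored in soc->tx_ring_map[2].
 */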
106 
107 #if defined(FEATURE_TSO)
108 /**
109  * dp_tx_tso_unmap_segment() - Unmap TSO segment
110  *
111  * @soc - core txrx main context
112  * @tx_desc - Tx software descriptor
113  */
114 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
115 				    struct dp_tx_desc_s *tx_desc)
116 {
117 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
118 	if (qdf_unlikely(!tx_desc->tso_desc)) {
119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
120 			  "%s %d TSO desc is NULL!",
121 			  __func__, __LINE__);
122 		qdf_assert(0);
123 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
124 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
125 			  "%s %d TSO num desc is NULL!",
126 			  __func__, __LINE__);
127 		qdf_assert(0);
128 	} else {
129 		bool is_last_seg;
130 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
131 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
132 
133 		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
134 			is_last_seg = false;
135 		else
136 			is_last_seg = true;
137 		tso_num_desc->num_seg.tso_cmn_num_seg--;
138 		qdf_nbuf_unmap_tso_segment(soc->osdev,
139 					   tx_desc->tso_desc, is_last_seg);
140 	}
141 }
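/*
 * Illustration: for a TSO nbuf split into three segments, the first two calls
 * into dp_tx_tso_unmap_segment() see tso_cmn_num_seg > 1 and pass
 * is_last_seg = false; only the final call (tso_cmn_num_seg == 1) passes
 * is_last_seg = true, letting qdf_nbuf_unmap_tso_segment() release the common
 * mapping as well.
 */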
142 
143 /**
144  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
145  *                            back to the freelist
146  *
147  * @soc - soc device handle
148  * @tx_desc - Tx software descriptor
149  */
150 static void dp_tx_tso_desc_release(struct dp_soc *soc,
151 				   struct dp_tx_desc_s *tx_desc)
152 {
153 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
154 	if (qdf_unlikely(!tx_desc->tso_desc)) {
155 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 			  "%s %d TSO desc is NULL!",
157 			  __func__, __LINE__);
158 		qdf_assert(0);
159 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
160 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 			  "%s %d TSO num desc is NULL!",
162 			  __func__, __LINE__);
163 		qdf_assert(0);
164 	} else {
165 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
166 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
167 
168 		/* Add the tso num segment into the free list */
169 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
170 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
171 					    tx_desc->tso_num_desc);
172 			tx_desc->tso_num_desc = NULL;
173 		}
174 
175 		/* Add the tso segment into the free list*/
176 		dp_tx_tso_desc_free(soc,
177 				    tx_desc->pool_id, tx_desc->tso_desc);
178 		tx_desc->tso_desc = NULL;
179 	}
180 }
181 #else
182 static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
183 				    struct dp_tx_desc_s *tx_desc)
184 
185 {
186 }
187 
188 static void dp_tx_tso_desc_release(struct dp_soc *soc,
189 				   struct dp_tx_desc_s *tx_desc)
190 {
191 }
192 #endif
193 /**
194  * dp_tx_desc_release() - Release Tx Descriptor
195  * @tx_desc: Tx Descriptor
196  * @desc_pool_id: Descriptor Pool ID
197  *
198  * Deallocate all resources attached to Tx descriptor and free the Tx
199  * descriptor.
200  *
201  * Return:
202  */
203 static void
204 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
205 {
206 	struct dp_pdev *pdev = tx_desc->pdev;
207 	struct dp_soc *soc;
208 	uint8_t comp_status = 0;
209 
210 	qdf_assert(pdev);
211 
212 	soc = pdev->soc;
213 
214 	if (tx_desc->frm_type == dp_tx_frm_tso)
215 		dp_tx_tso_desc_release(soc, tx_desc);
216 
217 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
218 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
219 
220 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
221 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
222 
223 	qdf_atomic_dec(&pdev->num_tx_outstanding);
224 
225 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
226 		qdf_atomic_dec(&pdev->num_tx_exception);
227 
228 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
229 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
230 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
231 	else
232 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
233 
234 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
235 		"Tx Completion Release desc %d status %d outstanding %d",
236 		tx_desc->id, comp_status,
237 		qdf_atomic_read(&pdev->num_tx_outstanding));
238 
239 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
240 	return;
241 }
242 
243 /**
244  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
245  * @vdev: DP vdev Handle
246  * @nbuf: skb
247  *
248  * Prepares and fills HTT metadata in the frame pre-header for special frames
249  * that should be transmitted using varying transmit parameters.
250  * There are 2 VDEV modes that currently need this special metadata -
251  *  1) Mesh Mode
252  *  2) DSRC Mode
253  *
254  * Return: HTT metadata size
255  *
256  */
257 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
258 		uint32_t *meta_data)
259 {
260 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
261 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
262 
263 	uint8_t htt_desc_size;
264 
265 	/* Size rounded up to a multiple of 8 bytes */
266 	uint8_t htt_desc_size_aligned;
267 
268 	uint8_t *hdr = NULL;
269 
270 	/*
271 	 * Metadata - HTT MSDU Extension header
272 	 */
273 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
274 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
275 
276 	if (vdev->mesh_vdev) {
277 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
278 					htt_desc_size_aligned)) {
279 			DP_STATS_INC(vdev,
280 				     tx_i.dropped.headroom_insufficient, 1);
281 			return 0;
282 		}
283 		/* Fill and add HTT metaheader */
284 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
285 		if (hdr == NULL) {
286 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
287 					"Error in filling HTT metadata");
288 
289 			return 0;
290 		}
291 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
292 
293 	} else if (vdev->opmode == wlan_op_mode_ocb) {
294 		/* Todo - Add support for DSRC */
295 	}
296 
297 	return htt_desc_size_aligned;
298 }
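/*
 * Worked example (illustrative): if sizeof(struct htt_tx_msdu_desc_ext2_t)
 * were 36 bytes, htt_desc_size_aligned = (36 + 7) & ~0x7 = 40; for a mesh
 * vdev, 40 bytes of headroom are then consumed by qdf_nbuf_push_head() and
 * 40 is returned for the caller to fold into tx_desc->pkt_offset.
 */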
299 
300 /**
301  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
302  * @tso_seg: TSO segment to process
303  * @ext_desc: Pointer to MSDU extension descriptor
304  *
305  * Return: void
306  */
307 #if defined(FEATURE_TSO)
308 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
309 		void *ext_desc)
310 {
311 	uint8_t num_frag;
312 	uint32_t tso_flags;
313 
314 	/*
315 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
316 	 * tcp_flag_mask
317 	 *
318 	 * Checksum enable flags are set in TCL descriptor and not in Extension
319 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
320 	 */
321 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
322 
323 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
324 
325 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
326 		tso_seg->tso_flags.ip_len);
327 
328 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
329 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
330 
331 
332 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
333 		uint32_t lo = 0;
334 		uint32_t hi = 0;
335 
336 		qdf_dmaaddr_to_32s(
337 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
338 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
339 			tso_seg->tso_frags[num_frag].length);
340 	}
341 
342 	return;
343 }
344 #else
345 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
346 		void *ext_desc)
347 {
348 	return;
349 }
350 #endif
351 
352 #if defined(FEATURE_TSO)
353 /**
354  * dp_tx_free_tso_seg() - Loop through the tso segments
355  *                        allocated and free them
356  *
357  * @soc: soc handle
358  * @free_seg: list of tso segments
359  * @msdu_info: msdu descriptor
360  *
361  * Return - void
362  */
363 static void dp_tx_free_tso_seg(struct dp_soc *soc,
364 	struct qdf_tso_seg_elem_t *free_seg,
365 	struct dp_tx_msdu_info_s *msdu_info)
366 {
367 	struct qdf_tso_seg_elem_t *next_seg;
368 
369 	while (free_seg) {
370 		next_seg = free_seg->next;
371 		dp_tx_tso_desc_free(soc,
372 			msdu_info->tx_queue.desc_pool_id,
373 			free_seg);
374 		free_seg = next_seg;
375 	}
376 }
377 
378 /**
379  * dp_tx_free_tso_num_seg() - Loop through the tso num segments
380  *                            allocated and free them
381  *
382  * @soc:  soc handle
383  * @free_seg: list of tso segments
384  * @msdu_info: msdu descriptor
385  * Return - void
386  */
387 static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
388 	struct qdf_tso_num_seg_elem_t *free_seg,
389 	struct dp_tx_msdu_info_s *msdu_info)
390 {
391 	struct qdf_tso_num_seg_elem_t *next_seg;
392 
393 	while (free_seg) {
394 		next_seg = free_seg->next;
395 		dp_tso_num_seg_free(soc,
396 			msdu_info->tx_queue.desc_pool_id,
397 			free_seg);
398 		free_seg = next_seg;
399 	}
400 }
401 
402 /**
403  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
404  * @vdev: virtual device handle
405  * @msdu: network buffer
406  * @msdu_info: meta data associated with the msdu
407  *
408  * Return: QDF_STATUS_SUCCESS success
409  */
410 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
411 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
412 {
413 	struct qdf_tso_seg_elem_t *tso_seg;
414 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
415 	struct dp_soc *soc = vdev->pdev->soc;
416 	struct qdf_tso_info_t *tso_info;
417 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
418 
419 	tso_info = &msdu_info->u.tso_info;
420 	tso_info->curr_seg = NULL;
421 	tso_info->tso_seg_list = NULL;
422 	tso_info->num_segs = num_seg;
423 	msdu_info->frm_type = dp_tx_frm_tso;
424 	tso_info->tso_num_seg_list = NULL;
425 
426 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
427 
428 	while (num_seg) {
429 		tso_seg = dp_tx_tso_desc_alloc(
430 				soc, msdu_info->tx_queue.desc_pool_id);
431 		if (tso_seg) {
432 			tso_seg->next = tso_info->tso_seg_list;
433 			tso_info->tso_seg_list = tso_seg;
434 			num_seg--;
435 		} else {
436 			struct qdf_tso_seg_elem_t *free_seg =
437 				tso_info->tso_seg_list;
438 
439 			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
440 
441 			return QDF_STATUS_E_NOMEM;
442 		}
443 	}
444 
445 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
446 
447 	tso_num_seg = dp_tso_num_seg_alloc(soc,
448 			msdu_info->tx_queue.desc_pool_id);
449 
450 	if (tso_num_seg) {
451 		tso_num_seg->next = tso_info->tso_num_seg_list;
452 		tso_info->tso_num_seg_list = tso_num_seg;
453 	} else {
454 		/* Bug: free tso_num_seg and tso_seg */
455 		/* Free the already allocated num of segments */
456 		struct qdf_tso_seg_elem_t *free_seg =
457 					tso_info->tso_seg_list;
458 
459 		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
460 			__func__);
461 		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
462 
463 		return QDF_STATUS_E_NOMEM;
464 	}
465 
466 	msdu_info->num_seg =
467 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
468 
469 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
470 			msdu_info->num_seg);
471 
472 	if (!(msdu_info->num_seg)) {
473 		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
474 		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
475 					msdu_info);
476 		return QDF_STATUS_E_INVAL;
477 	}
478 
479 	tso_info->curr_seg = tso_info->tso_seg_list;
480 
481 	return QDF_STATUS_SUCCESS;
482 }
483 #else
484 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
485 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
486 {
487 	return QDF_STATUS_E_NOMEM;
488 }
489 #endif
490 
491 /**
492  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
493  * @vdev: DP Vdev handle
494  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
495  * @desc_pool_id: Descriptor Pool ID
496  *
497  * Return:
498  */
499 static
500 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
501 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
502 {
503 	uint8_t i;
504 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
505 	struct dp_tx_seg_info_s *seg_info;
506 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
507 	struct dp_soc *soc = vdev->pdev->soc;
508 
509 	/* Allocate an extension descriptor */
510 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
511 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
512 
513 	if (!msdu_ext_desc) {
514 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
515 		return NULL;
516 	}
517 
518 	if (msdu_info->exception_fw &&
519 			qdf_unlikely(vdev->mesh_vdev)) {
520 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
521 				&msdu_info->meta_data[0],
522 				sizeof(struct htt_tx_msdu_desc_ext2_t));
523 		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
524 	}
525 
526 	switch (msdu_info->frm_type) {
527 	case dp_tx_frm_sg:
528 	case dp_tx_frm_me:
529 	case dp_tx_frm_raw:
530 		seg_info = msdu_info->u.sg_info.curr_seg;
531 		/* Update the buffer pointers in MSDU Extension Descriptor */
532 		for (i = 0; i < seg_info->frag_cnt; i++) {
533 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
534 				seg_info->frags[i].paddr_lo,
535 				seg_info->frags[i].paddr_hi,
536 				seg_info->frags[i].len);
537 		}
538 
539 		break;
540 
541 	case dp_tx_frm_tso:
542 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
543 				&cached_ext_desc[0]);
544 		break;
545 
546 
547 	default:
548 		break;
549 	}
550 
551 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
552 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
553 
554 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
555 			msdu_ext_desc->vaddr);
556 
557 	return msdu_ext_desc;
558 }
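/*
 * Note (editorial): the extension descriptor is composed in the stack-resident
 * cached_ext_desc[] array and only copied into the DMA-able memory of
 * msdu_ext_desc by hal_tx_ext_desc_sync() once all fields are filled,
 * presumably to keep writes to uncached descriptor memory to a minimum.
 */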
559 
560 /**
561  * dp_tx_trace_pkt() - Trace TX packet at DP layer
562  *
563  * @skb: skb to be traced
564  * @msdu_id: msdu_id of the packet
565  * @vdev_id: vdev_id of the packet
566  *
567  * Return: None
568  */
569 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
570 			    uint8_t vdev_id)
571 {
572 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
573 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
574 	DPTRACE(qdf_dp_trace_ptr(skb,
575 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
576 				 QDF_TRACE_DEFAULT_PDEV_ID,
577 				 qdf_nbuf_data_addr(skb),
578 				 sizeof(qdf_nbuf_data(skb)),
579 				 msdu_id, vdev_id));
580 
581 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
582 
583 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
584 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
585 				      msdu_id, QDF_TX));
586 }
587 
588 /**
589  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
590  * @vdev: DP vdev handle
591  * @nbuf: skb
592  * @desc_pool_id: Descriptor pool ID
593  * @msdu_info: MSDU info holding metadata to be sent to the fw
594  * @tx_exc_metadata: Handle that holds exception path metadata
595  * Allocate and prepare Tx descriptor with msdu information.
596  *
597  * Return: Pointer to Tx Descriptor on success,
598  *         NULL on failure
599  */
600 static
601 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
602 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
603 		struct dp_tx_msdu_info_s *msdu_info,
604 		struct cdp_tx_exception_metadata *tx_exc_metadata)
605 {
606 	uint8_t align_pad;
607 	uint8_t is_exception = 0;
608 	uint8_t htt_hdr_size;
609 	struct ether_header *eh;
610 	struct dp_tx_desc_s *tx_desc;
611 	struct dp_pdev *pdev = vdev->pdev;
612 	struct dp_soc *soc = pdev->soc;
613 
614 	/* Allocate software Tx descriptor */
615 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
616 	if (qdf_unlikely(!tx_desc)) {
617 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
618 		return NULL;
619 	}
620 
621 	/* Flow control/Congestion Control counters */
622 	qdf_atomic_inc(&pdev->num_tx_outstanding);
623 
624 	/* Initialize the SW tx descriptor */
625 	tx_desc->nbuf = nbuf;
626 	tx_desc->frm_type = dp_tx_frm_std;
627 	tx_desc->tx_encap_type = (tx_exc_metadata ?
628 			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
629 	tx_desc->vdev = vdev;
630 	tx_desc->pdev = pdev;
631 	tx_desc->msdu_ext_desc = NULL;
632 	tx_desc->pkt_offset = 0;
633 
634 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
635 
636 	/* Reset the control block */
637 	qdf_nbuf_reset_ctxt(nbuf);
638 
639 	/*
640 	 * For special modes (vdev_type == ocb or mesh), data frames should be
641 	 * transmitted using varying transmit parameters (tx spec) which include
642 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
643 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
644 	 * These frames are sent as exception packets to firmware.
645 	 *
646 	 * HW requirement is that metadata should always point to a
647 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
648 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
649 	 *  to get 8-byte aligned start address along with align_pad added
650 	 *
651 	 *  |-----------------------------|
652 	 *  |                             |
653 	 *  |-----------------------------| <-----Buffer Pointer Address given
654 	 *  |                             |  ^    in HW descriptor (aligned)
655 	 *  |       HTT Metadata          |  |
656 	 *  |                             |  |
657 	 *  |                             |  | Packet Offset given in descriptor
658 	 *  |                             |  |
659 	 *  |-----------------------------|  |
660 	 *  |       Alignment Pad         |  v
661 	 *  |-----------------------------| <----- Actual buffer start address
662 	 *  |        SKB Data             |           (Unaligned)
663 	 *  |                             |
664 	 *  |                             |
665 	 *  |                             |
666 	 *  |                             |
667 	 *  |                             |
668 	 *  |-----------------------------|
669 	 */
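	/*
	 * Worked example (illustrative, not from the original comment): if
	 * qdf_nbuf_data(nbuf) ends in 0x..6, align_pad = 0x6 & 0x7 = 6. Six
	 * pad bytes are pushed first, then the 8-byte aligned HTT metadata,
	 * and tx_desc->pkt_offset = align_pad + htt_hdr_size tells the target
	 * how many metadata/pad bytes precede the original frame.
	 */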
670 	if (qdf_unlikely((msdu_info->exception_fw)) ||
671 				(vdev->opmode == wlan_op_mode_ocb)) {
672 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
673 
674 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
675 			DP_STATS_INC(vdev,
676 				     tx_i.dropped.headroom_insufficient, 1);
677 			goto failure;
678 		}
679 
680 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
681 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
682 					"qdf_nbuf_push_head failed");
683 			goto failure;
684 		}
685 
686 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
687 				msdu_info->meta_data);
688 		if (htt_hdr_size == 0)
689 			goto failure;
690 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
691 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
692 		is_exception = 1;
693 	}
694 
695 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
696 				qdf_nbuf_map(soc->osdev, nbuf,
697 					QDF_DMA_TO_DEVICE))) {
698 		/* Handle failure */
699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
700 				"qdf_nbuf_map failed");
701 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
702 		goto failure;
703 	}
704 
705 	if (qdf_unlikely(vdev->nawds_enabled)) {
706 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
707 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
708 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
709 			is_exception = 1;
710 		}
711 	}
712 
713 #if !TQM_BYPASS_WAR
714 	if (is_exception || tx_exc_metadata)
715 #endif
716 	{
717 		/* Temporary WAR due to TQM VP issues */
718 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
719 		qdf_atomic_inc(&pdev->num_tx_exception);
720 	}
721 
722 	return tx_desc;
723 
724 failure:
725 	dp_tx_desc_release(tx_desc, desc_pool_id);
726 	return NULL;
727 }
728 
729 /**
730  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
731  * @vdev: DP vdev handle
732  * @nbuf: skb
733  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
734  * @desc_pool_id : Descriptor Pool ID
735  *
736  * Allocate and prepare Tx descriptor with msdu and fragment descritor
737  * information. For frames wth fragments, allocate and prepare
738  * an MSDU extension descriptor
739  *
740  * Return: Pointer to Tx Descriptor on success,
741  *         NULL on failure
742  */
743 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
744 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
745 		uint8_t desc_pool_id)
746 {
747 	struct dp_tx_desc_s *tx_desc;
748 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
749 	struct dp_pdev *pdev = vdev->pdev;
750 	struct dp_soc *soc = pdev->soc;
751 
752 	/* Allocate software Tx descriptor */
753 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
754 	if (!tx_desc) {
755 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
756 		return NULL;
757 	}
758 
759 	/* Flow control/Congestion Control counters */
760 	qdf_atomic_inc(&pdev->num_tx_outstanding);
761 
762 	/* Initialize the SW tx descriptor */
763 	tx_desc->nbuf = nbuf;
764 	tx_desc->frm_type = msdu_info->frm_type;
765 	tx_desc->tx_encap_type = vdev->tx_encap_type;
766 	tx_desc->vdev = vdev;
767 	tx_desc->pdev = pdev;
768 	tx_desc->pkt_offset = 0;
769 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
770 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
771 
772 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
773 
774 	/* Reset the control block */
775 	qdf_nbuf_reset_ctxt(nbuf);
776 
777 	/* Handle scattered frames - TSO/SG/ME */
778 	/* Allocate and prepare an extension descriptor for scattered frames */
779 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
780 	if (!msdu_ext_desc) {
781 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
782 				"%s Tx Extension Descriptor Alloc Fail",
783 				__func__);
784 		goto failure;
785 	}
786 
787 #if TQM_BYPASS_WAR
788 	/* Temporary WAR due to TQM VP issues */
789 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
790 	qdf_atomic_inc(&pdev->num_tx_exception);
791 #endif
792 	if (qdf_unlikely(msdu_info->exception_fw))
793 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
794 
795 	tx_desc->msdu_ext_desc = msdu_ext_desc;
796 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
797 
798 	return tx_desc;
799 failure:
800 	dp_tx_desc_release(tx_desc, desc_pool_id);
801 	return NULL;
802 }
803 
804 /**
805  * dp_tx_prepare_raw() - Prepare RAW packet TX
806  * @vdev: DP vdev handle
807  * @nbuf: buffer pointer
808  * @seg_info: Pointer to Segment info Descriptor to be prepared
809  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
810  *     descriptor
811  *
812  * Return:
813  */
814 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
815 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
816 {
817 	qdf_nbuf_t curr_nbuf = NULL;
818 	uint16_t total_len = 0;
819 	qdf_dma_addr_t paddr;
820 	int32_t i;
821 	int32_t mapped_buf_num = 0;
822 
823 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
824 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
825 
826 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
827 
828 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
829 	if (vdev->raw_mode_war &&
830 	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
831 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
832 
833 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
834 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
835 
836 		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
837 					QDF_DMA_TO_DEVICE)) {
838 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
839 				"%s dma map error ", __func__);
840 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
841 			mapped_buf_num = i;
842 			goto error;
843 		}
844 
845 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
846 		seg_info->frags[i].paddr_lo = paddr;
847 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
848 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
849 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
850 		total_len += qdf_nbuf_len(curr_nbuf);
851 	}
852 
853 	seg_info->frag_cnt = i;
854 	seg_info->total_len = total_len;
855 	seg_info->next = NULL;
856 
857 	sg_info->curr_seg = seg_info;
858 
859 	msdu_info->frm_type = dp_tx_frm_raw;
860 	msdu_info->num_seg = 1;
861 
862 	return nbuf;
863 
864 error:
865 	i = 0;
866 	while (nbuf) {
867 		curr_nbuf = nbuf;
868 		if (i < mapped_buf_num) {
869 			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
870 			i++;
871 		}
872 		nbuf = qdf_nbuf_next(nbuf);
873 		qdf_nbuf_free(curr_nbuf);
874 	}
875 	return NULL;
876 
877 }
878 
879 /**
880  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
881  * @soc: DP Soc Handle
882  * @vdev: DP vdev handle
883  * @tx_desc: Tx Descriptor Handle
884  * @tid: TID from HLOS for overriding default DSCP-TID mapping
885  * @fw_metadata: Metadata to send to Target Firmware along with frame
886  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
887  * @tx_exc_metadata: Handle that holds exception path meta data
888  *
889  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
890  *  from software Tx descriptor
891  *
892  * Return:
893  */
894 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
895 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
896 				   uint16_t fw_metadata, uint8_t ring_id,
897 				   struct cdp_tx_exception_metadata
898 					*tx_exc_metadata)
899 {
900 	uint8_t type;
901 	uint16_t length;
902 	void *hal_tx_desc, *hal_tx_desc_cached;
903 	qdf_dma_addr_t dma_addr;
904 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
905 
906 	enum cdp_sec_type sec_type = (tx_exc_metadata ?
907 			tx_exc_metadata->sec_type : vdev->sec_type);
908 
909 	/* Return Buffer Manager ID */
910 	uint8_t bm_id = ring_id;
911 	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
912 
913 	hal_tx_desc_cached = (void *) cached_desc;
914 	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
915 
916 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
917 		length = HAL_TX_EXT_DESC_WITH_META_DATA;
918 		type = HAL_TX_BUF_TYPE_EXT_DESC;
919 		dma_addr = tx_desc->msdu_ext_desc->paddr;
920 	} else {
921 		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
922 		type = HAL_TX_BUF_TYPE_BUFFER;
923 		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
924 	}
925 
926 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
927 	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
928 					dma_addr, bm_id, tx_desc->id,
929 					type, soc->hal_soc);
930 
931 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
932 		return QDF_STATUS_E_RESOURCES;
933 
934 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
935 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
936 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
937 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
938 				vdev->pdev->lmac_id);
939 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
940 				    vdev->search_type);
941 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
942 				     vdev->bss_ast_hash);
943 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
944 					  vdev->dscp_tid_map_id);
945 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
946 			sec_type_map[sec_type]);
947 
948 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
949 			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
950 			__func__, length, type, (uint64_t)dma_addr,
951 			tx_desc->pkt_offset, tx_desc->id);
952 
953 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
954 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
955 
956 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
957 			vdev->hal_desc_addr_search_flags);
958 
959 	/* verify checksum offload configuration*/
960 	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
961 		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
962 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
963 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
964 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
965 	}
966 
967 	if (tid != HTT_TX_EXT_TID_INVALID)
968 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
969 
970 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
971 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
972 
973 
974 	/* Sync cached descriptor with HW */
975 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
976 
977 	if (!hal_tx_desc) {
978 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
979 			  "%s TCL ring full ring_id:%d", __func__, ring_id);
980 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
981 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
982 		return QDF_STATUS_E_RESOURCES;
983 	}
984 
985 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
986 
987 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
988 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
989 
990 	return QDF_STATUS_SUCCESS;
991 }
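/*
 * Note (editorial): the TCL descriptor is likewise assembled in the
 * stack-resident cached_desc[] and copied into the ring entry returned by
 * hal_srng_src_get_next() only after every field has been programmed, so a
 * "TCL ring full" condition is detected before any descriptor content is
 * written to the ring.
 */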
992 
993 
994 /**
995  * dp_cce_classify() - Classify the frame based on CCE rules
996  * @vdev: DP vdev handle
997  * @nbuf: skb
998  *
999  * Classify frames based on CCE rules
1000  * Return: true if the frame was classified by CCE rules,
1001  *         false otherwise
1002  */
1003 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1004 {
1005 	struct ether_header *eh = NULL;
1006 	uint16_t   ether_type;
1007 	qdf_llc_t *llcHdr;
1008 	qdf_nbuf_t nbuf_clone = NULL;
1009 	qdf_dot3_qosframe_t *qos_wh = NULL;
1010 
1011 	/* for mesh packets don't do any classification */
1012 	if (qdf_unlikely(vdev->mesh_vdev))
1013 		return false;
1014 
1015 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1016 		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
1017 		ether_type = eh->ether_type;
1018 		llcHdr = (qdf_llc_t *)(nbuf->data +
1019 					sizeof(struct ether_header));
1020 	} else {
1021 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1022 		/* For encrypted packets don't do any classification */
1023 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1024 			return false;
1025 
1026 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1027 			if (qdf_unlikely(
1028 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1029 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1030 
1031 				ether_type = *(uint16_t *)(nbuf->data
1032 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1033 						+ sizeof(qdf_llc_t)
1034 						- sizeof(ether_type));
1035 				llcHdr = (qdf_llc_t *)(nbuf->data +
1036 						QDF_IEEE80211_4ADDR_HDR_LEN);
1037 			} else {
1038 				ether_type = *(uint16_t *)(nbuf->data
1039 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1040 						+ sizeof(qdf_llc_t)
1041 						- sizeof(ether_type));
1042 				llcHdr = (qdf_llc_t *)(nbuf->data +
1043 					QDF_IEEE80211_3ADDR_HDR_LEN);
1044 			}
1045 
1046 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1047 				&& (ether_type ==
1048 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1049 
1050 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1051 				return true;
1052 			}
1053 		}
1054 
1055 		return false;
1056 	}
1057 
1058 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1059 		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
1060 				sizeof(*llcHdr));
1061 		nbuf_clone = qdf_nbuf_clone(nbuf);
1062 		if (qdf_unlikely(nbuf_clone)) {
1063 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1064 
1065 			if (ether_type == htons(ETHERTYPE_8021Q)) {
1066 				qdf_nbuf_pull_head(nbuf_clone,
1067 						sizeof(qdf_net_vlanhdr_t));
1068 			}
1069 		}
1070 	} else {
1071 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1072 			nbuf_clone = qdf_nbuf_clone(nbuf);
1073 			if (qdf_unlikely(nbuf_clone)) {
1074 				qdf_nbuf_pull_head(nbuf_clone,
1075 					sizeof(qdf_net_vlanhdr_t));
1076 			}
1077 		}
1078 	}
1079 
1080 	if (qdf_unlikely(nbuf_clone))
1081 		nbuf = nbuf_clone;
1082 
1083 
1084 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1085 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1086 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1087 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1088 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1089 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1090 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1091 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1092 		if (qdf_unlikely(nbuf_clone != NULL))
1093 			qdf_nbuf_free(nbuf_clone);
1094 		return true;
1095 	}
1096 
1097 	if (qdf_unlikely(nbuf_clone != NULL))
1098 		qdf_nbuf_free(nbuf_clone);
1099 
1100 	return false;
1101 }
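/*
 * Illustration: an EAPOL handshake frame or a DHCP frame carried in IPv4
 * makes dp_cce_classify() return true; when the hardware CCE block is
 * disabled (soc->cce_disable), callers use this result to force such frames
 * onto the FW exception path (DP_TX_DESC_FLAG_TO_FW) on the VO TID.
 */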
1102 
1103 /**
1104  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1105  * @vdev: DP vdev handle
1106  * @msdu_info: msdu descriptor in which the classified TID is stored
1107  *
1108  * Extract the DSCP or PCP information from frame and map into TID value.
1109  * Software based TID classification is required when more than 2 DSCP-TID
1110  * mapping tables are needed.
1111  * Hardware supports 2 DSCP-TID mapping tables
1112  *
1113  * Return: void
1114  */
1115 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1116 		struct dp_tx_msdu_info_s *msdu_info)
1117 {
1118 	uint8_t tos = 0, dscp_tid_override = 0;
1119 	uint8_t *hdr_ptr, *L3datap;
1120 	uint8_t is_mcast = 0;
1121 	struct ether_header *eh = NULL;
1122 	qdf_ethervlan_header_t *evh = NULL;
1123 	uint16_t   ether_type;
1124 	qdf_llc_t *llcHdr;
1125 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1126 
1127 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1128 
1129 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1130 		return;
1131 
1132 	/* for mesh packets don't do any classification */
1133 	if (qdf_unlikely(vdev->mesh_vdev))
1134 		return;
1135 
1136 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1137 		eh = (struct ether_header *) nbuf->data;
1138 		hdr_ptr = eh->ether_dhost;
1139 		L3datap = hdr_ptr + sizeof(struct ether_header);
1140 	} else {
1141 		qdf_dot3_qosframe_t *qos_wh =
1142 			(qdf_dot3_qosframe_t *) nbuf->data;
1143 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1144 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1145 		return;
1146 	}
1147 
1148 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1149 	ether_type = eh->ether_type;
1150 
1151 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
1152 	/*
1153 	 * Check if packet is dot3 or eth2 type.
1154 	 */
1155 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1156 		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
1157 				sizeof(*llcHdr));
1158 
1159 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1160 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1161 				sizeof(*llcHdr);
1162 			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
1163 					+ sizeof(*llcHdr) +
1164 					sizeof(qdf_net_vlanhdr_t));
1165 		} else {
1166 			L3datap = hdr_ptr + sizeof(struct ether_header) +
1167 				sizeof(*llcHdr);
1168 		}
1169 	} else {
1170 		if (ether_type == htons(ETHERTYPE_8021Q)) {
1171 			evh = (qdf_ethervlan_header_t *) eh;
1172 			ether_type = evh->ether_type;
1173 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1174 		}
1175 	}
1176 
1177 	/*
1178 	 * Find priority from IP TOS DSCP field
1179 	 */
1180 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1181 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1182 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1183 			/* Only for unicast frames */
1184 			if (!is_mcast) {
1185 				/* send it on VO queue */
1186 				msdu_info->tid = DP_VO_TID;
1187 			}
1188 		} else {
1189 			/*
1190 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1191 			 * from TOS byte.
1192 			 */
1193 			tos = ip->ip_tos;
1194 			dscp_tid_override = 1;
1195 
1196 		}
1197 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1198 		/* TODO
1199 		 * use flowlabel
1200 		 * igmpmld cases to be handled in phase 2
1201 		 */
1202 		unsigned long ver_pri_flowlabel;
1203 		unsigned long pri;
1204 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1205 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1206 			DP_IPV6_PRIORITY_SHIFT;
1207 		tos = pri;
1208 		dscp_tid_override = 1;
1209 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1210 		msdu_info->tid = DP_VO_TID;
1211 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1212 		/* Only for unicast frames */
1213 		if (!is_mcast) {
1214 			/* send ucast arp on VO queue */
1215 			msdu_info->tid = DP_VO_TID;
1216 		}
1217 	}
1218 
1219 	/*
1220 	 * Assign all MCAST packets to BE
1221 	 */
1222 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1223 		if (is_mcast) {
1224 			tos = 0;
1225 			dscp_tid_override = 1;
1226 		}
1227 	}
1228 
1229 	if (dscp_tid_override == 1) {
1230 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1231 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1232 	}
1233 	return;
1234 }
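/*
 * Worked example (illustrative, assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3f): an IPv4 packet with TOS 0xb8 (DSCP 46, EF) gives
 * tos = (0xb8 >> 2) & 0x3f = 0x2e, and msdu_info->tid is then read from
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][0x2e].
 */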
1235 
1236 #ifdef CONVERGED_TDLS_ENABLE
1237 /**
1238  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1239  * @tx_desc: TX descriptor
1240  *
1241  * Return: None
1242  */
1243 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1244 {
1245 	if (tx_desc->vdev) {
1246 		if (tx_desc->vdev->is_tdls_frame)
1247 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1248 		tx_desc->vdev->is_tdls_frame = false;
1249 	}
1250 }
1251 
1252 /**
1253  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1254  * @tx_desc: TX descriptor
1255  * @vdev: datapath vdev handle
1256  *
1257  * Return: None
1258  */
1259 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
1260 				  struct dp_vdev *vdev)
1261 {
1262 	struct hal_tx_completion_status ts = {0};
1263 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1264 
1265 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1266 	if (vdev->tx_non_std_data_callback.func) {
1267 		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
1268 		vdev->tx_non_std_data_callback.func(
1269 				vdev->tx_non_std_data_callback.ctxt,
1270 				nbuf, ts.status);
1271 		return;
1272 	}
1273 }
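#else
/*
 * Editorial sketch (assumption): dp_tx_update_tdls_flags() is called
 * unconditionally from dp_tx_send_msdu_single() below, so a no-op stub is
 * assumed to be needed when CONVERGED_TDLS_ENABLE is not defined, unless an
 * equivalent definition is already provided elsewhere in the build.
 */
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}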
1274 #endif
1275 
1276 /**
1277  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1278  * @vdev: DP vdev handle
1279  * @nbuf: skb
1280  * @msdu_info: MSDU info holding the TID (for overriding the default
1281  *             DSCP-TID mapping), the Tx queue to be used for this frame,
1282  *             and any metadata to be sent to the fw
1283  * @peer_id: peer_id of the peer in case of NAWDS frames
1284  * @tx_exc_metadata: Handle that holds exception path metadata
1285  *
1286  * Return: NULL on success,
1287  *         nbuf when it fails to send
1288  */
1289 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1290 		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1291 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1292 {
1293 	struct dp_pdev *pdev = vdev->pdev;
1294 	struct dp_soc *soc = pdev->soc;
1295 	struct dp_tx_desc_s *tx_desc;
1296 	QDF_STATUS status;
1297 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1298 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1299 	uint16_t htt_tcl_metadata = 0;
1300 	uint8_t tid = msdu_info->tid;
1301 
1302 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1303 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1304 			msdu_info, tx_exc_metadata);
1305 	if (!tx_desc) {
1306 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1307 			  "%s Tx_desc prepare Fail vdev %pK queue %d",
1308 			  __func__, vdev, tx_q->desc_pool_id);
1309 		return nbuf;
1310 	}
1311 
1312 	if (qdf_unlikely(soc->cce_disable)) {
1313 		if (dp_cce_classify(vdev, nbuf) == true) {
1314 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1315 			tid = DP_VO_TID;
1316 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1317 		}
1318 	}
1319 
1320 	dp_tx_update_tdls_flags(tx_desc);
1321 
1322 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1323 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1324 				"%s %d : HAL RING Access Failed -- %pK",
1325 				__func__, __LINE__, hal_srng);
1326 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1327 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1328 		goto fail_return;
1329 	}
1330 
1331 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1332 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1333 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1334 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1335 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1336 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1337 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1338 				peer_id);
1339 	} else
1340 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1341 
1342 
1343 	if (msdu_info->exception_fw) {
1344 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1345 	}
1346 
1347 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1348 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1349 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1350 
1351 	if (status != QDF_STATUS_SUCCESS) {
1352 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1353 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1354 			  __func__, tx_desc, tx_q->ring_id);
1355 		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1356 		goto fail_return;
1357 	}
1358 
1359 	nbuf = NULL;
1360 
1361 fail_return:
1362 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1363 		hal_srng_access_end(soc->hal_soc, hal_srng);
1364 		hif_pm_runtime_put(soc->hif_handle);
1365 	} else {
1366 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1367 	}
1368 
1369 	return nbuf;
1370 }
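/*
 * Note (editorial): hal_srng_access_end(), which updates the HW head pointer,
 * is only called when hif_pm_runtime_get() succeeds (the bus is awake);
 * otherwise hal_srng_access_end_reap() completes the software bookkeeping and
 * leaves the HW pointer update to be flushed later.
 */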
1371 
1372 /**
1373  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1374  * @vdev: DP vdev handle
1375  * @nbuf: skb
1376  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1377  *
1378  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1379  *
1380  * Return: NULL on success,
1381  *         nbuf when it fails to send
1382  */
1383 #if QDF_LOCK_STATS
1384 static noinline
1385 #else
1386 static
1387 #endif
1388 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1389 				    struct dp_tx_msdu_info_s *msdu_info)
1390 {
1391 	uint8_t i;
1392 	struct dp_pdev *pdev = vdev->pdev;
1393 	struct dp_soc *soc = pdev->soc;
1394 	struct dp_tx_desc_s *tx_desc;
1395 	bool is_cce_classified = false;
1396 	QDF_STATUS status;
1397 	uint16_t htt_tcl_metadata = 0;
1398 
1399 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1400 	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
1401 
1402 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1403 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1404 				"%s %d : HAL RING Access Failed -- %pK",
1405 				__func__, __LINE__, hal_srng);
1406 		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
1407 		return nbuf;
1408 	}
1409 
1410 	if (qdf_unlikely(soc->cce_disable)) {
1411 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1412 		if (is_cce_classified) {
1413 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1414 			msdu_info->tid = DP_VO_TID;
1415 		}
1416 	}
1417 
1418 	if (msdu_info->frm_type == dp_tx_frm_me)
1419 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1420 
1421 	i = 0;
1422 	/* TODO: Add debug print to track i and num_seg */
1423 	/*
1424 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1425 	 * descriptors using information in msdu_info
1426 	 */
1427 	while (i < msdu_info->num_seg) {
1428 		/*
1429 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1430 		 * descriptor
1431 		 */
1432 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1433 				tx_q->desc_pool_id);
1434 
1435 		if (!tx_desc) {
1436 			if (msdu_info->frm_type == dp_tx_frm_me) {
1437 				dp_tx_me_free_buf(pdev,
1438 					(void *)(msdu_info->u.sg_info
1439 						.curr_seg->frags[0].vaddr));
1440 			}
1441 			goto done;
1442 		}
1443 
1444 		if (msdu_info->frm_type == dp_tx_frm_me) {
1445 			tx_desc->me_buffer =
1446 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1447 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1448 		}
1449 
1450 		if (is_cce_classified)
1451 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1452 
1453 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1454 		if (msdu_info->exception_fw) {
1455 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1456 		}
1457 
1458 		/*
1459 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1460 		 */
1461 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1462 			htt_tcl_metadata, tx_q->ring_id, NULL);
1463 
1464 		if (status != QDF_STATUS_SUCCESS) {
1465 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1466 				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1467 				  __func__, tx_desc, tx_q->ring_id);
1468 
1469 			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
1470 				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
1471 
1472 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1473 			goto done;
1474 		}
1475 
1476 		/*
1477 		 * TODO
1478 		 * if tso_info structure can be modified to have curr_seg
1479 		 * as first element, following 2 blocks of code (for TSO and SG)
1480 		 * can be combined into 1
1481 		 */
1482 
1483 		/*
1484 		 * For frames with multiple segments (TSO, ME), jump to next
1485 		 * segment.
1486 		 */
1487 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1488 			if (msdu_info->u.tso_info.curr_seg->next) {
1489 				msdu_info->u.tso_info.curr_seg =
1490 					msdu_info->u.tso_info.curr_seg->next;
1491 
1492 				/*
1493 				 * If this is a jumbo nbuf, then increment the number of
1494 				 * nbuf users for each additional segment of the msdu.
1495 				 * This will ensure that the skb is freed only after
1496 				 * receiving tx completion for all segments of an nbuf
1497 				 */
1498 				qdf_nbuf_inc_users(nbuf);
1499 
1500 				/* Check with MCL if this is needed */
1501 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1502 			}
1503 		}
1504 
1505 		/*
1506 		 * For Multicast-Unicast converted packets,
1507 		 * each converted frame (for a client) is represented as
1508 		 * 1 segment
1509 		 */
1510 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1511 				(msdu_info->frm_type == dp_tx_frm_me)) {
1512 			if (msdu_info->u.sg_info.curr_seg->next) {
1513 				msdu_info->u.sg_info.curr_seg =
1514 					msdu_info->u.sg_info.curr_seg->next;
1515 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1516 			}
1517 		}
1518 		i++;
1519 	}
1520 
1521 	nbuf = NULL;
1522 
1523 done:
1524 	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
1525 		hal_srng_access_end(soc->hal_soc, hal_srng);
1526 		hif_pm_runtime_put(soc->hif_handle);
1527 	} else {
1528 		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
1529 	}
1530 
1531 	return nbuf;
1532 }
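/*
 * Illustration: for a TSO nbuf split into three segments, the loop above
 * prepares and enqueues three descriptors; qdf_nbuf_inc_users() is called
 * twice (once per additional segment), so the underlying skb is freed only
 * after Tx completions for all three segments have been processed.
 */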
1533 
1534 /**
1535  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1536  *                     for SG frames
1537  * @vdev: DP vdev handle
1538  * @nbuf: skb
1539  * @seg_info: Pointer to Segment info Descriptor to be prepared
1540  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1541  *
1542  * Return: nbuf on success,
1543  *         NULL when it fails to prepare the SG info
1544  */
1545 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1546 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1547 {
1548 	uint32_t cur_frag, nr_frags;
1549 	qdf_dma_addr_t paddr;
1550 	struct dp_tx_sg_info_s *sg_info;
1551 
1552 	sg_info = &msdu_info->u.sg_info;
1553 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1554 
1555 	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
1556 				QDF_DMA_TO_DEVICE)) {
1557 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1558 				"dma map error");
1559 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1560 
1561 		qdf_nbuf_free(nbuf);
1562 		return NULL;
1563 	}
1564 
1565 	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1566 	seg_info->frags[0].paddr_lo = paddr;
1567 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1568 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1569 	seg_info->frags[0].vaddr = (void *) nbuf;
1570 
1571 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1572 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1573 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1574 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1575 					"frag dma map error");
1576 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1577 			qdf_nbuf_free(nbuf);
1578 			return NULL;
1579 		}
1580 
1581 		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1582 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1583 		seg_info->frags[cur_frag + 1].paddr_hi =
1584 			((uint64_t) paddr) >> 32;
1585 		seg_info->frags[cur_frag + 1].len =
1586 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1587 	}
1588 
1589 	seg_info->frag_cnt = (cur_frag + 1);
1590 	seg_info->total_len = qdf_nbuf_len(nbuf);
1591 	seg_info->next = NULL;
1592 
1593 	sg_info->curr_seg = seg_info;
1594 
1595 	msdu_info->frm_type = dp_tx_frm_sg;
1596 	msdu_info->num_seg = 1;
1597 
1598 	return nbuf;
1599 }
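/*
 * Illustration: for an nbuf with a 128-byte linear area and two page
 * fragments, frags[0] describes the linear part and frags[1]/frags[2] the
 * page fragments, giving frag_cnt = 3 and total_len = qdf_nbuf_len(nbuf).
 */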
1600 
1601 #ifdef MESH_MODE_SUPPORT
1602 
1603 /**
1604  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
1605  *				and prepare msdu_info for mesh frames.
1606  * @vdev: DP vdev handle
1607  * @nbuf: skb
1608  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1609  *
1610  * Return: NULL on failure,
1611  *         nbuf when extracted successfully
1612  */
1613 static
1614 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1615 				struct dp_tx_msdu_info_s *msdu_info)
1616 {
1617 	struct meta_hdr_s *mhdr;
1618 	struct htt_tx_msdu_desc_ext2_t *meta_data =
1619 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
1620 
1621 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1622 
1623 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
1624 		msdu_info->exception_fw = 0;
1625 		goto remove_meta_hdr;
1626 	}
1627 
1628 	msdu_info->exception_fw = 1;
1629 
1630 	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);
1631 
1632 	meta_data->host_tx_desc_pool = 1;
1633 	meta_data->update_peer_cache = 1;
1634 	meta_data->learning_frame = 1;
1635 
1636 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
1637 		meta_data->power = mhdr->power;
1638 
1639 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
1640 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
1641 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
1642 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
1643 
1644 		meta_data->dyn_bw = 1;
1645 
1646 		meta_data->valid_pwr = 1;
1647 		meta_data->valid_mcs_mask = 1;
1648 		meta_data->valid_nss_mask = 1;
1649 		meta_data->valid_preamble_type  = 1;
1650 		meta_data->valid_retries = 1;
1651 		meta_data->valid_bw_info = 1;
1652 	}
1653 
1654 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
1655 		meta_data->encrypt_type = 0;
1656 		meta_data->valid_encrypt_type = 1;
1657 		meta_data->learning_frame = 0;
1658 	}
1659 
1660 	meta_data->valid_key_flags = 1;
1661 	meta_data->key_flags = (mhdr->keyix & 0x3);
1662 
1663 remove_meta_hdr:
1664 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
1665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1666 				"qdf_nbuf_pull_head failed");
1667 		qdf_nbuf_free(nbuf);
1668 		return NULL;
1669 	}
1670 
1671 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
1672 		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1673 	else
1674 		msdu_info->tid = qdf_nbuf_get_priority(nbuf);
1675 
1676 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1677 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
1678 			" tid %d to_fw %d",
1679 			__func__, msdu_info->meta_data[0],
1680 			msdu_info->meta_data[1],
1681 			msdu_info->meta_data[2],
1682 			msdu_info->meta_data[3],
1683 			msdu_info->meta_data[4],
1684 			msdu_info->meta_data[5],
1685 			msdu_info->tid, msdu_info->exception_fw);
1686 
1687 	return nbuf;
1688 }
1689 #else
1690 static
1691 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1692 				struct dp_tx_msdu_info_s *msdu_info)
1693 {
1694 	return nbuf;
1695 }
1696 
1697 #endif
1698 
1699 #ifdef DP_FEATURE_NAWDS_TX
1700 /**
1701  * dp_tx_prepare_nawds() - Transmit NAWDS frames
1702  * @vdev: dp_vdev handle
1703  * @nbuf: skb
1704  * @msdu_info: MSDU info to be used for this Tx frame
1705  *
1706  * Replicates the multicast frame to every NAWDS-enabled peer of the vdev,
1707  * except the source peer, via the single-MSDU transmit path.
1708  *
1709  * Return: NULL on success, nbuf on failure
1710  */
1711 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1712 		struct dp_tx_msdu_info_s *msdu_info)
1713 {
1714 	struct dp_peer *peer = NULL;
1715 	struct dp_soc *soc = vdev->pdev->soc;
1716 	struct dp_ast_entry *ast_entry = NULL;
1717 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1718 	uint16_t peer_id = HTT_INVALID_PEER;
1719 
1720 	struct dp_peer *sa_peer = NULL;
1721 	qdf_nbuf_t nbuf_copy;
1722 
1723 	qdf_spin_lock_bh(&(soc->ast_lock));
1724 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
1725 
1726 	if (ast_entry)
1727 		sa_peer = ast_entry->peer;
1728 
1729 	qdf_spin_unlock_bh(&(soc->ast_lock));
1730 
1731 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1732 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
1733 				(peer->nawds_enabled)) {
1734 			if (sa_peer == peer) {
1735 				QDF_TRACE(QDF_MODULE_ID_DP,
1736 						QDF_TRACE_LEVEL_DEBUG,
1737 						" %s: broadcast multicast packet",
1738 						 __func__);
1739 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
1740 				continue;
1741 			}
1742 
1743 			nbuf_copy = qdf_nbuf_copy(nbuf);
1744 			if (!nbuf_copy) {
1745 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1746 						"nbuf copy failed");
1747 				continue;
1748 			}
1749 
1750 			peer_id = peer->peer_ids[0];
1751 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
1752 					msdu_info, peer_id, NULL);
1753 			if (nbuf_copy != NULL) {
1754 				qdf_nbuf_free(nbuf_copy);
1755 				continue;
1756 			}
1757 			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
1758 						1, qdf_nbuf_len(nbuf));
1759 		}
1760 	}
1761 	if (peer_id == HTT_INVALID_PEER)
1762 		return nbuf;
1763 
1764 	return NULL;
1765 }
1766 #endif
1767 
1768 /**
1769  * dp_check_exc_metadata() - Checks if parameters are valid
1770  * @tx_exc: holds all exception path parameters
1771  *
1772  * Return: true when all the parameters are valid, else false
1773  *
1774  */
1775 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
1776 {
1777 	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
1778 	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
1779 	    tx_exc->sec_type > cdp_num_sec_types) {
1780 		return false;
1781 	}
1782 
1783 	return true;
1784 }
1785 
1786 /**
1787  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
1788  * @vap_dev: DP vdev handle
1789  * @nbuf: skb
1790  * @tx_exc_metadata: Handle that holds exception path meta data
1791  *
1792  * Entry point for Core Tx layer (DP_TX) invoked from
1793  * hard_start_xmit in OSIF/HDD to transmit frames through fw
1794  *
1795  * Return: NULL on success,
1796  *         nbuf when it fails to send
1797  */
1798 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
1799 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1800 {
1801 	struct ether_header *eh = NULL;
1802 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1803 	struct dp_tx_msdu_info_s msdu_info;
1804 
1805 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1806 
1807 	msdu_info.tid = tx_exc_metadata->tid;
1808 
1809 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1810 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1811 			"%s , skb %pM",
1812 			__func__, nbuf->data);
1813 
1814 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1815 
1816 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
1817 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1818 			"Invalid parameters in exception path");
1819 		goto fail;
1820 	}
1821 
1822 	/* Basic sanity checks for unsupported packets */
1823 
1824 	/* MESH mode */
1825 	if (qdf_unlikely(vdev->mesh_vdev)) {
1826 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1827 			"Mesh mode is not supported in exception path");
1828 		goto fail;
1829 	}
1830 
1831 	/* TSO or SG */
1832 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
1833 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
1834 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1835 			  "TSO and SG are not supported in exception path");
1836 
1837 		goto fail;
1838 	}
1839 
1840 	/* RAW */
1841 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
1842 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1843 			  "Raw frame is not supported in exception path");
1844 		goto fail;
1845 	}
1846 
1847 
1848 	/* Mcast enhancement*/
1849 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
1850 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
1851 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1852 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
1853 		}
1854 	}
1855 
1856 	/*
1857 	 * Get HW Queue to use for this frame.
1858 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
1859 	 * dedicated for data and 1 for command.
1860 	 * "queue_id" maps to one hardware ring.
1861 	 *  With each ring, we also associate a unique Tx descriptor pool
1862 	 *  to minimize lock contention for these resources.
1863 	 */
1864 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
1865 
1866 	/*  Single linear frame */
1867 	/*
1868 	 * If nbuf is a simple linear frame, use send_single function to
1869 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
1870 	 * SRNG. There is no need to setup a MSDU extension descriptor.
1871 	 */
1872 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
1873 			tx_exc_metadata->peer_id, tx_exc_metadata);
1874 
1875 	return nbuf;
1876 
1877 fail:
1878 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1879 			"pkt send failed");
1880 	return nbuf;
1881 }
1882 
1883 /**
1884  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
1885  * @vap_dev: DP vdev handle
1886  * @nbuf: skb
1887  *
1888  * Entry point for Core Tx layer (DP_TX) invoked from
1889  * hard_start_xmit in OSIF/HDD
1890  *
1891  * Return: NULL on success,
1892  *         nbuf when it fails to send
1893  */
1894 #ifdef MESH_MODE_SUPPORT
1895 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1896 {
1897 	struct meta_hdr_s *mhdr;
1898 	qdf_nbuf_t nbuf_mesh = NULL;
1899 	qdf_nbuf_t nbuf_clone = NULL;
1900 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1901 	uint8_t no_enc_frame = 0;
1902 
1903 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
1904 	if (nbuf_mesh == NULL) {
1905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1906 				"qdf_nbuf_unshare failed");
1907 		return nbuf;
1908 	}
1909 	nbuf = nbuf_mesh;
1910 
1911 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
1912 
1913 	if ((vdev->sec_type != cdp_sec_type_none) &&
1914 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
1915 		no_enc_frame = 1;
1916 
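	/*
	 * When the metadata carries updated per-packet tx info and this is
	 * not a no-encrypt frame, send an additional clone tagged
	 * CB_FTYPE_MESH_TX_INFO (which takes the FW exception path) so the
	 * mesh tx info can still be reported; the original frame follows
	 * the regular path below.
	 */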
1917 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
1918 		       !no_enc_frame) {
1919 		nbuf_clone = qdf_nbuf_clone(nbuf);
1920 		if (nbuf_clone == NULL) {
1921 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1922 				"qdf_nbuf_clone failed");
1923 			return nbuf;
1924 		}
1925 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
1926 	}
1927 
1928 	if (nbuf_clone) {
1929 		if (!dp_tx_send(vap_dev, nbuf_clone)) {
1930 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1931 		} else {
1932 			qdf_nbuf_free(nbuf_clone);
1933 		}
1934 	}
1935 
1936 	if (no_enc_frame)
1937 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
1938 	else
1939 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
1940 
1941 	nbuf = dp_tx_send(vap_dev, nbuf);
1942 	if ((nbuf == NULL) && no_enc_frame) {
1943 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
1944 	}
1945 
1946 	return nbuf;
1947 }
1948 
1949 #else
1950 
1951 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
1952 {
1953 	return dp_tx_send(vap_dev, nbuf);
1954 }
1955 
1956 #endif
1957 
1958 /**
1959  * dp_tx_send() - Transmit a frame on a given VAP
1960  * @vap_dev: DP vdev handle
1961  * @nbuf: skb
1962  *
1963  * Entry point for Core Tx layer (DP_TX) invoked from
1964  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
1965  * cases
1966  *
1967  * Return: NULL on success,
1968  *         nbuf when it fails to send
1969  */
1970 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
1971 {
1972 	struct ether_header *eh = NULL;
1973 	struct dp_tx_msdu_info_s msdu_info;
1974 	struct dp_tx_seg_info_s seg_info;
1975 	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
1976 	uint16_t peer_id = HTT_INVALID_PEER;
1977 	qdf_nbuf_t nbuf_mesh = NULL;
1978 
1979 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
1980 	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);
1981 
1982 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1983 
1984 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1985 			"%s , skb %pM",
1986 			__func__, nbuf->data);
1987 
1988 	/*
1989 	 * Set Default Host TID value to invalid TID
1990 	 * (TID override disabled)
1991 	 */
1992 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
1993 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
1994 
1995 	if (qdf_unlikely(vdev->mesh_vdev)) {
1996 		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
1997 								&msdu_info);
1998 		if (nbuf_mesh == NULL) {
1999 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2000 					"Extracting mesh metadata failed");
2001 			return nbuf;
2002 		}
2003 		nbuf = nbuf_mesh;
2004 	}
2005 
2006 	/*
2007 	 * Get HW Queue to use for this frame.
2008 	 * TCL supports upto 4 DMA rings, out of which 3 rings are
2009 	 * dedicated for data and 1 for command.
2010 	 * "queue_id" maps to one hardware ring.
2011 	 *  With each ring, we also associate a unique Tx descriptor pool
2012 	 *  to minimize lock contention for these resources.
2013 	 */
2014 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2015 
2016 	/*
2017 	 * TCL H/W supports 2 DSCP-TID mapping tables.
2018 	 *  Table 1 - Default DSCP-TID mapping table
2019 	 *  Table 2 - 1 DSCP-TID override table
2020 	 *
2021 	 * If we need a different DSCP-TID mapping for this vap,
2022 	 * call tid_classify to extract DSCP/ToS from frame and
2023 	 * map to a TID and store in msdu_info. This is later used
2024 	 * to fill in TCL Input descriptor (per-packet TID override).
2025 	 */
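	/*
	 * For example, a vap using the override table could map DSCP 46 (EF)
	 * to a voice TID while other vaps keep the default mapping; the TID
	 * chosen here is stored in msdu_info and later written into the TCL
	 * descriptor (the exact mapping is configuration dependent).
	 */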
2026 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2027 
2028 	/*
2029 	 * Classify the frame and call corresponding
2030 	 * "prepare" function which extracts the segment (TSO)
2031 	 * and fragmentation information (for TSO, SG, ME, or Raw)
2032 	 * into MSDU_INFO structure which is later used to fill
2033 	 * SW and HW descriptors.
2034 	 */
2035 	if (qdf_nbuf_is_tso(nbuf)) {
2036 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2037 			  "%s TSO frame %pK", __func__, vdev);
2038 		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
2039 				qdf_nbuf_len(nbuf));
2040 
2041 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2042 			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
2043 					 qdf_nbuf_len(nbuf));
2044 			return nbuf;
2045 		}
2046 
2047 		goto send_multiple;
2048 	}
2049 
2050 	/* SG */
2051 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2052 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2053 
2054 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2055 			 "%s non-TSO SG frame %pK", __func__, vdev);
2056 
2057 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2058 				qdf_nbuf_len(nbuf));
2059 
2060 		goto send_multiple;
2061 	}
2062 
2063 #ifdef ATH_SUPPORT_IQUE
2064 	/* Mcast to Ucast Conversion*/
2065 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2066 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2067 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2068 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2069 				  "%s Mcast frm for ME %pK", __func__, vdev);
2070 
2071 			DP_STATS_INC_PKT(vdev,
2072 					tx_i.mcast_en.mcast_pkt, 1,
2073 					qdf_nbuf_len(nbuf));
2074 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2075 					QDF_STATUS_SUCCESS) {
2076 				return NULL;
2077 			}
2078 		}
2079 	}
2080 #endif
2081 
2082 	/* RAW */
2083 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2084 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2085 		if (nbuf == NULL)
2086 			return NULL;
2087 
2088 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2089 			  "%s Raw frame %pK", __func__, vdev);
2090 
2091 		goto send_multiple;
2092 
2093 	}
2094 
2095 	/*  Single linear frame */
2096 	/*
2097 	 * If nbuf is a simple linear frame, use send_single function to
2098 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2099 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2100 	 */
2101 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2102 
2103 	return nbuf;
2104 
2105 send_multiple:
2106 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2107 
2108 	return nbuf;
2109 }
2110 
2111 /**
2112  * dp_tx_reinject_handler() - Tx Reinject Handler
2113  * @tx_desc: software descriptor head pointer
2114  * @status : Tx completion status from HTT descriptor
2115  *
2116  * This function reinjects frames back to Target.
2117  * Todo - Host queue needs to be added
2118  *
2119  * Return: none
2120  */
2121 static
2122 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2123 {
2124 	struct dp_vdev *vdev;
2125 	struct dp_peer *peer = NULL;
2126 	uint32_t peer_id = HTT_INVALID_PEER;
2127 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2128 	qdf_nbuf_t nbuf_copy = NULL;
2129 	struct dp_tx_msdu_info_s msdu_info;
2130 	struct dp_peer *sa_peer = NULL;
2131 	struct dp_ast_entry *ast_entry = NULL;
2132 	struct dp_soc *soc = NULL;
2133 	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
2134 #ifdef WDS_VENDOR_EXTENSION
2135 	int is_mcast = 0, is_ucast = 0;
2136 	int num_peers_3addr = 0;
2137 	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
2138 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2139 #endif
2140 
2141 	vdev = tx_desc->vdev;
2142 	soc = vdev->pdev->soc;
2143 
2144 	qdf_assert(vdev);
2145 
2146 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
2147 
2148 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2149 
2150 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2151 			"%s Tx reinject path", __func__);
2152 
2153 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2154 			qdf_nbuf_len(tx_desc->nbuf));
2155 
2156 	qdf_spin_lock_bh(&(soc->ast_lock));
2157 
2158 	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));
2159 
2160 	if (ast_entry)
2161 		sa_peer = ast_entry->peer;
2162 
2163 	qdf_spin_unlock_bh(&(soc->ast_lock));
2164 
2165 #ifdef WDS_VENDOR_EXTENSION
2166 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2167 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2168 	} else {
2169 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2170 	}
2171 	is_ucast = !is_mcast;
2172 
2173 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2174 		if (peer->bss_peer)
2175 			continue;
2176 
2177 		/* Detect wds peers that use 3-addr framing for mcast.
2178 		 * if there are any, the bss_peer is used to send the
2179 		 * mcast frame using 3-addr format. all wds enabled
2180 		 * peers that use 4-addr framing for mcast frames will
2181 		 * be duplicated and sent as 4-addr frames below.
2182 		 */
2183 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2184 			num_peers_3addr = 1;
2185 			break;
2186 		}
2187 	}
2188 #endif
2189 
2190 	if (qdf_unlikely(vdev->mesh_vdev)) {
2191 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2192 	} else {
2193 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2194 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
2195 #ifdef WDS_VENDOR_EXTENSION
2196 			/*
2197 			 * . if 3-addr STA, then send on BSS Peer
2198 			 * . if Peer WDS enabled and accept 4-addr mcast,
2199 			 * send mcast on that peer only
2200 			 * . if Peer WDS enabled and accept 4-addr ucast,
2201 			 * send ucast on that peer only
2202 			 */
2203 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2204 			 (peer->wds_enabled &&
2205 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2206 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2207 #else
2208 			((peer->bss_peer &&
2209 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
2210 				 peer->nawds_enabled)) {
2211 #endif
2212 				peer_id = DP_INVALID_PEER;
2213 
2214 				if (peer->nawds_enabled) {
2215 					peer_id = peer->peer_ids[0];
2216 					if (sa_peer == peer) {
2217 						QDF_TRACE(
2218 							QDF_MODULE_ID_DP,
2219 							QDF_TRACE_LEVEL_DEBUG,
2220 							" %s: multicast packet",
2221 							__func__);
2222 						DP_STATS_INC(peer,
2223 							tx.nawds_mcast_drop, 1);
2224 						continue;
2225 					}
2226 				}
2227 
2228 				nbuf_copy = qdf_nbuf_copy(nbuf);
2229 
2230 				if (!nbuf_copy) {
2231 					QDF_TRACE(QDF_MODULE_ID_DP,
2232 						QDF_TRACE_LEVEL_DEBUG,
2233 						FL("nbuf copy failed"));
2234 					break;
2235 				}
2236 
2237 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2238 						nbuf_copy,
2239 						&msdu_info,
2240 						peer_id,
2241 						NULL);
2242 
2243 				if (nbuf_copy) {
2244 					QDF_TRACE(QDF_MODULE_ID_DP,
2245 						QDF_TRACE_LEVEL_DEBUG,
2246 						FL("pkt send failed"));
2247 					qdf_nbuf_free(nbuf_copy);
2248 				} else {
2249 					if (peer_id != DP_INVALID_PEER)
2250 						DP_STATS_INC_PKT(peer,
2251 							tx.nawds_mcast,
2252 							1, qdf_nbuf_len(nbuf));
2253 				}
2254 			}
2255 		}
2256 	}
2257 
2258 	if (vdev->nawds_enabled) {
2259 		peer_id = DP_INVALID_PEER;
2260 
2261 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2262 					1, qdf_nbuf_len(nbuf));
2263 
2264 		nbuf = dp_tx_send_msdu_single(vdev,
2265 				nbuf,
2266 				&msdu_info,
2267 				peer_id, NULL);
2268 
2269 		if (nbuf) {
2270 			QDF_TRACE(QDF_MODULE_ID_DP,
2271 				QDF_TRACE_LEVEL_DEBUG,
2272 				FL("pkt send failed"));
2273 			qdf_nbuf_free(nbuf);
2274 		}
2275 	} else
2276 		qdf_nbuf_free(nbuf);
2277 
2278 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2279 }
2280 
2281 /**
2282  * dp_tx_inspect_handler() - Tx Inspect Handler
2283  * @tx_desc: software descriptor head pointer
2284  * @status : Tx completion status from HTT descriptor
2285  *
2286  * Handles Tx frames sent back to Host for inspection
2287  * (ProxyARP)
2288  *
2289  * Return: none
2290  */
2291 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2292 {
2293 
2294 	struct dp_soc *soc;
2295 	struct dp_pdev *pdev = tx_desc->pdev;
2296 
2297 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2298 			"%s Tx inspect path",
2299 			__func__);
2300 
2301 	qdf_assert(pdev);
2302 
2303 	soc = pdev->soc;
2304 
2305 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2306 			qdf_nbuf_len(tx_desc->nbuf));
2307 
2308 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2309 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2310 }
2311 
2312 #ifdef FEATURE_PERPKT_INFO
2313 /**
2314  * dp_get_completion_indication_for_stack() - send completion to stack
2315  * @soc : dp_soc handle
2316  * @pdev: dp_pdev handle
2317  * @peer: dp peer handle
2318  * @peer_id: peer_id of the peer for which completion came
2319  * @ppdu_id: ppdu_id
2320  * @first_msdu: first msdu
2321  * @last_msdu: last msdu
2322  * @netbuf: Buffer pointer for free
2323  *
2324  * This function indicates whether the buffer needs to be handed to
2325  * the stack to be freed or not
2326  */
2327 QDF_STATUS
2328 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2329 				       struct dp_pdev *pdev,
2330 				       struct dp_peer *peer, uint16_t peer_id,
2331 				       uint32_t ppdu_id, uint8_t first_msdu,
2332 				       uint8_t last_msdu, qdf_nbuf_t netbuf)
2333 {
2334 	struct tx_capture_hdr *ppdu_hdr;
2335 	struct ether_header *eh;
2336 
2337 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
2338 		return QDF_STATUS_E_NOSUPPORT;
2339 
2340 	if (!peer) {
2341 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2342 				FL("Peer Invalid"));
2343 		return QDF_STATUS_E_INVAL;
2344 	}
2345 
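	/*
	 * In m_copy mode only one completion per (ppdu_id, peer_id) pair is
	 * delivered; repeats for the same PPDU and peer are suppressed below.
	 */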
2346 	if (pdev->mcopy_mode) {
2347 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2348 			(pdev->m_copy_id.tx_peer_id == peer_id)) {
2349 			return QDF_STATUS_E_INVAL;
2350 		}
2351 
2352 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2353 		pdev->m_copy_id.tx_peer_id = peer_id;
2354 	}
2355 
2356 	eh = (struct ether_header *)qdf_nbuf_data(netbuf);
2357 
2358 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2359 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2360 				FL("No headroom"));
2361 		return QDF_STATUS_E_NOMEM;
2362 	}
2363 
2364 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2365 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2366 		     IEEE80211_ADDR_LEN);
2367 	if (peer->bss_peer) {
2368 		qdf_mem_copy(ppdu_hdr->ra, eh->ether_dhost, IEEE80211_ADDR_LEN);
2369 	} else {
2370 		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2371 			     IEEE80211_ADDR_LEN);
2372 	}
2373 
2374 	ppdu_hdr->ppdu_id = ppdu_id;
2375 	ppdu_hdr->peer_id = peer_id;
2376 	ppdu_hdr->first_msdu = first_msdu;
2377 	ppdu_hdr->last_msdu = last_msdu;
2378 
2379 	return QDF_STATUS_SUCCESS;
2380 }
2381 
2382 
2383 /**
2384  * dp_send_completion_to_stack() - send completion to stack
2385  * @soc :  dp_soc handle
2386  * @pdev:  dp_pdev handle
2387  * @peer_id: peer_id of the peer for which completion came
2388  * @ppdu_id: ppdu_id
2389  * @netbuf: Buffer pointer for free
2390  *
2391  * This function is used to send completion to stack
2392  * to free buffer
2393  */
2394 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2395 					uint16_t peer_id, uint32_t ppdu_id,
2396 					qdf_nbuf_t netbuf)
2397 {
2398 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2399 				netbuf, peer_id,
2400 				WDI_NO_VAL, pdev->pdev_id);
2401 }
2402 #else
2403 static QDF_STATUS
2404 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2405 				       struct dp_pdev *pdev,
2406 				       struct dp_peer *peer, uint16_t peer_id,
2407 				       uint32_t ppdu_id, uint8_t first_msdu,
2408 				       uint8_t last_msdu, qdf_nbuf_t netbuf)
2409 {
2410 	return QDF_STATUS_E_NOSUPPORT;
2411 }
2412 
2413 static void
2414 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2415 		      uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2416 {
2417 }
2418 #endif
2419 
2420 /**
2421  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2422  * @soc: Soc handle
2423  * @desc: software Tx descriptor to be processed
2424  *
2425  * Return: none
2426  */
2427 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2428 		struct dp_tx_desc_s *desc)
2429 {
2430 	struct dp_vdev *vdev = desc->vdev;
2431 	qdf_nbuf_t nbuf = desc->nbuf;
2432 
2433 	/* If it is TDLS mgmt, don't unmap or free the frame */
2434 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2435 		return dp_non_std_tx_comp_free_buff(desc, vdev);
2436 
2437 	/* 0 : MSDU buffer, 1 : MLE */
2438 	if (desc->msdu_ext_desc) {
2439 		/* TSO free */
2440 		if (hal_tx_ext_desc_get_tso_enable(
2441 					desc->msdu_ext_desc->vaddr)) {
2442 			/* unmap each TSO segment before freeing the nbuf */
2443 			dp_tx_tso_unmap_segment(soc, desc);
2444 			qdf_nbuf_free(nbuf);
2445 			return;
2446 		}
2447 	}
2448 
2449 	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
2450 
2451 	if (qdf_likely(!vdev->mesh_vdev))
2452 		qdf_nbuf_free(nbuf);
2453 	else {
2454 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2455 			qdf_nbuf_free(nbuf);
2456 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2457 		} else
2458 			vdev->osif_tx_free_ext((nbuf));
2459 	}
2460 }
2461 
2462 /**
2463  * dp_tx_mec_handler() - Tx  MEC Notify Handler
2464  * @vdev: pointer to dp dev handler
2465  * @status : Tx completion status from HTT descriptor
2466  *
2467  * Handles MEC notify event sent from fw to Host
2468  *
2469  * Return: none
2470  */
2471 #ifdef FEATURE_WDS
2472 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
2473 {
2474 
2475 	struct dp_soc *soc;
2476 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
2477 	struct dp_peer *peer;
2478 	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;
2479 
2480 	if (!vdev->wds_enabled)
2481 		return;
2482 
2483 	/* MEC required only in STA mode */
2484 	if (vdev->opmode != wlan_op_mode_sta)
2485 		return;
2486 
2487 	soc = vdev->pdev->soc;
2488 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2489 	peer = TAILQ_FIRST(&vdev->peer_list);
2490 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2491 
2492 	if (!peer) {
2493 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2494 				FL("peer is NULL"));
2495 		return;
2496 	}
2497 
2498 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2499 			"%s Tx MEC Handler",
2500 			__func__);
2501 
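	/* The MEC notify status carries the MAC address in reverse byte
	 * order starting at offset (DP_MAC_ADDR_LEN - 2); reassemble it
	 * into mac_addr in normal order before adding the MEC AST entry.
	 */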
2502 	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
2503 		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
2504 					status[(DP_MAC_ADDR_LEN - 2) + i];
2505 
2506 	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
2507 		dp_peer_add_ast(soc,
2508 				peer,
2509 				mac_addr,
2510 				CDP_TXRX_AST_TYPE_MEC,
2511 				flags);
2512 }
2513 #endif
2514 
2515 /**
2516  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
2517  * @tx_desc: software descriptor head pointer
2518  * @status : Tx completion status from HTT descriptor
2519  *
2520  * This function will process HTT Tx indication messages from Target
2521  *
2522  * Return: none
2523  */
2524 static
2525 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2526 {
2527 	uint8_t tx_status;
2528 	struct dp_pdev *pdev;
2529 	struct dp_vdev *vdev;
2530 	struct dp_soc *soc;
2531 	uint32_t *htt_status_word = (uint32_t *) status;
2532 
2533 	qdf_assert(tx_desc->pdev);
2534 
2535 	pdev = tx_desc->pdev;
2536 	vdev = tx_desc->vdev;
2537 	soc = pdev->soc;
2538 
2539 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);
2540 
2541 	switch (tx_status) {
2542 	case HTT_TX_FW2WBM_TX_STATUS_OK:
2543 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
2544 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
2545 	{
2546 		dp_tx_comp_free_buf(soc, tx_desc);
2547 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2548 		break;
2549 	}
2550 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
2551 	{
2552 		dp_tx_reinject_handler(tx_desc, status);
2553 		break;
2554 	}
2555 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
2556 	{
2557 		dp_tx_inspect_handler(tx_desc, status);
2558 		break;
2559 	}
2560 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
2561 	{
2562 		dp_tx_mec_handler(vdev, status);
2563 		break;
2564 	}
2565 	default:
2566 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2567 				"%s Invalid HTT tx_status %d",
2568 				__func__, tx_status);
2569 		break;
2570 	}
2571 }
2572 
2573 #ifdef MESH_MODE_SUPPORT
2574 /**
2575  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2576  *                                         in mesh meta header
2577  * @tx_desc: software descriptor head pointer
2578  * @ts: pointer to tx completion stats
2579  * Return: none
2580  */
2581 static
2582 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2583 		struct hal_tx_completion_status *ts)
2584 {
2585 	struct meta_hdr_s *mhdr;
2586 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2587 
2588 	if (!tx_desc->msdu_ext_desc) {
2589 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2590 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2591 				"netbuf %pK offset %d",
2592 				netbuf, tx_desc->pkt_offset);
2593 			return;
2594 		}
2595 	}
2596 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2597 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2598 			"netbuf %pK offset %d", netbuf,
2599 			sizeof(struct meta_hdr_s));
2600 		return;
2601 	}
2602 
2603 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2604 	mhdr->rssi = ts->ack_frame_rssi;
2605 	mhdr->channel = tx_desc->pdev->operating_channel;
2606 }
2607 
2608 #else
2609 static
2610 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2611 		struct hal_tx_completion_status *ts)
2612 {
2613 }
2614 
2615 #endif
2616 
2617 /**
2618  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
2619  * @peer: Handle to DP peer
2620  * @ts: pointer to HAL Tx completion stats
2621  * @length: MSDU length
2622  *
2623  * Return: None
2624  */
2625 static void dp_tx_update_peer_stats(struct dp_peer *peer,
2626 		struct hal_tx_completion_status *ts, uint32_t length)
2627 {
2628 	struct dp_pdev *pdev = peer->vdev->pdev;
2629 	struct dp_soc *soc = pdev->soc;
2630 	uint8_t mcs, pkt_type;
2631 
2632 	mcs = ts->mcs;
2633 	pkt_type = ts->pkt_type;
2634 
2635 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
2636 		return;
2637 
2638 	if (peer->bss_peer) {
2639 		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
2640 	} else {
2641 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
2642 	}
2643 
2644 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
2645 
2646 	DP_STATS_INCC_PKT(peer, tx.tx_success, 1, length,
2647 			  (ts->status == HAL_TX_TQM_RR_FRAME_ACKED));
2648 
2649 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
2650 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
2651 
2652 	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
2653 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
2654 
2655 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
2656 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
2657 
2658 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
2659 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
2660 
2661 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
2662 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
2663 
2664 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
2665 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
2666 
2667 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
2668 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
2669 
2670 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
2671 		return;
2672 
2673 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
2674 
2675 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
2676 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
2677 
2678 	if (!(soc->process_tx_status))
2679 		return;
2680 
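	/*
	 * Per-preamble MCS histogram: when the reported MCS is out of range
	 * for the packet type it is accounted in the overflow bucket
	 * (MAX_MCS - 1), otherwise in its own MCS bucket.
	 */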
2681 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2682 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
2683 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2684 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
2685 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2686 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
2687 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2688 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
2689 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2690 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
2691 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2692 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
2693 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2694 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2695 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2696 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
2697 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
2698 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2699 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
2700 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
2701 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
2702 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
2703 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
2704 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
2705 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
2706 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
2707 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
2708 
2709 	if (soc->cdp_soc.ol_ops->update_dp_stats) {
2710 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
2711 				&peer->stats, ts->peer_id,
2712 				UPDATE_PEER_STATS);
2713 	}
2714 }
2715 
2716 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2717 /**
2718  * dp_tx_flow_pool_lock() - take flow pool lock
2719  * @soc: core txrx main context
2720  * @tx_desc: tx desc
2721  *
2722  * Return: None
2723  */
2724 static inline
2725 void dp_tx_flow_pool_lock(struct dp_soc *soc,
2726 			  struct dp_tx_desc_s *tx_desc)
2727 {
2728 	struct dp_tx_desc_pool_s *pool;
2729 	uint8_t desc_pool_id;
2730 
2731 	desc_pool_id = tx_desc->pool_id;
2732 	pool = &soc->tx_desc[desc_pool_id];
2733 
2734 	qdf_spin_lock_bh(&pool->flow_pool_lock);
2735 }
2736 
2737 /**
2738  * dp_tx_flow_pool_unlock() - release flow pool lock
2739  * @soc: core txrx main context
2740  * @tx_desc: tx desc
2741  *
2742  * Return: None
2743  */
2744 static inline
2745 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
2746 			    struct dp_tx_desc_s *tx_desc)
2747 {
2748 	struct dp_tx_desc_pool_s *pool;
2749 	uint8_t desc_pool_id;
2750 
2751 	desc_pool_id = tx_desc->pool_id;
2752 	pool = &soc->tx_desc[desc_pool_id];
2753 
2754 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
2755 }
2756 #else
2757 static inline
2758 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2759 {
2760 }
2761 
2762 static inline
2763 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
2764 {
2765 }
2766 #endif
2767 
2768 /**
2769  * dp_tx_notify_completion() - Notify tx completion for this desc
2770  * @soc: core txrx main context
2771  * @tx_desc: tx desc
2772  * @netbuf:  buffer
2773  *
2774  * Return: none
2775  */
2776 static inline void dp_tx_notify_completion(struct dp_soc *soc,
2777 					   struct dp_tx_desc_s *tx_desc,
2778 					   qdf_nbuf_t netbuf)
2779 {
2780 	void *osif_dev;
2781 	ol_txrx_completion_fp tx_compl_cbk = NULL;
2782 
2783 	qdf_assert(tx_desc);
2784 
2785 	dp_tx_flow_pool_lock(soc, tx_desc);
2786 
2787 	if (!tx_desc->vdev ||
2788 	    !tx_desc->vdev->osif_vdev) {
2789 		dp_tx_flow_pool_unlock(soc, tx_desc);
2790 		return;
2791 	}
2792 
2793 	osif_dev = tx_desc->vdev->osif_vdev;
2794 	tx_compl_cbk = tx_desc->vdev->tx_comp;
2795 	dp_tx_flow_pool_unlock(soc, tx_desc);
2796 
2797 	if (tx_compl_cbk)
2798 		tx_compl_cbk(netbuf, osif_dev);
2799 }
2800 
2801 /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
2802  * @pdev: pdev handle
2803  * @tid: tid value
2804  * @txdesc_ts: timestamp from txdesc
2805  * @ppdu_id: ppdu id
2806  *
2807  * Return: none
2808  */
2809 #ifdef FEATURE_PERPKT_INFO
2810 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2811 					       uint8_t tid,
2812 					       uint64_t txdesc_ts,
2813 					       uint32_t ppdu_id)
2814 {
2815 	uint64_t delta_ms;
2816 	struct cdp_tx_sojourn_stats *sojourn_stats;
2817 
2818 	if (pdev->enhanced_stats_en == 0)
2819 		return;
2820 
2821 	if (pdev->sojourn_stats.ppdu_seq_id == 0)
2822 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2823 
2824 	if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) {
2825 		if (!pdev->sojourn_buf)
2826 			return;
2827 
2828 		sojourn_stats = (struct cdp_tx_sojourn_stats *)
2829 					qdf_nbuf_data(pdev->sojourn_buf);
2830 
2831 		qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats,
2832 			     sizeof(struct cdp_tx_sojourn_stats));
2833 
2834 		qdf_mem_zero(&pdev->sojourn_stats,
2835 			     sizeof(struct cdp_tx_sojourn_stats));
2836 
2837 		dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
2838 				     pdev->sojourn_buf, HTT_INVALID_PEER,
2839 				     WDI_NO_VAL, pdev->pdev_id);
2840 
2841 		pdev->sojourn_stats.ppdu_seq_id = ppdu_id;
2842 	}
2843 
2844 	if (tid == HTT_INVALID_TID)
2845 		return;
2846 
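	/* Sojourn time for this MSDU: time elapsed since the tx descriptor
	 * timestamp was taken, in ms.
	 */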
2847 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
2848 				txdesc_ts;
2849 	qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid],
2850 			    delta_ms);
2851 	pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms;
2852 	pdev->sojourn_stats.num_msdus[tid]++;
2853 }
2854 #else
2855 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
2856 					       uint8_t tid,
2857 					       uint64_t txdesc_ts,
2858 					       uint32_t ppdu_id)
2859 {
2860 }
2861 #endif
2862 
2863 /**
2864  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
2865  * @tx_desc: software descriptor head pointer
2866  * @length: packet length
2867  * @peer: peer handle
2868  *
2869  * Return: none
2870  */
2871 static inline
2872 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
2873 				  uint32_t length, struct dp_peer *peer)
2874 {
2875 	struct hal_tx_completion_status ts = {0};
2876 	struct dp_soc *soc = NULL;
2877 	struct dp_vdev *vdev = tx_desc->vdev;
2878 	struct ether_header *eh =
2879 		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);
2880 
2881 	if (!vdev) {
2882 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2883 				"invalid vdev");
2884 		goto out;
2885 	}
2886 
2887 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
2888 
2889 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2890 				"-------------------- \n"
2891 				"Tx Completion Stats: \n"
2892 				"-------------------- \n"
2893 				"ack_frame_rssi = %d \n"
2894 				"first_msdu = %d \n"
2895 				"last_msdu = %d \n"
2896 				"msdu_part_of_amsdu = %d \n"
2897 				"rate_stats valid = %d \n"
2898 				"bw = %d \n"
2899 				"pkt_type = %d \n"
2900 				"stbc = %d \n"
2901 				"ldpc = %d \n"
2902 				"sgi = %d \n"
2903 				"mcs = %d \n"
2904 				"ofdma = %d \n"
2905 				"tones_in_ru = %d \n"
2906 				"tsf = %d \n"
2907 				"ppdu_id = %d \n"
2908 				"transmit_cnt = %d \n"
2909 				"tid = %d \n"
2910 				"peer_id = %d ",
2911 				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
2912 				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
2913 				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
2914 				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
2915 				ts.ppdu_id, ts.transmit_cnt, ts.tid,
2916 				ts.peer_id);
2917 
2918 	soc = vdev->pdev->soc;
2919 
2920 	/* Update SoC level stats */
2921 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
2922 			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));
2923 
2924 	/* Update per-packet stats */
2925 	if (qdf_unlikely(vdev->mesh_vdev) &&
2926 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
2927 		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
2928 
2929 	/* Update peer level stats */
2930 	if (!peer) {
2931 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2932 				"invalid peer");
2933 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
2934 		goto out;
2935 	}
2936 
2937 	if (qdf_likely(peer->vdev->tx_encap_type ==
2938 				htt_cmn_pkt_type_ethernet)) {
2939 		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
2940 			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
2941 	}
2942 
2943 	dp_tx_sojourn_stats_process(vdev->pdev, ts.tid,
2944 				    tx_desc->timestamp,
2945 				    ts.ppdu_id);
2946 
2947 	dp_tx_update_peer_stats(peer, &ts, length);
2948 
2949 out:
2950 	return;
2951 }
2952 /**
2953  * dp_tx_comp_process_desc() - Tx complete software descriptor handler
2954  * @soc: core txrx main context
2955  * @comp_head: software descriptor head pointer
2956  *
2957  * This function will process batch of descriptors reaped by dp_tx_comp_handler
2958  * and release the software descriptors after processing is complete
2959  *
2960  * Return: none
2961  */
2962 static void dp_tx_comp_process_desc(struct dp_soc *soc,
2963 		struct dp_tx_desc_s *comp_head)
2964 {
2965 	struct dp_tx_desc_s *desc;
2966 	struct dp_tx_desc_s *next;
2967 	struct hal_tx_completion_status ts = {0};
2968 	uint32_t length;
2969 	struct dp_peer *peer;
2970 
2971 	DP_HIST_INIT();
2972 	desc = comp_head;
2973 
2974 	while (desc) {
2975 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
2976 		peer = dp_peer_find_by_id(soc, ts.peer_id);
2977 		length = qdf_nbuf_len(desc->nbuf);
2978 
2979 		/* check tx completion notification */
2980 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf))
2981 			dp_tx_notify_completion(soc, desc, desc->nbuf);
2982 
2983 		dp_tx_comp_process_tx_status(desc, length, peer);
2984 
2985 		DPTRACE(qdf_dp_trace_ptr
2986 				(desc->nbuf,
2987 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
2988 				 QDF_TRACE_DEFAULT_PDEV_ID,
2989 				 qdf_nbuf_data_addr(desc->nbuf),
2990 				 sizeof(qdf_nbuf_data(desc->nbuf)),
2991 				 desc->id, ts.status)
2992 			);
2993 
2994 		/*currently m_copy/tx_capture is not supported for scatter gather packets*/
2995 		if (!(desc->msdu_ext_desc) &&
2996 		    (dp_get_completion_indication_for_stack(soc, desc->pdev,
2997 					peer, ts.peer_id, ts.ppdu_id,
2998 					ts.first_msdu, ts.last_msdu,
2999 					desc->nbuf) == QDF_STATUS_SUCCESS)) {
3000 			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
3001 				       QDF_DMA_TO_DEVICE);
3002 
3003 			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
3004 						    ts.ppdu_id, desc->nbuf);
3005 		} else {
3006 			dp_tx_comp_free_buf(soc, desc);
3007 		}
3008 
3009 		if (peer)
3010 			dp_peer_unref_del_find_by_id(peer);
3011 
3012 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
3013 
3014 		next = desc->next;
3015 
3016 		dp_tx_desc_release(desc, desc->pool_id);
3017 		desc = next;
3018 	}
3019 
3020 	DP_TX_HIST_STATS_PER_PDEV();
3021 }
3022 
3023 /**
3024  * dp_tx_comp_handler() - Tx completion handler
3025  * @soc: core txrx main context
3026  * @ring_id: completion ring id
3027  * @quota: No. of packets/descriptors that can be serviced in one loop
3028  *
3029  * This function will collect hardware release ring element contents and
3030  * handle descriptor contents. Based on contents, free packet or handle error
3031  * conditions
3032  *
3033  * Return: none
3034  */
3035 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
3036 {
3037 	void *tx_comp_hal_desc;
3038 	uint8_t buffer_src;
3039 	uint8_t pool_id;
3040 	uint32_t tx_desc_id;
3041 	struct dp_tx_desc_s *tx_desc = NULL;
3042 	struct dp_tx_desc_s *head_desc = NULL;
3043 	struct dp_tx_desc_s *tail_desc = NULL;
3044 	uint32_t num_processed;
3045 	uint32_t count;
3046 
3047 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
3048 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3049 				"%s %d : HAL RING Access Failed -- %pK",
3050 				__func__, __LINE__, hal_srng);
3051 		return 0;
3052 	}
3053 
3054 	num_processed = 0;
3055 	count = 0;
3056 
3057 	/* Find head descriptor from completion ring */
3058 	while (qdf_likely(tx_comp_hal_desc =
3059 			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {
3060 
3061 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3062 
3063 		/* If this buffer was not released by TQM or FW, then it is
3064 		 * not a Tx completion indication; assert */
3065 		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3066 				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3067 
3068 			QDF_TRACE(QDF_MODULE_ID_DP,
3069 					QDF_TRACE_LEVEL_FATAL,
3070 					"Tx comp release_src != TQM | FW");
3071 
3072 			qdf_assert_always(0);
3073 		}
3074 
3075 		/* Get descriptor id */
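		/* The descriptor id is a SW cookie that packs the pool id,
		 * the page within the pool and the offset within the page;
		 * these fields are unpacked below to locate the SW Tx
		 * descriptor.
		 */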
3076 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3077 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3078 			DP_TX_DESC_ID_POOL_OS;
3079 
3080 		if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
3081 			continue;
3082 
3083 		/* Find Tx descriptor */
3084 		tx_desc = dp_tx_desc_find(soc, pool_id,
3085 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3086 				DP_TX_DESC_ID_PAGE_OS,
3087 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3088 				DP_TX_DESC_ID_OFFSET_OS);
3089 
3090 		/*
3091 		 * If the release source is FW, process the HTT status
3092 		 */
3093 		if (qdf_unlikely(buffer_src ==
3094 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3095 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3096 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3097 					htt_tx_status);
3098 			dp_tx_process_htt_completion(tx_desc,
3099 					htt_tx_status);
3100 		} else {
3101 			/* Pool id is not matching. Error */
3102 			if (tx_desc->pool_id != pool_id) {
3103 				QDF_TRACE(QDF_MODULE_ID_DP,
3104 					QDF_TRACE_LEVEL_FATAL,
3105 					"Tx Comp pool id %d not matched %d",
3106 					pool_id, tx_desc->pool_id);
3107 
3108 				qdf_assert_always(0);
3109 			}
3110 
3111 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3112 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3113 				QDF_TRACE(QDF_MODULE_ID_DP,
3114 					QDF_TRACE_LEVEL_FATAL,
3115 					"Txdesc invalid, flgs = %x,id = %d",
3116 					tx_desc->flags,	tx_desc_id);
3117 				qdf_assert_always(0);
3118 			}
3119 
3120 			/* First ring descriptor on the cycle */
3121 			if (!head_desc) {
3122 				head_desc = tx_desc;
3123 				tail_desc = tx_desc;
3124 			}
3125 
3126 			tail_desc->next = tx_desc;
3127 			tx_desc->next = NULL;
3128 			tail_desc = tx_desc;
3129 
3130 			/* Collect hw completion contents */
3131 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3132 					&tx_desc->comp, 1);
3133 
3134 		}
3135 
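		/*
		 * Consume one unit of quota for every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) descriptors reaped, so a
		 * single quota unit covers several completions.
		 */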
3136 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3137 
3138 		/*
3139 		 * Processed packet count is more than given quota
3140 		 * stop to processing
3141 		 */
3142 		if ((num_processed >= quota))
3143 			break;
3144 
3145 		count++;
3146 	}
3147 
3148 	hal_srng_access_end(soc->hal_soc, hal_srng);
3149 
3150 	/* Process the reaped descriptors */
3151 	if (head_desc)
3152 		dp_tx_comp_process_desc(soc, head_desc);
3153 
3154 	return num_processed;
3155 }
3156 
3157 #ifdef CONVERGED_TDLS_ENABLE
3158 /**
3159  * dp_tx_non_std() - Allow the control-path SW to send data frames
3160  *
3161  * @data_vdev - which vdev should transmit the tx data frames
3162  * @tx_spec - what non-standard handling to apply to the tx data frames
3163  * @msdu_list - NULL-terminated list of tx MSDUs
3164  *
3165  * Return: NULL on success,
3166  *         nbuf when it fails to send
3167  */
3168 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
3169 			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
3170 {
3171 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3172 
3173 	if (tx_spec & OL_TX_SPEC_NO_FREE)
3174 		vdev->is_tdls_frame = true;
3175 	return dp_tx_send(vdev_handle, msdu_list);
3176 }
3177 #endif
3178 
3179 /**
3180  * dp_tx_vdev_attach() - attach vdev to dp tx
3181  * @vdev: virtual device instance
3182  *
3183  * Return: QDF_STATUS_SUCCESS: success
3184  *         QDF_STATUS_E_RESOURCES: Error return
3185  */
3186 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
3187 {
3188 	/*
3189 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
3190 	 */
3191 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
3192 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
3193 
3194 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
3195 			vdev->vdev_id);
3196 
3197 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
3198 			DP_SW2HW_MACID(vdev->pdev->pdev_id));
3199 
3200 	/*
3201 	 * Set HTT Extension Valid bit to 0 by default
3202 	 */
3203 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
3204 
3205 	dp_tx_vdev_update_search_flags(vdev);
3206 
3207 	return QDF_STATUS_SUCCESS;
3208 }
3209 
3210 /**
3211  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
3212  * @vdev: virtual device instance
3213  *
3214  * Return: void
3215  *
3216  */
3217 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
3218 {
3219 	struct dp_soc *soc = vdev->pdev->soc;
3220 
3221 	/*
3222 	 * Enable both AddrY (SA based search) and AddrX (Da based search)
3223 	 * for TDLS link
3224 	 *
3225 	 * Enable AddrY (SA based search) only for non-WDS STA and
3226 	 * ProxySTA VAP modes.
3227 	 *
3228 	 * In all other VAP modes, only DA based search should be
3229 	 * enabled
3230 	 */
3231 	if (vdev->opmode == wlan_op_mode_sta &&
3232 	    vdev->tdls_link_connected)
3233 		vdev->hal_desc_addr_search_flags =
3234 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
3235 	else if ((vdev->opmode == wlan_op_mode_sta &&
3236 				(!vdev->wds_enabled || vdev->proxysta_vdev)))
3237 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
3238 	else
3239 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
3240 
3241 	/* Set search type only when peer map v2 messaging is enabled
3242 	 * as we will have the search index (AST hash) only when v2 is
3243 	 * enabled
3244 	 */
3245 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
3246 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
3247 	else
3248 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
3249 }
3250 
3251 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3252 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3253 {
3254 }
3255 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3256 
3257 /**
3258  * dp_tx_desc_flush() - release resources associated with tx_desc
3259  * @vdev: virtual device instance
3260  *
3261  * This function will free all outstanding Tx buffers,
3262  * including ME buffers for which either the free on
3263  * completion did not happen or the completion was
3264  * never received.
3265  */
3266 static void dp_tx_desc_flush(struct dp_vdev *vdev)
3267 {
3268 	uint8_t i, num_pool;
3269 	uint32_t j;
3270 	uint32_t num_desc;
3271 	struct dp_soc *soc = vdev->pdev->soc;
3272 	struct dp_tx_desc_s *tx_desc = NULL;
3273 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
3274 
3275 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3276 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3277 
3278 	for (i = 0; i < num_pool; i++) {
3279 		for (j = 0; j < num_desc; j++) {
3280 			tx_desc_pool = &((soc)->tx_desc[(i)]);
3281 			if (tx_desc_pool &&
3282 				tx_desc_pool->desc_pages.cacheable_pages) {
3283 				tx_desc = dp_tx_desc_find(soc, i,
3284 					(j & DP_TX_DESC_ID_PAGE_MASK) >>
3285 					DP_TX_DESC_ID_PAGE_OS,
3286 					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
3287 					DP_TX_DESC_ID_OFFSET_OS);
3288 
3289 				if (tx_desc && (tx_desc->vdev == vdev) &&
3290 					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
3291 					dp_tx_comp_free_buf(soc, tx_desc);
3292 					dp_tx_desc_release(tx_desc, i);
3293 				}
3294 			}
3295 		}
3296 	}
3297 }
3298 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3299 
3300 /**
3301  * dp_tx_vdev_detach() - detach vdev from dp tx
3302  * @vdev: virtual device instance
3303  *
3304  * Return: QDF_STATUS_SUCCESS: success
3305  *         QDF_STATUS_E_RESOURCES: Error return
3306  */
3307 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
3308 {
3309 	dp_tx_desc_flush(vdev);
3310 	return QDF_STATUS_SUCCESS;
3311 }
3312 
3313 /**
3314  * dp_tx_pdev_attach() - attach pdev to dp tx
3315  * @pdev: physical device instance
3316  *
3317  * Return: QDF_STATUS_SUCCESS: success
3318  *         QDF_STATUS_E_RESOURCES: Error return
3319  */
3320 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
3321 {
3322 	struct dp_soc *soc = pdev->soc;
3323 
3324 	/* Initialize Flow control counters */
3325 	qdf_atomic_init(&pdev->num_tx_exception);
3326 	qdf_atomic_init(&pdev->num_tx_outstanding);
3327 
3328 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3329 		/* Initialize descriptors in TCL Ring */
3330 		hal_tx_init_data_ring(soc->hal_soc,
3331 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
3332 	}
3333 
3334 	return QDF_STATUS_SUCCESS;
3335 }
3336 
3337 /**
3338  * dp_tx_pdev_detach() - detach pdev from dp tx
3339  * @pdev: physical device instance
3340  *
3341  * Return: QDF_STATUS_SUCCESS: success
3342  *         QDF_STATUS_E_RESOURCES: Error return
3343  */
3344 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
3345 {
3346 	dp_tx_me_exit(pdev);
3347 	return QDF_STATUS_SUCCESS;
3348 }
3349 
3350 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3351 /* Pools will be allocated dynamically */
3352 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3353 					int num_desc)
3354 {
3355 	uint8_t i;
3356 
3357 	for (i = 0; i < num_pool; i++) {
3358 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
3359 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
3360 	}
3361 
3362 	return 0;
3363 }
3364 
3365 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3366 {
3367 	uint8_t i;
3368 
3369 	for (i = 0; i < num_pool; i++)
3370 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
3371 }
3372 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
3373 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
3374 					int num_desc)
3375 {
3376 	uint8_t i;
3377 
3378 	/* Allocate software Tx descriptor pools */
3379 	for (i = 0; i < num_pool; i++) {
3380 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
3381 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3382 					"%s Tx Desc Pool alloc %d failed %pK",
3383 					__func__, i, soc);
3384 			return ENOMEM;
3385 		}
3386 	}
3387 	return 0;
3388 }
3389 
3390 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
3391 {
3392 	uint8_t i;
3393 
3394 	for (i = 0; i < num_pool; i++) {
3395 		qdf_assert_always(!soc->tx_desc[i].num_allocated);
3396 		if (dp_tx_desc_pool_free(soc, i)) {
3397 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3398 				"%s Tx Desc Pool Free failed", __func__);
3399 		}
3400 	}
3401 }
3402 
3403 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
3404 
3405 /**
3406  * dp_tx_soc_detach() - detach soc from dp tx
3407  * @soc: core txrx main context
3408  *
3409  * This function detaches dp tx from the main device context
3410  * and frees the dp tx resources.
3411  *
3412  * Return: QDF_STATUS_SUCCESS: success
3413  *         QDF_STATUS_E_RESOURCES: Error return
3414  */
3415 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
3416 {
3417 	uint8_t num_pool;
3418 	uint16_t num_desc;
3419 	uint16_t num_ext_desc;
3420 	uint8_t i;
3421 
3422 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3423 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3424 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3425 
3426 	dp_tx_flow_control_deinit(soc);
3427 	dp_tx_delete_static_pools(soc, num_pool);
3428 
3429 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3430 			"%s Tx Desc Pool Free num_pool = %d, descs = %d",
3431 			__func__, num_pool, num_desc);
3432 
3433 	for (i = 0; i < num_pool; i++) {
3434 		if (dp_tx_ext_desc_pool_free(soc, i)) {
3435 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3436 					"%s Tx Ext Desc Pool Free failed",
3437 					__func__);
3438 			return QDF_STATUS_E_RESOURCES;
3439 		}
3440 	}
3441 
3442 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3443 			"%s MSDU Ext Desc Pool %d Free descs = %d",
3444 			__func__, num_pool, num_ext_desc);
3445 
3446 	for (i = 0; i < num_pool; i++) {
3447 		dp_tx_tso_desc_pool_free(soc, i);
3448 	}
3449 
3450 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3451 			"%s TSO Desc Pool %d Free descs = %d",
3452 			__func__, num_pool, num_desc);
3453 
3454 
3455 	for (i = 0; i < num_pool; i++)
3456 		dp_tx_tso_num_seg_pool_free(soc, i);
3457 
3458 
3459 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3460 		"%s TSO Num of seg Desc Pool %d Free descs = %d",
3461 		__func__, num_pool, num_desc);
3462 
3463 	return QDF_STATUS_SUCCESS;
3464 }
3465 
3466 /**
3467  * dp_tx_soc_attach() - attach soc to dp tx
3468  * @soc: core txrx main context
3469  *
3470  * This function attaches dp tx to the main device context
3471  * and allocates and initializes the dp tx resources.
3472  *
3473  * Return: QDF_STATUS_SUCCESS: success
3474  *         QDF_STATUS_E_RESOURCES: Error return
3475  */
3476 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
3477 {
3478 	uint8_t i;
3479 	uint8_t num_pool;
3480 	uint32_t num_desc;
3481 	uint32_t num_ext_desc;
3482 
3483 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
3484 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3485 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
3486 
3487 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
3488 		goto fail;
3489 
3490 	dp_tx_flow_control_init(soc);
3491 
3492 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3493 			"%s Tx Desc Alloc num_pool = %d, descs = %d",
3494 			__func__, num_pool, num_desc);
3495 
3496 	/* Allocate extension tx descriptor pools */
3497 	for (i = 0; i < num_pool; i++) {
3498 		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
3499 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3500 				"MSDU Ext Desc Pool alloc %d failed %pK",
3501 				i, soc);
3502 
3503 			goto fail;
3504 		}
3505 	}
3506 
3507 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3508 			"%s MSDU Ext Desc Alloc %d, descs = %d",
3509 			__func__, num_pool, num_ext_desc);
3510 
3511 	for (i = 0; i < num_pool; i++) {
3512 		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
3513 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3514 				"TSO Desc Pool alloc %d failed %pK",
3515 				i, soc);
3516 
3517 			goto fail;
3518 		}
3519 	}
3520 
3521 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3522 			"%s TSO Desc Alloc %d, descs = %d",
3523 			__func__, num_pool, num_desc);
3524 
3525 	for (i = 0; i < num_pool; i++) {
3526 		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
3527 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3528 				"TSO Num of seg Pool alloc %d failed %pK",
3529 				i, soc);
3530 
3531 			goto fail;
3532 		}
3533 	}
3534 
3535 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3536 			"%s TSO Num of seg pool Alloc %d, descs = %d",
3537 			__func__, num_pool, num_desc);
3538 
3539 	/* Initialize descriptors in TCL Rings */
3540 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3541 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3542 			hal_tx_init_data_ring(soc->hal_soc,
3543 					soc->tcl_data_ring[i].hal_srng);
3544 		}
3545 	}
3546 
3547 	/*
3548 	 * todo - Add a runtime config option to enable this.
3549 	 */
3550 	/*
3551 	 * Due to multiple issues on NPR EMU, enable it selectively
3552 	 * only for NPR EMU; this should be removed once NPR platforms
3553 	 * are stable.
3554 	 */
3555 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
3556 
3557 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3558 			"%s HAL Tx init Success", __func__);
3559 
3560 	return QDF_STATUS_SUCCESS;
3561 
3562 fail:
3563 	/* Detach will take care of freeing only allocated resources */
3564 	dp_tx_soc_detach(soc);
3565 	return QDF_STATUS_E_RESOURCES;
3566 }
3567 
3568 /**
3569  * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
3570  * @pdev: Pointer to DP PDEV structure
3571  * @seg_info_head: Pointer to the head of the segment info list
3572  *
3573  * Return: void
3574  */
3575 static void dp_tx_me_mem_free(struct dp_pdev *pdev,
3576 		struct dp_tx_seg_info_s *seg_info_head)
3577 {
3578 	struct dp_tx_me_buf_t *mc_uc_buf;
3579 	struct dp_tx_seg_info_s *seg_info_new = NULL;
3580 	qdf_nbuf_t nbuf = NULL;
3581 	uint64_t phy_addr;
3582 
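	/*
	 * Walk the pending segment list: each node holds a (cloned) nbuf
	 * reference and a DMA-mapped ME buffer carrying the destination MAC,
	 * all of which are released along with the node itself.
	 */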
3583 	while (seg_info_head) {
3584 		nbuf = seg_info_head->nbuf;
3585 		mc_uc_buf = (struct dp_tx_me_buf_t *)
3586 			seg_info_head->frags[0].vaddr;
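		/*
		 * Rebuild the 64-bit DMA address of the ME buffer from the
		 * hi/lo halves stored in frag[0] before unmapping it.
		 */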
3587 		phy_addr = seg_info_head->frags[0].paddr_hi;
3588 		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
3589 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
3590 				phy_addr,
3591 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
3592 		dp_tx_me_free_buf(pdev, mc_uc_buf);
3593 		qdf_nbuf_free(nbuf);
3594 		seg_info_new = seg_info_head;
3595 		seg_info_head = seg_info_head->next;
3596 		qdf_mem_free(seg_info_new);
3597 	}
3598 }
3599 
3600 /**
3601  * dp_tx_me_send_convert_ucast() - Convert a multicast frame to unicast frames
3602  * @vdev_handle: DP VDEV handle
3603  * @nbuf: Multicast nbuf
3604  * @newmac: Table of client MAC addresses to which the frame has to be sent
3605  * @new_mac_cnt: Number of clients
3606  *
3607  * Return: Number of converted packets
3608  */
3609 uint16_t
3610 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
3611 		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
3612 {
3613 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
3614 	struct dp_pdev *pdev = vdev->pdev;
3615 	struct ether_header *eh;
3616 	uint8_t *data;
3617 	uint16_t len;
3618 
3619 	/* reference to frame dst addr */
3620 	uint8_t *dstmac;
3621 	/* copy of original frame src addr */
3622 	uint8_t srcmac[DP_MAC_ADDR_LEN];
3623 
3624 	/* local index into newmac */
3625 	uint8_t new_mac_idx = 0;
3626 	struct dp_tx_me_buf_t *mc_uc_buf;
3627 	qdf_nbuf_t  nbuf_clone;
3628 	struct dp_tx_msdu_info_s msdu_info;
3629 	struct dp_tx_seg_info_s *seg_info_head = NULL;
3630 	struct dp_tx_seg_info_s *seg_info_tail = NULL;
3631 	struct dp_tx_seg_info_s *seg_info_new;
3632 	struct dp_tx_frag_info_s data_frag;
3633 	qdf_dma_addr_t paddr_data;
3634 	qdf_dma_addr_t paddr_mcbuf = 0;
3635 	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
3636 	QDF_STATUS status;
3637 
3638 	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
3639 
3640 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
3641 
3642 	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
3643 	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
3644 
3645 	len = qdf_nbuf_len(nbuf);
3646 
3647 	data = qdf_nbuf_data(nbuf);
3648 
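	/*
	 * DMA-map the original frame once; its payload (minus the DA) is
	 * shared by every per-client frame built below, while a separately
	 * mapped 6-byte ME buffer supplies each new destination MAC.
	 */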
3649 	status = qdf_nbuf_map(vdev->osdev, nbuf,
3650 			QDF_DMA_TO_DEVICE);
3651 
3652 	if (status) {
3653 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3654 				"Mapping failure Error:%d", status);
3655 		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3656 		qdf_nbuf_free(nbuf);
3657 		return 1;
3658 	}
3659 
3660 	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
3661 
3662 	/* preparing data fragment */
3663 	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
3664 	data_frag.paddr_lo = (uint32_t)paddr_data;
3665 	data_frag.paddr_hi = (((uint64_t)paddr_data) >> 32);
3666 	data_frag.len = len - DP_MAC_ADDR_LEN;
3667 
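	/*
	 * Per-client frame layout built in each iteration (sketch):
	 *   frag[0]: [ new unicast DA (6 bytes, from ME buffer pool) ]
	 *   frag[1]: [ SA | rest of original frame ] (shared payload above)
	 */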
3668 	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
3669 		dstmac = newmac[new_mac_idx];
3670 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3671 				"added mac addr (%pM)", dstmac);
3672 
3673 		/* Check for NULL Mac Address */
3674 		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
3675 			continue;
3676 
3677 		/* frame to self mac. skip */
3678 		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
3679 			continue;
3680 
3681 		/*
3682 		 * TODO: optimize to avoid malloc in per-packet path
3683 		 * For eg. seg_pool can be made part of vdev structure
3684 		 */
3685 		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
3686 
3687 		if (!seg_info_new) {
3688 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3689 					"alloc failed");
3690 			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
3691 			goto fail_seg_alloc;
3692 		}
3693 
3694 		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
3695 		if (mc_uc_buf == NULL)
3696 			goto fail_buf_alloc;
3697 
3698 		/*
3699 		 * TODO: Check if we need to clone the nbuf
3700 		 * Or can we just use the reference for all cases
3701 		 */
3702 		if (new_mac_idx < (new_mac_cnt - 1)) {
3703 			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
3704 			if (nbuf_clone == NULL) {
3705 				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
3706 				goto fail_clone;
3707 			}
3708 		} else {
3709 			/*
3710 			 * Update the ref
3711 			 * to account for frame sent without cloning
3712 			 */
3713 			qdf_nbuf_ref(nbuf);
3714 			nbuf_clone = nbuf;
3715 		}
3716 
3717 		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
3718 
3719 		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
3720 				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
3721 				&paddr_mcbuf);
3722 
3723 		if (status) {
3724 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3725 					"Mapping failure Error:%d", status);
3726 			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
3727 			goto fail_map;
3728 		}
3729 
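		/*
		 * frag[0].vaddr stores the dp_tx_me_buf_t pointer itself (not
		 * its data) so the buffer can be returned to the ME pool on
		 * cleanup; the mapped DA address goes in paddr_lo/paddr_hi.
		 */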
3730 		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
3731 		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
3732 		seg_info_new->frags[0].paddr_hi =
3733 			((uint64_t) paddr_mcbuf >> 32);
3734 		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
3735 
3736 		seg_info_new->frags[1] = data_frag;
3737 		seg_info_new->nbuf = nbuf_clone;
3738 		seg_info_new->frag_cnt = 2;
3739 		seg_info_new->total_len = len;
3740 
3741 		seg_info_new->next = NULL;
3742 
3743 		if (seg_info_head == NULL)
3744 			seg_info_head = seg_info_new;
3745 		else
3746 			seg_info_tail->next = seg_info_new;
3747 
3748 		seg_info_tail = seg_info_new;
3749 	}
3750 
3751 	if (!seg_info_head) {
3752 		goto free_return;
3753 	}
3754 
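	/*
	 * Hand the segment list off to the regular multi-MSDU transmit path,
	 * tagged as an ME (multicast-to-unicast) frame type.
	 */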
3755 	msdu_info.u.sg_info.curr_seg = seg_info_head;
3756 	msdu_info.num_seg = new_mac_cnt;
3757 	msdu_info.frm_type = dp_tx_frm_me;
3758 
3759 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
3760 	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
3761 
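	/*
	 * dp_tx_send_msdu_multiple() is done with the segment list at this
	 * point, so the list nodes themselves can be freed.
	 */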
3762 	while (seg_info_head->next) {
3763 		seg_info_new = seg_info_head;
3764 		seg_info_head = seg_info_head->next;
3765 		qdf_mem_free(seg_info_new);
3766 	}
3767 	qdf_mem_free(seg_info_head);
3768 
3769 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3770 	qdf_nbuf_free(nbuf);
3771 	return new_mac_cnt;
3772 
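	/*
	 * The error labels below intentionally fall through: each releases
	 * the resources acquired before the failing step, ending with the
	 * unmap and free of the original nbuf at free_return.
	 */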
3773 fail_map:
3774 	qdf_nbuf_free(nbuf_clone);
3775 
3776 fail_clone:
3777 	dp_tx_me_free_buf(pdev, mc_uc_buf);
3778 
3779 fail_buf_alloc:
3780 	qdf_mem_free(seg_info_new);
3781 
3782 fail_seg_alloc:
3783 	dp_tx_me_mem_free(pdev, seg_info_head);
3784 
3785 free_return:
3786 	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
3787 	qdf_nbuf_free(nbuf);
3788 	return 1;
3789 }
3790 
3791