xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "htt.h"
20 #include "dp_htt.h"
21 #include "hal_hw_headers.h"
22 #include "dp_tx.h"
23 #include "dp_tx_desc.h"
24 #include "dp_peer.h"
25 #include "dp_types.h"
26 #include "hal_tx.h"
27 #include "qdf_mem.h"
28 #include "qdf_nbuf.h"
29 #include "qdf_net_types.h"
30 #include <wlan_cfg.h>
31 #include "dp_ipa.h"
32 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
33 #include "if_meta_hdr.h"
34 #endif
35 #include "enet.h"
36 #include "dp_internal.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #ifdef ATH_SUPPORT_IQUE
41 #include "dp_txrx_me.h"
42 #endif
43 
44 
45 /* TODO Add support in TSO */
46 #define DP_DESC_NUM_FRAG(x) 0
47 
48 /* disable TQM_BYPASS */
49 #define TQM_BYPASS_WAR 0
50 
51 /* invalid peer id for reinject */
52 #define DP_INVALID_PEER 0XFFFE
53 
54 /* mapping between hal encrypt type and cdp_sec_type */
55 #define MAX_CDP_SEC_TYPE 12
56 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
57 					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
58 					HAL_TX_ENCRYPT_TYPE_WEP_128,
59 					HAL_TX_ENCRYPT_TYPE_WEP_104,
60 					HAL_TX_ENCRYPT_TYPE_WEP_40,
61 					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
62 					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
63 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
64 					HAL_TX_ENCRYPT_TYPE_WAPI,
65 					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
66 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
67 					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
68 					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
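
/*
 * Illustrative sketch (comment only, not built): sec_type_map is indexed by
 * the per-frame cdp_sec_type resolved in dp_tx_hw_enqueue(), e.g.
 *
 *	enum cdp_sec_type sec_type = vdev->sec_type;
 *
 *	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
 *				     sec_type_map[sec_type]);
 *
 * This assumes the cdp_sec_type enum ordering matches the table above
 * (cdp_sec_type_none == 0, cdp_sec_type_wep128 == 1, and so on).
 */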
69 
70 #ifdef QCA_TX_LIMIT_CHECK
71 /**
72  * dp_tx_limit_check() - Check if the number of allocated tx descriptors has
73  * reached the soc max limit or the pdev max limit
74  * @vdev: DP vdev handle
75  *
76  * Return: true if allocated tx descriptors reached max configured value, else
77  * false
78  */
79 static inline bool
80 dp_tx_limit_check(struct dp_vdev *vdev)
81 {
82 	struct dp_pdev *pdev = vdev->pdev;
83 	struct dp_soc *soc = pdev->soc;
84 
85 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
86 			soc->num_tx_allowed) {
87 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
88 			  "%s: queued packets exceed the soc max tx limit, drop the frame",
89 			  __func__);
90 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
91 		return true;
92 	}
93 
94 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
95 			pdev->num_tx_allowed) {
96 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
97 			  "%s: queued packets exceed the pdev max tx limit, drop the frame",
98 			  __func__);
99 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
100 		return true;
101 	}
102 	return false;
103 }
104 
105 /**
106  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
107  * reached soc max limit
108  * @vdev: DP vdev handle
109  *
110  * Return: true if allocated tx exception descriptors reached the max
111  * configured value, else false
112  */
113 static inline bool
114 dp_tx_exception_limit_check(struct dp_vdev *vdev)
115 {
116 	struct dp_pdev *pdev = vdev->pdev;
117 	struct dp_soc *soc = pdev->soc;
118 
119 	if (qdf_atomic_read(&soc->num_tx_exception) >=
120 			soc->num_msdu_exception_desc) {
121 		dp_info("exc packets exceed the max limit, dropping the exc pkt");
122 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
123 		return true;
124 	}
125 
126 	return false;
127 }
128 
129 /**
130  * dp_tx_outstanding_inc() - Increment outstanding tx desc count on pdev and soc
131  * @pdev: DP pdev handle
132  *
133  * Return: void
134  */
135 static inline void
136 dp_tx_outstanding_inc(struct dp_pdev *pdev)
137 {
138 	struct dp_soc *soc = pdev->soc;
139 
140 	qdf_atomic_inc(&pdev->num_tx_outstanding);
141 	qdf_atomic_inc(&soc->num_tx_outstanding);
142 }
143 
144 /**
145  * dp_tx_outstanding_dec() - Decrement outstanding tx desc count on pdev and soc
146  * @pdev: DP pdev handle
147  *
148  * Return: void
149  */
150 static inline void
151 dp_tx_outstanding_dec(struct dp_pdev *pdev)
152 {
153 	struct dp_soc *soc = pdev->soc;
154 
155 	qdf_atomic_dec(&pdev->num_tx_outstanding);
156 	qdf_atomic_dec(&soc->num_tx_outstanding);
157 }
158 
159 #else //QCA_TX_LIMIT_CHECK
160 static inline bool
161 dp_tx_limit_check(struct dp_vdev *vdev)
162 {
163 	return false;
164 }
165 
166 static inline bool
167 dp_tx_exception_limit_check(struct dp_vdev *vdev)
168 {
169 	return false;
170 }
171 
172 static inline void
173 dp_tx_outstanding_inc(struct dp_pdev *pdev)
174 {
175 	qdf_atomic_inc(&pdev->num_tx_outstanding);
176 }
177 
178 static inline void
179 dp_tx_outstanding_dec(struct dp_pdev *pdev)
180 {
181 	qdf_atomic_dec(&pdev->num_tx_outstanding);
182 }
183 #endif //QCA_TX_LIMIT_CHECK
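
/*
 * Usage sketch (illustrative only): the helpers above are paired across the
 * Tx path - the limit check and the increment happen when a software Tx
 * descriptor is prepared, the decrement happens when it is released:
 *
 *	if (dp_tx_limit_check(vdev))		<- drop if soc/pdev limit hit
 *		return NULL;
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	dp_tx_outstanding_inc(pdev);		<- on the prepare path
 *	...
 *	dp_tx_outstanding_dec(pdev);		<- in dp_tx_desc_release()
 */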
184 
185 #if defined(FEATURE_TSO)
186 /**
187  * dp_tx_tso_unmap_segment() - Unmap TSO segment
188  *
189  * @soc - core txrx main context
190  * @seg_desc - tso segment descriptor
191  * @num_seg_desc - tso number segment descriptor
192  */
193 static void dp_tx_tso_unmap_segment(
194 		struct dp_soc *soc,
195 		struct qdf_tso_seg_elem_t *seg_desc,
196 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
197 {
198 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
199 	if (qdf_unlikely(!seg_desc)) {
200 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
201 			 __func__, __LINE__);
202 		qdf_assert(0);
203 	} else if (qdf_unlikely(!num_seg_desc)) {
204 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
205 			 __func__, __LINE__);
206 		qdf_assert(0);
207 	} else {
208 		bool is_last_seg;
209 		/* no tso segment left to do dma unmap */
210 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
211 			return;
212 
213 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
214 					true : false;
215 		qdf_nbuf_unmap_tso_segment(soc->osdev,
216 					   seg_desc, is_last_seg);
217 		num_seg_desc->num_seg.tso_cmn_num_seg--;
218 	}
219 }
220 
221 /**
222  * dp_tx_tso_desc_release() - Release the tso segment and tso num segment
223  *                            descriptors back to the freelist
224  *
225  * @soc - soc device handle
226  * @tx_desc - Tx software descriptor
227  */
228 static void dp_tx_tso_desc_release(struct dp_soc *soc,
229 				   struct dp_tx_desc_s *tx_desc)
230 {
231 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
232 	if (qdf_unlikely(!tx_desc->tso_desc)) {
233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
234 			  "%s %d TSO desc is NULL!",
235 			  __func__, __LINE__);
236 		qdf_assert(0);
237 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
238 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
239 			  "%s %d TSO num desc is NULL!",
240 			  __func__, __LINE__);
241 		qdf_assert(0);
242 	} else {
243 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
244 			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
245 
246 		/* Add the tso num segment into the free list */
247 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
248 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
249 					    tx_desc->tso_num_desc);
250 			tx_desc->tso_num_desc = NULL;
251 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
252 		}
253 
254 		/* Add the tso segment into the free list*/
255 		dp_tx_tso_desc_free(soc,
256 				    tx_desc->pool_id, tx_desc->tso_desc);
257 		tx_desc->tso_desc = NULL;
258 	}
259 }
260 #else
261 static void dp_tx_tso_unmap_segment(
262 		struct dp_soc *soc,
263 		struct qdf_tso_seg_elem_t *seg_desc,
264 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
265 
266 {
267 }
268 
269 static void dp_tx_tso_desc_release(struct dp_soc *soc,
270 				   struct dp_tx_desc_s *tx_desc)
271 {
272 }
273 #endif
274 /**
275  * dp_tx_desc_release() - Release Tx Descriptor
276  * @tx_desc : Tx Descriptor
277  * @desc_pool_id: Descriptor Pool ID
278  *
279  * Deallocate all resources attached to Tx descriptor and free the Tx
280  * descriptor.
281  *
282  * Return: none
283  */
284 static void
285 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
286 {
287 	struct dp_pdev *pdev = tx_desc->pdev;
288 	struct dp_soc *soc;
289 	uint8_t comp_status = 0;
290 
291 	qdf_assert(pdev);
292 
293 	soc = pdev->soc;
294 
295 	dp_tx_outstanding_dec(pdev);
296 
297 	if (tx_desc->frm_type == dp_tx_frm_tso)
298 		dp_tx_tso_desc_release(soc, tx_desc);
299 
300 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
301 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
302 
303 	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
304 		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
305 
306 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
307 		qdf_atomic_dec(&soc->num_tx_exception);
308 
309 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
310 				hal_tx_comp_get_buffer_source(&tx_desc->comp))
311 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
312 							     soc->hal_soc);
313 	else
314 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
315 
316 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
317 		"Tx Completion Release desc %d status %d outstanding %d",
318 		tx_desc->id, comp_status,
319 		qdf_atomic_read(&pdev->num_tx_outstanding));
320 
321 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
322 	return;
323 }
324 
325 /**
326  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
327  * @vdev: DP vdev Handle
328  * @nbuf: skb
329  * @msdu_info: msdu_info required to create HTT metadata
330  *
331  * Prepares and fills HTT metadata in the frame pre-header for special frames
332  * that should be transmitted using varying transmit parameters.
333  * There are 2 VDEV modes that currently need this special metadata -
334  *  1) Mesh Mode
335  *  2) DSRC Mode
336  *
337  * Return: HTT metadata size
338  *
339  */
340 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
341 					  struct dp_tx_msdu_info_s *msdu_info)
342 {
343 	uint32_t *meta_data = msdu_info->meta_data;
344 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
345 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
346 
347 	uint8_t htt_desc_size;
348 
349 	/* Size rounded up to a multiple of 8 bytes */
350 	uint8_t htt_desc_size_aligned;
351 
352 	uint8_t *hdr = NULL;
353 
354 	/*
355 	 * Metadata - HTT MSDU Extension header
356 	 */
357 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
358 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
359 
360 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
361 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
362 							   meta_data[0])) {
363 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
364 				 htt_desc_size_aligned)) {
365 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
366 							 htt_desc_size_aligned);
367 			if (!nbuf) {
368 				/*
369 				 * qdf_nbuf_realloc_headroom won't do skb_clone
370 				 * as skb_realloc_headroom does. so, no free is
371 				 * needed here.
372 				 */
373 				DP_STATS_INC(vdev,
374 					     tx_i.dropped.headroom_insufficient,
375 					     1);
376 				qdf_print(" %s[%d] skb_realloc_headroom failed",
377 					  __func__, __LINE__);
378 				return 0;
379 			}
380 		}
381 		/* Fill and add HTT metaheader */
382 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
383 		if (!hdr) {
384 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
385 					"Error in filling HTT metadata");
386 
387 			return 0;
388 		}
389 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
390 
391 	} else if (vdev->opmode == wlan_op_mode_ocb) {
392 		/* Todo - Add support for DSRC */
393 	}
394 
395 	return htt_desc_size_aligned;
396 }
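
/*
 * Worked example of the 8-byte rounding used in dp_tx_prepare_htt_metadata()
 * (the descriptor sizes below are hypothetical, only the arithmetic matters):
 *
 *	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
 *	e.g. htt_desc_size == 34  ->  (34 + 7) & ~0x7 == 40
 *	     htt_desc_size == 40  ->  (40 + 7) & ~0x7 == 40 (already aligned)
 *
 * The aligned size is what gets pushed in front of the frame, so the HTT
 * metadata always occupies a multiple of 8 bytes of headroom.
 */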
397 
398 /**
399  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
400  * @tso_seg: TSO segment to process
401  * @ext_desc: Pointer to MSDU extension descriptor
402  *
403  * Return: void
404  */
405 #if defined(FEATURE_TSO)
406 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
407 		void *ext_desc)
408 {
409 	uint8_t num_frag;
410 	uint32_t tso_flags;
411 
412 	/*
413 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
414 	 * tcp_flag_mask
415 	 *
416 	 * Checksum enable flags are set in TCL descriptor and not in Extension
417 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
418 	 */
419 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
420 
421 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
422 
423 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
424 		tso_seg->tso_flags.ip_len);
425 
426 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
427 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
428 
429 
430 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
431 		uint32_t lo = 0;
432 		uint32_t hi = 0;
433 
434 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
435 				  (tso_seg->tso_frags[num_frag].length));
436 
437 		qdf_dmaaddr_to_32s(
438 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
439 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
440 			tso_seg->tso_frags[num_frag].length);
441 	}
442 
443 	return;
444 }
445 #else
446 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
447 		void *ext_desc)
448 {
449 	return;
450 }
451 #endif
452 
453 #if defined(FEATURE_TSO)
454 /**
455  * dp_tx_free_tso_seg_list() - Loop through the tso segments
456  *                             allocated and free them
457  *
458  * @soc: soc handle
459  * @free_seg: list of tso segments
460  * @msdu_info: msdu descriptor
461  *
462  * Return - void
463  */
464 static void dp_tx_free_tso_seg_list(
465 		struct dp_soc *soc,
466 		struct qdf_tso_seg_elem_t *free_seg,
467 		struct dp_tx_msdu_info_s *msdu_info)
468 {
469 	struct qdf_tso_seg_elem_t *next_seg;
470 
471 	while (free_seg) {
472 		next_seg = free_seg->next;
473 		dp_tx_tso_desc_free(soc,
474 				    msdu_info->tx_queue.desc_pool_id,
475 				    free_seg);
476 		free_seg = next_seg;
477 	}
478 }
479 
480 /**
481  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
482  *                                 allocated and free them
483  *
484  * @soc:  soc handle
485  * @free_num_seg: list of tso number segments
486  * @msdu_info: msdu descriptor
487  * Return - void
488  */
489 static void dp_tx_free_tso_num_seg_list(
490 		struct dp_soc *soc,
491 		struct qdf_tso_num_seg_elem_t *free_num_seg,
492 		struct dp_tx_msdu_info_s *msdu_info)
493 {
494 	struct qdf_tso_num_seg_elem_t *next_num_seg;
495 
496 	while (free_num_seg) {
497 		next_num_seg = free_num_seg->next;
498 		dp_tso_num_seg_free(soc,
499 				    msdu_info->tx_queue.desc_pool_id,
500 				    free_num_seg);
501 		free_num_seg = next_num_seg;
502 	}
503 }
504 
505 /**
506  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
507  *                              do dma unmap for each segment
508  *
509  * @soc: soc handle
510  * @free_seg: list of tso segments
511  * @num_seg_desc: tso number segment descriptor
512  *
513  * Return - void
514  */
515 static void dp_tx_unmap_tso_seg_list(
516 		struct dp_soc *soc,
517 		struct qdf_tso_seg_elem_t *free_seg,
518 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
519 {
520 	struct qdf_tso_seg_elem_t *next_seg;
521 
522 	if (qdf_unlikely(!num_seg_desc)) {
523 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
524 		return;
525 	}
526 
527 	while (free_seg) {
528 		next_seg = free_seg->next;
529 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
530 		free_seg = next_seg;
531 	}
532 }
533 
534 #ifdef FEATURE_TSO_STATS
535 /**
536  * dp_tso_get_stats_idx() - Retrieve the tso packet id
537  * @pdev - pdev handle
538  *
539  * Return: id
540  */
541 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
542 {
543 	uint32_t stats_idx;
544 
545 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
546 						% CDP_MAX_TSO_PACKETS);
547 	return stats_idx;
548 }
549 #else
550 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
551 {
552 	return 0;
553 }
554 #endif /* FEATURE_TSO_STATS */
555 
556 /**
557  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
558  *				     free the tso segments descriptor and
559  *				     tso num segments descriptor
560  *
561  * @soc:  soc handle
562  * @msdu_info: msdu descriptor
563  * @tso_seg_unmap: flag to show if dma unmap is necessary
564  *
565  * Return - void
566  */
567 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
568 					  struct dp_tx_msdu_info_s *msdu_info,
569 					  bool tso_seg_unmap)
570 {
571 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
572 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
573 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
574 					tso_info->tso_num_seg_list;
575 
576 	/* do dma unmap for each segment */
577 	if (tso_seg_unmap)
578 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
579 
580 	/* free all tso num seg descriptors (normally there is only one) */
581 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
582 
583 	/* free all tso segment descriptor */
584 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
585 }
586 
587 /**
588  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
589  * @vdev: virtual device handle
590  * @msdu: network buffer
591  * @msdu_info: meta data associated with the msdu
592  *
593  * Return: QDF_STATUS_SUCCESS success
594  */
595 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
596 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
597 {
598 	struct qdf_tso_seg_elem_t *tso_seg;
599 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
600 	struct dp_soc *soc = vdev->pdev->soc;
601 	struct dp_pdev *pdev = vdev->pdev;
602 	struct qdf_tso_info_t *tso_info;
603 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
604 	tso_info = &msdu_info->u.tso_info;
605 	tso_info->curr_seg = NULL;
606 	tso_info->tso_seg_list = NULL;
607 	tso_info->num_segs = num_seg;
608 	msdu_info->frm_type = dp_tx_frm_tso;
609 	tso_info->tso_num_seg_list = NULL;
610 
611 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
612 
613 	while (num_seg) {
614 		tso_seg = dp_tx_tso_desc_alloc(
615 				soc, msdu_info->tx_queue.desc_pool_id);
616 		if (tso_seg) {
617 			tso_seg->next = tso_info->tso_seg_list;
618 			tso_info->tso_seg_list = tso_seg;
619 			num_seg--;
620 		} else {
621 			dp_err_rl("Failed to alloc tso seg desc");
622 			DP_STATS_INC_PKT(vdev->pdev,
623 					 tso_stats.tso_no_mem_dropped, 1,
624 					 qdf_nbuf_len(msdu));
625 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
626 
627 			return QDF_STATUS_E_NOMEM;
628 		}
629 	}
630 
631 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
632 
633 	tso_num_seg = dp_tso_num_seg_alloc(soc,
634 			msdu_info->tx_queue.desc_pool_id);
635 
636 	if (tso_num_seg) {
637 		tso_num_seg->next = tso_info->tso_num_seg_list;
638 		tso_info->tso_num_seg_list = tso_num_seg;
639 	} else {
640 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
641 			 __func__);
642 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
643 
644 		return QDF_STATUS_E_NOMEM;
645 	}
646 
647 	msdu_info->num_seg =
648 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
649 
650 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
651 			msdu_info->num_seg);
652 
653 	if (!(msdu_info->num_seg)) {
654 		/*
655 		 * Free allocated TSO seg desc and number seg desc,
656 		 * do unmap for segments if dma map has done.
657 		 */
658 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
659 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
660 
661 		return QDF_STATUS_E_INVAL;
662 	}
663 
664 	tso_info->curr_seg = tso_info->tso_seg_list;
665 
666 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
667 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
668 			     msdu, msdu_info->num_seg);
669 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
670 				    tso_info->msdu_stats_idx);
671 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
672 	return QDF_STATUS_SUCCESS;
673 }
674 #else
675 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
676 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
677 {
678 	return QDF_STATUS_E_NOMEM;
679 }
680 #endif
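
/*
 * Illustrative sketch of what dp_tx_prepare_tso() (FEATURE_TSO build) leaves
 * behind for a jumbo MSDU split into three segments:
 *
 *	msdu_info->u.tso_info
 *	    .num_segs         = 3
 *	    .tso_seg_list     -> seg -> seg -> seg -> NULL (prepended on alloc)
 *	    .curr_seg         -> head of tso_seg_list
 *	    .tso_num_seg_list -> a single qdf_tso_num_seg_elem_t
 *
 * qdf_nbuf_get_tso_info() then fills in the per-segment flags and fragments,
 * and dp_tx_send_msdu_multiple() prepares one descriptor per segment.
 */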
681 
682 /**
683  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
684  * @vdev: DP Vdev handle
685  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
686  * @desc_pool_id: Descriptor Pool ID
687  *
688  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
689  */
690 static
691 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
692 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
693 {
694 	uint8_t i;
695 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
696 	struct dp_tx_seg_info_s *seg_info;
697 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
698 	struct dp_soc *soc = vdev->pdev->soc;
699 
700 	/* Allocate an extension descriptor */
701 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
702 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
703 
704 	if (!msdu_ext_desc) {
705 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
706 		return NULL;
707 	}
708 
709 	if (msdu_info->exception_fw &&
710 			qdf_unlikely(vdev->mesh_vdev)) {
711 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
712 				&msdu_info->meta_data[0],
713 				sizeof(struct htt_tx_msdu_desc_ext2_t));
714 		qdf_atomic_inc(&soc->num_tx_exception);
715 	}
716 
717 	switch (msdu_info->frm_type) {
718 	case dp_tx_frm_sg:
719 	case dp_tx_frm_me:
720 	case dp_tx_frm_raw:
721 		seg_info = msdu_info->u.sg_info.curr_seg;
722 		/* Update the buffer pointers in MSDU Extension Descriptor */
723 		for (i = 0; i < seg_info->frag_cnt; i++) {
724 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
725 				seg_info->frags[i].paddr_lo,
726 				seg_info->frags[i].paddr_hi,
727 				seg_info->frags[i].len);
728 		}
729 
730 		break;
731 
732 	case dp_tx_frm_tso:
733 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
734 				&cached_ext_desc[0]);
735 		break;
736 
737 
738 	default:
739 		break;
740 	}
741 
742 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
743 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
744 
745 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
746 			msdu_ext_desc->vaddr);
747 
748 	return msdu_ext_desc;
749 }
750 
751 /**
752  * dp_tx_trace_pkt() - Trace TX packet at DP layer
753  *
754  * @skb: skb to be traced
755  * @msdu_id: msdu_id of the packet
756  * @vdev_id: vdev_id of the packet
757  *
758  * Return: None
759  */
760 #ifdef DP_DISABLE_TX_PKT_TRACE
761 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
762 			    uint8_t vdev_id)
763 {
764 }
765 #else
766 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
767 			    uint8_t vdev_id)
768 {
769 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
770 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
771 	DPTRACE(qdf_dp_trace_ptr(skb,
772 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
773 				 QDF_TRACE_DEFAULT_PDEV_ID,
774 				 qdf_nbuf_data_addr(skb),
775 				 sizeof(qdf_nbuf_data(skb)),
776 				 msdu_id, vdev_id));
777 
778 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
779 
780 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
781 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
782 				      msdu_id, QDF_TX));
783 }
784 #endif
785 
786 /**
787  * dp_tx_prepare_desc_single() - Allocate and prepare a single Tx descriptor
788  * @vdev: DP vdev handle
789  * @nbuf: skb
790  * @desc_pool_id: Descriptor pool ID
791  * @msdu_info: MSDU info required to create HTT metadata
792  * @tx_exc_metadata: Handle that holds exception path metadata
793  * Allocate and prepare Tx descriptor with msdu information.
794  *
795  * Return: Pointer to Tx Descriptor on success,
796  *         NULL on failure
797  */
798 static
799 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
800 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
801 		struct dp_tx_msdu_info_s *msdu_info,
802 		struct cdp_tx_exception_metadata *tx_exc_metadata)
803 {
804 	uint8_t align_pad;
805 	uint8_t is_exception = 0;
806 	uint8_t htt_hdr_size;
807 	struct dp_tx_desc_s *tx_desc;
808 	struct dp_pdev *pdev = vdev->pdev;
809 	struct dp_soc *soc = pdev->soc;
810 
811 	if (dp_tx_limit_check(vdev))
812 		return NULL;
813 
814 	/* Allocate software Tx descriptor */
815 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
816 	if (qdf_unlikely(!tx_desc)) {
817 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
818 		return NULL;
819 	}
820 
821 	dp_tx_outstanding_inc(pdev);
822 
823 	/* Initialize the SW tx descriptor */
824 	tx_desc->nbuf = nbuf;
825 	tx_desc->frm_type = dp_tx_frm_std;
826 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
827 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
828 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
829 	tx_desc->vdev = vdev;
830 	tx_desc->pdev = pdev;
831 	tx_desc->msdu_ext_desc = NULL;
832 	tx_desc->pkt_offset = 0;
833 
834 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
835 
836 	if (qdf_unlikely(vdev->multipass_en)) {
837 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
838 			goto failure;
839 	}
840 
841 	/*
842 	 * For special modes (vdev_type == ocb or mesh), data frames should be
843 	 * transmitted using varying transmit parameters (tx spec) which include
844 	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
845 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
846 	 * These frames are sent as exception packets to firmware.
847 	 *
848 	 * HW requirement is that metadata should always point to a
849 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
850 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
851 	 *  to get 8-byte aligned start address along with align_pad added
852 	 *
853 	 *  |-----------------------------|
854 	 *  |                             |
855 	 *  |-----------------------------| <-----Buffer Pointer Address given
856 	 *  |                             |  ^    in HW descriptor (aligned)
857 	 *  |       HTT Metadata          |  |
858 	 *  |                             |  |
859 	 *  |                             |  | Packet Offset given in descriptor
860 	 *  |                             |  |
861 	 *  |-----------------------------|  |
862 	 *  |       Alignment Pad         |  v
863 	 *  |-----------------------------| <----- Actual buffer start address
864 	 *  |        SKB Data             |           (Unaligned)
865 	 *  |                             |
866 	 *  |                             |
867 	 *  |                             |
868 	 *  |                             |
869 	 *  |                             |
870 	 *  |-----------------------------|
871 	 */
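	/*
	 * Worked example of the offset math below (addresses are made up):
	 * if qdf_nbuf_data(nbuf) ends in 0x...1005, then
	 *	align_pad    = 0x1005 & 0x7 = 5, push_head(5) -> 0x...1000
	 *	htt_hdr_size = aligned metadata size returned by
	 *		       dp_tx_prepare_htt_metadata() (a multiple of 8)
	 *	pkt_offset   = align_pad + htt_hdr_size
	 * so the buffer address programmed into the HW descriptor stays
	 * 8-byte aligned and pkt_offset points back to the original payload.
	 */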
872 	if (qdf_unlikely((msdu_info->exception_fw)) ||
873 				(vdev->opmode == wlan_op_mode_ocb) ||
874 				(tx_exc_metadata &&
875 				tx_exc_metadata->is_tx_sniffer)) {
876 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
877 
878 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
879 			DP_STATS_INC(vdev,
880 				     tx_i.dropped.headroom_insufficient, 1);
881 			goto failure;
882 		}
883 
884 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
885 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
886 					"qdf_nbuf_push_head failed");
887 			goto failure;
888 		}
889 
890 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
891 				msdu_info);
892 		if (htt_hdr_size == 0)
893 			goto failure;
894 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
895 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
896 		is_exception = 1;
897 	}
898 
899 #if !TQM_BYPASS_WAR
900 	if (is_exception || tx_exc_metadata)
901 #endif
902 	{
903 		/* Temporary WAR due to TQM VP issues */
904 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
905 		qdf_atomic_inc(&soc->num_tx_exception);
906 	}
907 
908 	return tx_desc;
909 
910 failure:
911 	dp_tx_desc_release(tx_desc, desc_pool_id);
912 	return NULL;
913 }
914 
915 /**
916  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
917  * @vdev: DP vdev handle
918  * @nbuf: skb
919  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
920  * @desc_pool_id : Descriptor Pool ID
921  *
922  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
923  * information. For frames with fragments, allocate and prepare
924  * an MSDU extension descriptor
925  *
926  * Return: Pointer to Tx Descriptor on success,
927  *         NULL on failure
928  */
929 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
930 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
931 		uint8_t desc_pool_id)
932 {
933 	struct dp_tx_desc_s *tx_desc;
934 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
935 	struct dp_pdev *pdev = vdev->pdev;
936 	struct dp_soc *soc = pdev->soc;
937 
938 	if (dp_tx_limit_check(vdev))
939 		return NULL;
940 
941 	/* Allocate software Tx descriptor */
942 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
943 	if (!tx_desc) {
944 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
945 		return NULL;
946 	}
947 
948 	dp_tx_outstanding_inc(pdev);
949 
950 	/* Initialize the SW tx descriptor */
951 	tx_desc->nbuf = nbuf;
952 	tx_desc->frm_type = msdu_info->frm_type;
953 	tx_desc->tx_encap_type = vdev->tx_encap_type;
954 	tx_desc->vdev = vdev;
955 	tx_desc->pdev = pdev;
956 	tx_desc->pkt_offset = 0;
957 	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
958 	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
959 
960 	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
961 
962 	/* Handle scattered frames - TSO/SG/ME */
963 	/* Allocate and prepare an extension descriptor for scattered frames */
964 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
965 	if (!msdu_ext_desc) {
966 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
967 				"%s Tx Extension Descriptor Alloc Fail",
968 				__func__);
969 		goto failure;
970 	}
971 
972 #if TQM_BYPASS_WAR
973 	/* Temporary WAR due to TQM VP issues */
974 	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
975 	qdf_atomic_inc(&soc->num_tx_exception);
976 #endif
977 	if (qdf_unlikely(msdu_info->exception_fw))
978 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
979 
980 	tx_desc->msdu_ext_desc = msdu_ext_desc;
981 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
982 
983 	return tx_desc;
984 failure:
985 	dp_tx_desc_release(tx_desc, desc_pool_id);
986 	return NULL;
987 }
988 
989 /**
990  * dp_tx_prepare_raw() - Prepare RAW packet TX
991  * @vdev: DP vdev handle
992  * @nbuf: buffer pointer
993  * @seg_info: Pointer to Segment info Descriptor to be prepared
994  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
995  *     descriptor
996  *
997  * Return: nbuf on success, NULL on failure
998  */
999 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1000 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1001 {
1002 	qdf_nbuf_t curr_nbuf = NULL;
1003 	uint16_t total_len = 0;
1004 	qdf_dma_addr_t paddr;
1005 	int32_t i;
1006 	int32_t mapped_buf_num = 0;
1007 
1008 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1009 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1010 
1011 	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
1012 
1013 	/* Continue only if frames are of DATA type */
1014 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1015 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1016 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1017 			  "Pkt. recd is not of data type");
1018 		goto error;
1019 	}
1020 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1021 	if (vdev->raw_mode_war &&
1022 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1023 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1024 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1025 
1026 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1027 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1028 
1029 		if (QDF_STATUS_SUCCESS !=
1030 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1031 						   curr_nbuf,
1032 						   QDF_DMA_TO_DEVICE,
1033 						   curr_nbuf->len)) {
1034 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1035 				"%s dma map error ", __func__);
1036 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
1037 			mapped_buf_num = i;
1038 			goto error;
1039 		}
1040 
1041 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1042 		seg_info->frags[i].paddr_lo = paddr;
1043 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1044 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1045 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1046 		total_len += qdf_nbuf_len(curr_nbuf);
1047 	}
1048 
1049 	seg_info->frag_cnt = i;
1050 	seg_info->total_len = total_len;
1051 	seg_info->next = NULL;
1052 
1053 	sg_info->curr_seg = seg_info;
1054 
1055 	msdu_info->frm_type = dp_tx_frm_raw;
1056 	msdu_info->num_seg = 1;
1057 
1058 	return nbuf;
1059 
1060 error:
1061 	i = 0;
1062 	while (nbuf) {
1063 		curr_nbuf = nbuf;
1064 		if (i < mapped_buf_num) {
1065 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1066 						     QDF_DMA_TO_DEVICE,
1067 						     curr_nbuf->len);
1068 			i++;
1069 		}
1070 		nbuf = qdf_nbuf_next(nbuf);
1071 		qdf_nbuf_free(curr_nbuf);
1072 	}
1073 	return NULL;
1074 
1075 }
1076 
1077 /**
1078  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1079  * @soc: DP soc handle
1080  * @nbuf: Buffer pointer
1081  *
1082  * unmap the chain of nbufs that belong to this RAW frame.
1083  *
1084  * Return: None
1085  */
1086 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1087 				    qdf_nbuf_t nbuf)
1088 {
1089 	qdf_nbuf_t cur_nbuf = nbuf;
1090 
1091 	do {
1092 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1093 					     QDF_DMA_TO_DEVICE,
1094 					     cur_nbuf->len);
1095 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1096 	} while (cur_nbuf);
1097 }
1098 
1099 #ifdef VDEV_PEER_PROTOCOL_COUNT
1100 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
1101 { \
1102 	qdf_nbuf_t nbuf_local; \
1103 	struct dp_vdev *vdev_local = vdev_hdl; \
1104 	do { \
1105 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1106 			break; \
1107 		nbuf_local = nbuf; \
1108 		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
1109 			 htt_cmn_pkt_type_raw)) \
1110 			break; \
1111 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
1112 			break; \
1113 		else if (qdf_nbuf_is_tso((nbuf_local))) \
1114 			break; \
1115 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1116 						       (nbuf_local), \
1117 						       NULL, 1, 0); \
1118 	} while (0); \
1119 }
1120 #else
1121 #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
1122 #endif
1123 
1124 /**
1125  * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
1126  * @soc: DP Soc Handle
1127  * @vdev: DP vdev handle
1128  * @tx_desc: Tx Descriptor Handle
1129  * @tid: TID from HLOS for overriding default DSCP-TID mapping
1130  * @fw_metadata: Metadata to send to Target Firmware along with frame
1131  * @ring_id: Ring ID of H/W ring to which we enqueue the packet
1132  * @tx_exc_metadata: Handle that holds exception path meta data
1133  *
1134  *  Gets the next free TCL HW DMA descriptor and sets up required parameters
1135  *  from software Tx descriptor
1136  *
1137  * Return: QDF_STATUS_SUCCESS: success
1138  *         QDF_STATUS_E_RESOURCES: Error return
1139  */
1140 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
1141 				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
1142 				   uint16_t fw_metadata, uint8_t ring_id,
1143 				   struct cdp_tx_exception_metadata
1144 					*tx_exc_metadata)
1145 {
1146 	uint8_t type;
1147 	void *hal_tx_desc;
1148 	uint32_t *hal_tx_desc_cached;
1149 
1150 	/*
1151 	 * Initializing the descriptor statically here avoids
1152 	 * a jump into the memset/qdf_mem_set call
1153 	 */
1154 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1155 
1156 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
1157 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
1158 			tx_exc_metadata->sec_type : vdev->sec_type);
1159 
1160 	/* Return Buffer Manager ID */
1161 	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
1162 
1163 	hal_ring_handle_t hal_ring_hdl = NULL;
1164 
1165 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1166 
1167 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1168 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1169 		return QDF_STATUS_E_RESOURCES;
1170 	}
1171 
1172 	hal_tx_desc_cached = (void *) cached_desc;
1173 
1174 	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
1175 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1176 		type = HAL_TX_BUF_TYPE_EXT_DESC;
1177 		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
1178 	} else {
1179 		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
1180 					tx_desc->pkt_offset;
1181 		type = HAL_TX_BUF_TYPE_BUFFER;
1182 		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
1183 	}
1184 
1185 	qdf_assert_always(tx_desc->dma_addr);
1186 
1187 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
1188 				 tx_desc->dma_addr, bm_id, tx_desc->id,
1189 				 type);
1190 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
1191 				vdev->lmac_id);
1192 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
1193 				    vdev->search_type);
1194 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
1195 				     vdev->bss_ast_idx);
1196 	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
1197 					  vdev->dscp_tid_map_id);
1198 
1199 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
1200 			sec_type_map[sec_type]);
1201 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1202 				      (vdev->bss_ast_hash & 0xF));
1203 
1204 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1205 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1206 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1207 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
1208 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
1209 					  vdev->hal_desc_addr_search_flags);
1210 
1211 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1212 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1213 
1214 	/* verify checksum offload configuration */
1215 	if (vdev->csum_enabled &&
1216 	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
1217 		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
1218 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1219 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1220 	}
1221 
1222 	if (tid != HTT_TX_EXT_TID_INVALID)
1223 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1224 
1225 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
1226 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
1227 
1228 	if (qdf_unlikely(vdev->pdev->delay_stats_flag))
1229 		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
1230 
1231 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
1232 			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
1233 			 tx_desc->pkt_offset, tx_desc->id);
1234 
1235 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1236 
1237 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1239 			  "%s %d : HAL RING Access Failed -- %pK",
1240 			 __func__, __LINE__, hal_ring_hdl);
1241 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1242 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1243 		return status;
1244 	}
1245 
1246 	/* Sync cached descriptor with HW */
1247 
1248 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1249 	if (qdf_unlikely(!hal_tx_desc)) {
1250 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1251 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1252 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1253 		goto ring_access_fail;
1254 	}
1255 
1256 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1257 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1258 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
1259 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1260 	status = QDF_STATUS_SUCCESS;
1261 
1262 ring_access_fail:
1263 	if (hif_pm_runtime_get(soc->hif_handle,
1264 			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
1265 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1266 		hif_pm_runtime_put(soc->hif_handle,
1267 				   RTPM_ID_DW_TX_HW_ENQUEUE);
1268 	} else {
1269 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1270 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1271 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1272 	}
1273 
1274 	return status;
1275 }
1276 
1277 
1278 /**
1279  * dp_cce_classify() - Classify the frame based on CCE rules
1280  * @vdev: DP vdev handle
1281  * @nbuf: skb
1282  *
1283  * Classify frames based on CCE rules
1284  * Return: bool( true if classified,
1285  *               else false)
1286  */
1287 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1288 {
1289 	qdf_ether_header_t *eh = NULL;
1290 	uint16_t   ether_type;
1291 	qdf_llc_t *llcHdr;
1292 	qdf_nbuf_t nbuf_clone = NULL;
1293 	qdf_dot3_qosframe_t *qos_wh = NULL;
1294 
1295 	/* for mesh packets don't do any classification */
1296 	if (qdf_unlikely(vdev->mesh_vdev))
1297 		return false;
1298 
1299 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1300 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1301 		ether_type = eh->ether_type;
1302 		llcHdr = (qdf_llc_t *)(nbuf->data +
1303 					sizeof(qdf_ether_header_t));
1304 	} else {
1305 		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1306 		/* For encrypted packets don't do any classification */
1307 		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
1308 			return false;
1309 
1310 		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
1311 			if (qdf_unlikely(
1312 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
1313 				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
1314 
1315 				ether_type = *(uint16_t *)(nbuf->data
1316 						+ QDF_IEEE80211_4ADDR_HDR_LEN
1317 						+ sizeof(qdf_llc_t)
1318 						- sizeof(ether_type));
1319 				llcHdr = (qdf_llc_t *)(nbuf->data +
1320 						QDF_IEEE80211_4ADDR_HDR_LEN);
1321 			} else {
1322 				ether_type = *(uint16_t *)(nbuf->data
1323 						+ QDF_IEEE80211_3ADDR_HDR_LEN
1324 						+ sizeof(qdf_llc_t)
1325 						- sizeof(ether_type));
1326 				llcHdr = (qdf_llc_t *)(nbuf->data +
1327 					QDF_IEEE80211_3ADDR_HDR_LEN);
1328 			}
1329 
1330 			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
1331 				&& (ether_type ==
1332 				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
1333 
1334 				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
1335 				return true;
1336 			}
1337 		}
1338 
1339 		return false;
1340 	}
1341 
1342 	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
1343 		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1344 				sizeof(*llcHdr));
1345 		nbuf_clone = qdf_nbuf_clone(nbuf);
1346 		if (qdf_unlikely(nbuf_clone)) {
1347 			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
1348 
1349 			if (ether_type == htons(ETHERTYPE_VLAN)) {
1350 				qdf_nbuf_pull_head(nbuf_clone,
1351 						sizeof(qdf_net_vlanhdr_t));
1352 			}
1353 		}
1354 	} else {
1355 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1356 			nbuf_clone = qdf_nbuf_clone(nbuf);
1357 			if (qdf_unlikely(nbuf_clone)) {
1358 				qdf_nbuf_pull_head(nbuf_clone,
1359 					sizeof(qdf_net_vlanhdr_t));
1360 			}
1361 		}
1362 	}
1363 
1364 	if (qdf_unlikely(nbuf_clone))
1365 		nbuf = nbuf_clone;
1366 
1367 
1368 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
1369 		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
1370 		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
1371 		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
1372 		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
1373 			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
1374 		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
1375 			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
1376 		if (qdf_unlikely(nbuf_clone))
1377 			qdf_nbuf_free(nbuf_clone);
1378 		return true;
1379 	}
1380 
1381 	if (qdf_unlikely(nbuf_clone))
1382 		qdf_nbuf_free(nbuf_clone);
1383 
1384 	return false;
1385 }
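
/*
 * Usage note (illustrative): when the hardware CCE is disabled
 * (soc->cce_disable), callers use this classification to steer control
 * frames (EAPOL, ARP, DHCP, WAPI, TDLS) through the firmware exception
 * path, e.g. in dp_tx_send_msdu_single():
 *
 *	if (dp_cce_classify(vdev, nbuf) == true) {
 *		DP_STATS_INC(vdev, tx_i.cce_classified, 1);
 *		tid = DP_VO_TID;
 *		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
 *	}
 */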
1386 
1387 /**
1388  * dp_tx_get_tid() - Obtain TID to be used for this frame
1389  * @vdev: DP vdev handle
1390  * @nbuf: skb
1391  * @msdu_info: msdu descriptor that receives the resolved TID
1392  * Extract the DSCP or PCP information from the frame and map it into a TID value.
1393  *
1394  * Return: void
1395  */
1396 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1397 			  struct dp_tx_msdu_info_s *msdu_info)
1398 {
1399 	uint8_t tos = 0, dscp_tid_override = 0;
1400 	uint8_t *hdr_ptr, *L3datap;
1401 	uint8_t is_mcast = 0;
1402 	qdf_ether_header_t *eh = NULL;
1403 	qdf_ethervlan_header_t *evh = NULL;
1404 	uint16_t   ether_type;
1405 	qdf_llc_t *llcHdr;
1406 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1407 
1408 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1409 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1410 		eh = (qdf_ether_header_t *)nbuf->data;
1411 		hdr_ptr = eh->ether_dhost;
1412 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1413 	} else {
1414 		qdf_dot3_qosframe_t *qos_wh =
1415 			(qdf_dot3_qosframe_t *) nbuf->data;
1416 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1417 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1418 		return;
1419 	}
1420 
1421 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1422 	ether_type = eh->ether_type;
1423 
1424 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1425 	/*
1426 	 * Check if packet is dot3 or eth2 type.
1427 	 */
1428 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1429 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1430 				sizeof(*llcHdr));
1431 
1432 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1433 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1434 				sizeof(*llcHdr);
1435 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1436 					+ sizeof(*llcHdr) +
1437 					sizeof(qdf_net_vlanhdr_t));
1438 		} else {
1439 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1440 				sizeof(*llcHdr);
1441 		}
1442 	} else {
1443 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1444 			evh = (qdf_ethervlan_header_t *) eh;
1445 			ether_type = evh->ether_type;
1446 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1447 		}
1448 	}
1449 
1450 	/*
1451 	 * Find priority from IP TOS DSCP field
1452 	 */
1453 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1454 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1455 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1456 			/* Only for unicast frames */
1457 			if (!is_mcast) {
1458 				/* send it on VO queue */
1459 				msdu_info->tid = DP_VO_TID;
1460 			}
1461 		} else {
1462 			/*
1463 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1464 			 * from TOS byte.
1465 			 */
1466 			tos = ip->ip_tos;
1467 			dscp_tid_override = 1;
1468 
1469 		}
1470 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1471 		/* TODO
1472 		 * use flowlabel
1473 		 *igmpmld cases to be handled in phase 2
1474 		 */
1475 		unsigned long ver_pri_flowlabel;
1476 		unsigned long pri;
1477 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1478 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1479 			DP_IPV6_PRIORITY_SHIFT;
1480 		tos = pri;
1481 		dscp_tid_override = 1;
1482 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1483 		msdu_info->tid = DP_VO_TID;
1484 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1485 		/* Only for unicast frames */
1486 		if (!is_mcast) {
1487 			/* send ucast arp on VO queue */
1488 			msdu_info->tid = DP_VO_TID;
1489 		}
1490 	}
1491 
1492 	/*
1493 	 * Assign all MCAST packets to BE
1494 	 */
1495 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1496 		if (is_mcast) {
1497 			tos = 0;
1498 			dscp_tid_override = 1;
1499 		}
1500 	}
1501 
1502 	if (dscp_tid_override == 1) {
1503 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1504 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1505 	}
1506 
1507 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1508 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1509 
1510 	return;
1511 }
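
/*
 * Worked example for the DSCP branch above (assuming DP_IP_DSCP_SHIFT == 2
 * and DP_IP_DSCP_MASK == 0x3f, the usual IPv4 TOS layout):
 *
 *	ip_tos = 0xB8 (DSCP 46, the "EF" voice marking)
 *	tos    = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK = 46
 *	tid    = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46]
 *
 * i.e. the DSCP codepoint simply indexes the per-vdev DSCP-TID map
 * programmed for this pdev.
 */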
1512 
1513 /**
1514  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1515  * @vdev: DP vdev handle
1516  * @nbuf: skb
1517  * @msdu_info: msdu descriptor that receives the resolved TID
1518  * Software based TID classification is required when more than 2 DSCP-TID
1519  * mapping tables are needed.
1520  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1521  *
1522  * Return: void
1523  */
1524 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1525 				      struct dp_tx_msdu_info_s *msdu_info)
1526 {
1527 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1528 
1529 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1530 
1531 	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
1532 		return;
1533 
1534 	/* for mesh packets don't do any classification */
1535 	if (qdf_unlikely(vdev->mesh_vdev))
1536 		return;
1537 
1538 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1539 }
1540 
1541 #ifdef FEATURE_WLAN_TDLS
1542 /**
1543  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1544  * @tx_desc: TX descriptor
1545  *
1546  * Return: None
1547  */
1548 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1549 {
1550 	if (tx_desc->vdev) {
1551 		if (tx_desc->vdev->is_tdls_frame) {
1552 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1553 			tx_desc->vdev->is_tdls_frame = false;
1554 		}
1555 	}
1556 }
1557 
1558 /**
1559  * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
1560  * @soc: dp_soc handle
1561  * @tx_desc: TX descriptor
1562  * @vdev: datapath vdev handle
1563  *
1564  * Return: None
1565  */
1566 static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1567 					 struct dp_tx_desc_s *tx_desc,
1568 					 struct dp_vdev *vdev)
1569 {
1570 	struct hal_tx_completion_status ts = {0};
1571 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1572 
1573 	if (qdf_unlikely(!vdev)) {
1574 		dp_err_rl("vdev is null!");
1575 		goto error;
1576 	}
1577 
1578 	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
1579 	if (vdev->tx_non_std_data_callback.func) {
1580 		qdf_nbuf_set_next(nbuf, NULL);
1581 		vdev->tx_non_std_data_callback.func(
1582 				vdev->tx_non_std_data_callback.ctxt,
1583 				nbuf, ts.status);
1584 		return;
1585 	} else {
1586 		dp_err_rl("callback func is null");
1587 	}
1588 
1589 error:
1590 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1591 	qdf_nbuf_free(nbuf);
1592 }
1593 
1594 /**
1595  * dp_tx_msdu_single_map() - do nbuf map
1596  * @vdev: DP vdev handle
1597  * @tx_desc: DP TX descriptor pointer
1598  * @nbuf: skb pointer
1599  *
1600  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1601  * operation done in other components.
1602  *
1603  * Return: QDF_STATUS
1604  */
1605 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1606 					       struct dp_tx_desc_s *tx_desc,
1607 					       qdf_nbuf_t nbuf)
1608 {
1609 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1610 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1611 						  nbuf,
1612 						  QDF_DMA_TO_DEVICE,
1613 						  nbuf->len);
1614 	else
1615 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1616 					   QDF_DMA_TO_DEVICE);
1617 }
1618 #else
1619 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
1620 {
1621 }
1622 
1623 static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
1624 						struct dp_tx_desc_s *tx_desc,
1625 						struct dp_vdev *vdev)
1626 {
1627 }
1628 
1629 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1630 					       struct dp_tx_desc_s *tx_desc,
1631 					       qdf_nbuf_t nbuf)
1632 {
1633 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
1634 					  nbuf,
1635 					  QDF_DMA_TO_DEVICE,
1636 					  nbuf->len);
1637 }
1638 #endif
1639 
1640 /**
1641  * dp_tx_frame_is_drop() - checks if the packet is loopback
1642  * @vdev: DP vdev handle
1643  * @srcmac: source MAC address
1644  * @dstmac: destination MAC address
1645  * Return: 1 if frame needs to be dropped else 0
1646  */
1647 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
1648 {
1649 	struct dp_pdev *pdev = NULL;
1650 	struct dp_ast_entry *src_ast_entry = NULL;
1651 	struct dp_ast_entry *dst_ast_entry = NULL;
1652 	struct dp_soc *soc = NULL;
1653 
1654 	qdf_assert(vdev);
1655 	pdev = vdev->pdev;
1656 	qdf_assert(pdev);
1657 	soc = pdev->soc;
1658 
1659 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
1660 				(soc, dstmac, vdev->pdev->pdev_id);
1661 
1662 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
1663 				(soc, srcmac, vdev->pdev->pdev_id);
1664 	if (dst_ast_entry && src_ast_entry) {
1665 		if (dst_ast_entry->peer->peer_id ==
1666 				src_ast_entry->peer->peer_id)
1667 			return 1;
1668 	}
1669 
1670 	return 0;
1671 }
1672 
1673 /**
1674  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
1675  * @vdev: DP vdev handle
1676  * @nbuf: skb
1677  * @msdu_info: MSDU information to be setup in the MSDU descriptor, including
1678  *             the TID, fw metadata and the Tx queue to be used for this
1679  *             frame
1680  * @peer_id: peer_id of the peer in case of NAWDS frames
1681  * @tx_exc_metadata: Handle that holds exception path metadata
1682  *
1683  * Return: NULL on success,
1684  *         nbuf when it fails to send
1685  */
1686 qdf_nbuf_t
1687 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1688 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
1689 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
1690 {
1691 	struct dp_pdev *pdev = vdev->pdev;
1692 	struct dp_soc *soc = pdev->soc;
1693 	struct dp_tx_desc_s *tx_desc;
1694 	QDF_STATUS status;
1695 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
1696 	uint16_t htt_tcl_metadata = 0;
1697 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
1698 	uint8_t tid = msdu_info->tid;
1699 	struct cdp_tid_tx_stats *tid_stats = NULL;
1700 
1701 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
1702 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
1703 			msdu_info, tx_exc_metadata);
1704 	if (!tx_desc) {
1705 		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
1706 			  vdev, tx_q->desc_pool_id);
1707 		drop_code = TX_DESC_ERR;
1708 		goto fail_return;
1709 	}
1710 
1711 	if (qdf_unlikely(soc->cce_disable)) {
1712 		if (dp_cce_classify(vdev, nbuf) == true) {
1713 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1714 			tid = DP_VO_TID;
1715 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1716 		}
1717 	}
1718 
1719 	dp_tx_update_tdls_flags(tx_desc);
1720 
1721 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
1722 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1723 		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
1724 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
1725 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
1726 				HTT_TCL_METADATA_TYPE_PEER_BASED);
1727 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
1728 				peer_id);
1729 	} else
1730 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1731 
1732 	if (msdu_info->exception_fw)
1733 		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1734 
1735 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
1736 					 !pdev->enhanced_stats_en);
1737 
1738 	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
1739 			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
1740 		/* Handle failure */
1741 		dp_err("qdf_nbuf_map failed");
1742 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1743 		drop_code = TX_DMA_MAP_ERR;
1744 		goto release_desc;
1745 	}
1746 
1747 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
1748 	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
1749 			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
1750 
1751 	if (status != QDF_STATUS_SUCCESS) {
1752 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1753 			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1754 			  __func__, tx_desc, tx_q->ring_id);
1755 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
1756 					     QDF_DMA_TO_DEVICE,
1757 					     nbuf->len);
1758 		drop_code = TX_HW_ENQUEUE;
1759 		goto release_desc;
1760 	}
1761 
1762 	return NULL;
1763 
1764 release_desc:
1765 	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1766 
1767 fail_return:
1768 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1769 	tid_stats = &pdev->stats.tid_stats.
1770 		    tid_tx_stats[tx_q->ring_id][tid];
1771 	tid_stats->swdrop_cnt[drop_code]++;
1772 	return nbuf;
1773 }
1774 
1775 /**
1776  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
1777  * @vdev: DP vdev handle
1778  * @nbuf: skb
1779  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
1780  *
1781  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
1782  *
1783  * Return: NULL on success,
1784  *         nbuf when it fails to send
1785  */
1786 #if QDF_LOCK_STATS
1787 noinline
1788 #else
1789 #endif
1790 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1791 				    struct dp_tx_msdu_info_s *msdu_info)
1792 {
1793 	uint8_t i;
1794 	struct dp_pdev *pdev = vdev->pdev;
1795 	struct dp_soc *soc = pdev->soc;
1796 	struct dp_tx_desc_s *tx_desc;
1797 	bool is_cce_classified = false;
1798 	QDF_STATUS status;
1799 	uint16_t htt_tcl_metadata = 0;
1800 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1801 	struct cdp_tid_tx_stats *tid_stats = NULL;
1802 
1803 	if (qdf_unlikely(soc->cce_disable)) {
1804 		is_cce_classified = dp_cce_classify(vdev, nbuf);
1805 		if (is_cce_classified) {
1806 			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
1807 			msdu_info->tid = DP_VO_TID;
1808 		}
1809 	}
1810 
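	/*
	 * For multicast-to-unicast (ME) frames, transmit the converted
	 * per-client copy held by the current segment rather than the
	 * original nbuf.
	 */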
1811 	if (msdu_info->frm_type == dp_tx_frm_me)
1812 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1813 
1814 	i = 0;
1815 	/* Print statement to track i and num_seg */
1816 	/*
1817 	 * For each segment (maps to 1 MSDU) , prepare software and hardware
1818 	 * descriptors using information in msdu_info
1819 	 */
1820 	while (i < msdu_info->num_seg) {
1821 		/*
1822 		 * Setup Tx descriptor for an MSDU, and MSDU extension
1823 		 * descriptor
1824 		 */
1825 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
1826 				tx_q->desc_pool_id);
1827 
1828 		if (!tx_desc) {
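			/*
			 * Descriptor allocation failed: for ME frames, free
			 * this segment's ME buffer and continue with the next
			 * segment; otherwise abort the remaining segments.
			 */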
1829 			if (msdu_info->frm_type == dp_tx_frm_me) {
1830 				dp_tx_me_free_buf(pdev,
1831 					(void *)(msdu_info->u.sg_info
1832 						.curr_seg->frags[0].vaddr));
1833 				i++;
1834 				continue;
1835 			}
1836 			goto done;
1837 		}
1838 
1839 		if (msdu_info->frm_type == dp_tx_frm_me) {
1840 			tx_desc->me_buffer =
1841 				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
1842 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
1843 		}
1844 
1845 		if (is_cce_classified)
1846 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1847 
1848 		htt_tcl_metadata = vdev->htt_tcl_metadata;
1849 		if (msdu_info->exception_fw) {
1850 			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
1851 		}
1852 
1853 		/*
1854 		 * Enqueue the Tx MSDU descriptor to HW for transmit
1855 		 */
1856 		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
1857 			htt_tcl_metadata, tx_q->ring_id, NULL);
1858 
1859 		if (status != QDF_STATUS_SUCCESS) {
1860 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1861 					"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
1862 					__func__, tx_desc, tx_q->ring_id);
1863 
1864 			dp_tx_get_tid(vdev, nbuf, msdu_info);
1865 			tid_stats = &pdev->stats.tid_stats.
1866 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
1867 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
1868 
1869 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
1870 			if (msdu_info->frm_type == dp_tx_frm_me) {
1871 				i++;
1872 				continue;
1873 			}
1874 			goto done;
1875 		}
1876 
1877 		/*
1878 		 * TODO
1879 		 * if tso_info structure can be modified to have curr_seg
1880 		 * as first element, following 2 blocks of code (for TSO and SG)
1881 		 * can be combined into 1
1882 		 */
1883 
1884 		/*
1885 		 * For frames with multiple segments (TSO, ME), jump to next
1886 		 * segment.
1887 		 */
1888 		if (msdu_info->frm_type == dp_tx_frm_tso) {
1889 			if (msdu_info->u.tso_info.curr_seg->next) {
1890 				msdu_info->u.tso_info.curr_seg =
1891 					msdu_info->u.tso_info.curr_seg->next;
1892 
1893 				/*
1894 				 * If this is a jumbo nbuf, then increment the number of
1895 				 * nbuf users for each additional segment of the msdu.
1896 				 * This will ensure that the skb is freed only after
1897 				 * receiving tx completion for all segments of an nbuf
1898 				 */
1899 				qdf_nbuf_inc_users(nbuf);
1900 
1901 				/* Check with MCL if this is needed */
1902 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
1903 			}
1904 		}
1905 
1906 		/*
1907 		 * For Multicast-Unicast converted packets,
1908 		 * each converted frame (for a client) is represented as
1909 		 * 1 segment
1910 		 */
1911 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
1912 				(msdu_info->frm_type == dp_tx_frm_me)) {
1913 			if (msdu_info->u.sg_info.curr_seg->next) {
1914 				msdu_info->u.sg_info.curr_seg =
1915 					msdu_info->u.sg_info.curr_seg->next;
1916 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
1917 			}
1918 		}
1919 		i++;
1920 	}
1921 
1922 	nbuf = NULL;
1923 
1924 done:
1925 	return nbuf;
1926 }
1927 
1928 /**
1929  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
1930  *                     for SG frames
1931  * @vdev: DP vdev handle
1932  * @nbuf: skb
1933  * @seg_info: Pointer to Segment info Descriptor to be prepared
1934  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
1935  *
 * Return: nbuf when SG info is extracted successfully,
 *         NULL on failure
1938  */
1939 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1940 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1941 {
1942 	uint32_t cur_frag, nr_frags;
1943 	qdf_dma_addr_t paddr;
1944 	struct dp_tx_sg_info_s *sg_info;
1945 
1946 	sg_info = &msdu_info->u.sg_info;
1947 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
1948 
1949 	if (QDF_STATUS_SUCCESS !=
1950 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
1951 					   QDF_DMA_TO_DEVICE, nbuf->len)) {
1952 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1953 				"dma map error");
1954 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1955 
1956 		qdf_nbuf_free(nbuf);
1957 		return NULL;
1958 	}
1959 
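	/* frags[0] describes the linear (head) portion of the nbuf */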
1960 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
1961 	seg_info->frags[0].paddr_lo = paddr;
1962 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
1963 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
1964 	seg_info->frags[0].vaddr = (void *) nbuf;
1965 
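	/* frags[1..nr_frags] describe the non-linear page fragments */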
1966 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1967 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
1968 					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
1969 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1970 					"frag dma map error");
1971 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
1972 			qdf_nbuf_free(nbuf);
1973 			return NULL;
1974 		}
1975 
1976 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
1977 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
1978 		seg_info->frags[cur_frag + 1].paddr_hi =
1979 			((uint64_t) paddr) >> 32;
1980 		seg_info->frags[cur_frag + 1].len =
1981 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
1982 	}
1983 
1984 	seg_info->frag_cnt = (cur_frag + 1);
1985 	seg_info->total_len = qdf_nbuf_len(nbuf);
1986 	seg_info->next = NULL;
1987 
1988 	sg_info->curr_seg = seg_info;
1989 
1990 	msdu_info->frm_type = dp_tx_frm_sg;
1991 	msdu_info->num_seg = 1;
1992 
1993 	return nbuf;
1994 }
1995 
1996 /**
1997  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
1998  * @vdev: DP vdev handle
1999  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2000  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2001  *
 * Return: none
2004  */
2005 static
2006 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2007 				    struct dp_tx_msdu_info_s *msdu_info,
2008 				    uint16_t ppdu_cookie)
2009 {
2010 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2011 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2012 
2013 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2014 
2015 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2016 				(msdu_info->meta_data[5], 1);
2017 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2018 				(msdu_info->meta_data[5], 1);
2019 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2020 				(msdu_info->meta_data[6], ppdu_cookie);
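	/*
	 * The host opaque cookie set above is replayed by the target in the
	 * PPDU completions, allowing the host to match sniffer frames with
	 * their PPDU stats.
	 */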
2021 
2022 	msdu_info->exception_fw = 1;
2023 	msdu_info->is_tx_sniffer = 1;
2024 }
2025 
2026 #ifdef MESH_MODE_SUPPORT
2027 
2028 /**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
2031  * @vdev: DP vdev handle
2032  * @nbuf: skb
2033  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2034  *
2035  * Return: NULL on failure,
2036  *         nbuf when extracted successfully
2037  */
2038 static
2039 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2040 				struct dp_tx_msdu_info_s *msdu_info)
2041 {
2042 	struct meta_hdr_s *mhdr;
2043 	struct htt_tx_msdu_desc_ext2_t *meta_data =
2044 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2045 
2046 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2047 
2048 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2049 		msdu_info->exception_fw = 0;
2050 		goto remove_meta_hdr;
2051 	}
2052 
2053 	msdu_info->exception_fw = 1;
2054 
2055 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2056 
2057 	meta_data->host_tx_desc_pool = 1;
2058 	meta_data->update_peer_cache = 1;
2059 	meta_data->learning_frame = 1;
2060 
2061 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2062 		meta_data->power = mhdr->power;
2063 
2064 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2065 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2066 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2067 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2068 
2069 		meta_data->dyn_bw = 1;
2070 
2071 		meta_data->valid_pwr = 1;
2072 		meta_data->valid_mcs_mask = 1;
2073 		meta_data->valid_nss_mask = 1;
2074 		meta_data->valid_preamble_type  = 1;
2075 		meta_data->valid_retries = 1;
2076 		meta_data->valid_bw_info = 1;
2077 	}
2078 
2079 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2080 		meta_data->encrypt_type = 0;
2081 		meta_data->valid_encrypt_type = 1;
2082 		meta_data->learning_frame = 0;
2083 	}
2084 
2085 	meta_data->valid_key_flags = 1;
2086 	meta_data->key_flags = (mhdr->keyix & 0x3);
2087 
2088 remove_meta_hdr:
2089 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2090 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2091 				"qdf_nbuf_pull_head failed");
2092 		qdf_nbuf_free(nbuf);
2093 		return NULL;
2094 	}
2095 
2096 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2097 
2098 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2099 			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
2100 			" tid %d to_fw %d",
2101 			__func__, msdu_info->meta_data[0],
2102 			msdu_info->meta_data[1],
2103 			msdu_info->meta_data[2],
2104 			msdu_info->meta_data[3],
2105 			msdu_info->meta_data[4],
2106 			msdu_info->meta_data[5],
2107 			msdu_info->tid, msdu_info->exception_fw);
2108 
2109 	return nbuf;
2110 }
2111 #else
2112 static
2113 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2114 				struct dp_tx_msdu_info_s *msdu_info)
2115 {
2116 	return nbuf;
2117 }
2118 
2119 #endif
2120 
2121 /**
2122  * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
2126  *
2127  */
2128 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2129 {
2130 	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2131 			    HTT_INVALID_TID);
2132 	bool invalid_encap_type =
2133 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2134 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2135 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2136 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2137 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2138 			       tx_exc->ppdu_cookie == 0);
2139 
2140 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2141 	    invalid_cookie) {
2142 		return false;
2143 	}
2144 
2145 	return true;
2146 }
2147 
2148 /**
2149  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2150  * @soc: DP soc handle
2151  * @vdev_id: id of DP vdev handle
2152  * @nbuf: skb
2153  * @tx_exc_metadata: Handle that holds exception path meta data
2154  *
2155  * Entry point for Core Tx layer (DP_TX) invoked from
2156  * hard_start_xmit in OSIF/HDD to transmit frames through fw
2157  *
2158  * Return: NULL on success,
2159  *         nbuf when it fails to send
2160  */
2161 qdf_nbuf_t
2162 dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
2163 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
2164 {
2165 	qdf_ether_header_t *eh = NULL;
2166 	struct dp_tx_msdu_info_s msdu_info;
2167 	struct dp_vdev *vdev =
2168 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2169 						   vdev_id);
2170 
2171 	if (qdf_unlikely(!vdev))
2172 		goto fail;
2173 
2174 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2175 
2176 	if (!tx_exc_metadata)
2177 		goto fail;
2178 
2179 	msdu_info.tid = tx_exc_metadata->tid;
2180 
2181 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2182 	dp_verbose_debug("skb %pM", nbuf->data);
2183 
2184 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2185 
2186 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2187 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2188 			"Invalid parameters in exception path");
2189 		goto fail;
2190 	}
2191 
2192 	/* Basic sanity checks for unsupported packets */
2193 
2194 	/* MESH mode */
2195 	if (qdf_unlikely(vdev->mesh_vdev)) {
2196 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2197 			"Mesh mode is not supported in exception path");
2198 		goto fail;
2199 	}
2200 
2201 	/* TSO or SG */
2202 	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2203 	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2204 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2205 			  "TSO and SG are not supported in exception path");
2206 
2207 		goto fail;
2208 	}
2209 
2210 	/* RAW */
2211 	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2212 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2213 			  "Raw frame is not supported in exception path");
2214 		goto fail;
2215 	}
2216 
2217 
2218 	/* Mcast enhancement*/
2219 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2220 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2221 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2222 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2223 					  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2224 		}
2225 	}
2226 
2227 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
2228 		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
2229 				 qdf_nbuf_len(nbuf));
2230 
2231 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2232 					       tx_exc_metadata->ppdu_cookie);
2233 	}
2234 
2235 	/*
2236 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
2242 	 */
2243 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2244 
	/*
	 * Drop the frame if the number of outstanding exception-path Tx
	 * descriptors has already reached the configured limit.
	 */
2248 	if (dp_tx_exception_limit_check(vdev))
2249 		goto fail;
2250 
2251 	/*  Single linear frame */
2252 	/*
2253 	 * If nbuf is a simple linear frame, use send_single function to
2254 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2255 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2256 	 */
2257 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2258 			tx_exc_metadata->peer_id, tx_exc_metadata);
2259 
2260 	return nbuf;
2261 
2262 fail:
2263 	dp_verbose_debug("pkt send failed");
2264 	return nbuf;
2265 }
2266 
2267 /**
2268  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2269  * @soc: DP soc handle
2270  * @vdev_id: DP vdev handle
2271  * @nbuf: skb
2272  *
2273  * Entry point for Core Tx layer (DP_TX) invoked from
2274  * hard_start_xmit in OSIF/HDD
2275  *
2276  * Return: NULL on success,
2277  *         nbuf when it fails to send
2278  */
2279 #ifdef MESH_MODE_SUPPORT
2280 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2281 			   qdf_nbuf_t nbuf)
2282 {
2283 	struct meta_hdr_s *mhdr;
2284 	qdf_nbuf_t nbuf_mesh = NULL;
2285 	qdf_nbuf_t nbuf_clone = NULL;
2286 	struct dp_vdev *vdev;
2287 	uint8_t no_enc_frame = 0;
2288 
2289 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
2290 	if (!nbuf_mesh) {
2291 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2292 				"qdf_nbuf_unshare failed");
2293 		return nbuf;
2294 	}
2295 
2296 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2297 						  vdev_id);
2298 	if (!vdev) {
2299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2300 				"vdev is NULL for vdev_id %d", vdev_id);
2301 		return nbuf;
2302 	}
2303 
2304 	nbuf = nbuf_mesh;
2305 
2306 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2307 
2308 	if ((vdev->sec_type != cdp_sec_type_none) &&
2309 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
2310 		no_enc_frame = 1;
2311 
2312 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
2313 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
2314 
2315 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
2316 		       !no_enc_frame) {
2317 		nbuf_clone = qdf_nbuf_clone(nbuf);
2318 		if (!nbuf_clone) {
2319 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2320 				"qdf_nbuf_clone failed");
2321 			return nbuf;
2322 		}
2323 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
2324 	}
2325 
2326 	if (nbuf_clone) {
2327 		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
2328 			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2329 		} else {
2330 			qdf_nbuf_free(nbuf_clone);
2331 		}
2332 	}
2333 
2334 	if (no_enc_frame)
2335 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
2336 	else
2337 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
2338 
2339 	nbuf = dp_tx_send(soc, vdev_id, nbuf);
2340 	if ((!nbuf) && no_enc_frame) {
2341 		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
2342 	}
2343 
2344 	return nbuf;
2345 }
2346 
2347 #else
2348 
2349 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
2350 			   qdf_nbuf_t nbuf)
2351 {
2352 	return dp_tx_send(soc, vdev_id, nbuf);
2353 }
2354 
2355 #endif
2356 
2357 /**
2358  * dp_tx_nawds_handler() - NAWDS handler
2359  *
2360  * @soc: DP soc handle
 * @vdev: DP vdev handle
2362  * @msdu_info: msdu_info required to create HTT metadata
2363  * @nbuf: skb
2364  *
 * This API transmits the multicast frame, with the corresponding peer id,
 * to each NAWDS-enabled peer.
 *
2368  * Return: none
2369  */
2370 
2371 static inline
2372 void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
2373 			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2374 {
2375 	struct dp_peer *peer = NULL;
2376 	qdf_nbuf_t nbuf_clone = NULL;
2377 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
2378 	uint16_t peer_id = DP_INVALID_PEER;
2379 	struct dp_peer *sa_peer = NULL;
2380 	struct dp_ast_entry *ast_entry = NULL;
2381 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2382 
2383 	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
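	/*
	 * For intra-BSS forwarded frames, look up the source peer through
	 * the AST table so the multicast copy is not sent back to the peer
	 * it came from.
	 */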
2384 		qdf_spin_lock_bh(&dp_soc->ast_lock);
2385 
2386 		ast_entry = dp_peer_ast_hash_find_by_pdevid
2387 					(dp_soc,
2388 					 (uint8_t *)(eh->ether_shost),
2389 					 vdev->pdev->pdev_id);
2390 
2391 		if (ast_entry)
2392 			sa_peer = ast_entry->peer;
2393 		qdf_spin_unlock_bh(&dp_soc->ast_lock);
2394 	}
2395 
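	/*
	 * Walk the vdev peer list and send a clone of the frame to every
	 * NAWDS-enabled non-BSS peer, skipping the source peer identified
	 * above.
	 */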
2396 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
2397 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2398 		if (!peer->bss_peer && peer->nawds_enabled) {
2399 			peer_id = peer->peer_id;
			/* Multicast packets need to be
			 * dropped in case of intra-BSS forwarding
2402 			 */
2403 			if (sa_peer == peer) {
2404 				QDF_TRACE(QDF_MODULE_ID_DP,
2405 					  QDF_TRACE_LEVEL_DEBUG,
2406 					  " %s: multicast packet",  __func__);
2407 				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2408 				continue;
2409 			}
2410 			nbuf_clone = qdf_nbuf_clone(nbuf);
2411 
2412 			if (!nbuf_clone) {
2413 				QDF_TRACE(QDF_MODULE_ID_DP,
2414 					  QDF_TRACE_LEVEL_ERROR,
2415 					  FL("nbuf clone failed"));
2416 				break;
2417 			}
2418 
2419 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2420 							    msdu_info, peer_id,
2421 							    NULL);
2422 
2423 			if (nbuf_clone) {
2424 				QDF_TRACE(QDF_MODULE_ID_DP,
2425 					  QDF_TRACE_LEVEL_DEBUG,
2426 					  FL("pkt send failed"));
2427 				qdf_nbuf_free(nbuf_clone);
2428 			} else {
2429 				if (peer_id != DP_INVALID_PEER)
2430 					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2431 							 1, qdf_nbuf_len(nbuf));
2432 			}
2433 		}
2434 	}
2435 
2436 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
2437 }
2438 
2439 /**
2440  * dp_tx_send() - Transmit a frame on a given VAP
2441  * @soc: DP soc handle
2442  * @vdev_id: id of DP vdev handle
2443  * @nbuf: skb
2444  *
2445  * Entry point for Core Tx layer (DP_TX) invoked from
2446  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2447  * cases
2448  *
2449  * Return: NULL on success,
2450  *         nbuf when it fails to send
2451  */
2452 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
2453 {
2454 	uint16_t peer_id = HTT_INVALID_PEER;
2455 	/*
	 * Doing a memzero causes additional function call overhead,
	 * so rely on static stack initialization instead.
2458 	 */
2459 	struct dp_tx_msdu_info_s msdu_info = {0};
2460 	struct dp_vdev *vdev =
2461 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2462 						   vdev_id);
2463 	if (qdf_unlikely(!vdev))
2464 		return nbuf;
2465 
2466 	dp_verbose_debug("skb %pM", nbuf->data);
2467 
2468 	/*
2469 	 * Set Default Host TID value to invalid TID
2470 	 * (TID override disabled)
2471 	 */
2472 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2473 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2474 
2475 	if (qdf_unlikely(vdev->mesh_vdev)) {
2476 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2477 								&msdu_info);
2478 		if (!nbuf_mesh) {
2479 			dp_verbose_debug("Extracting mesh metadata failed");
2480 			return nbuf;
2481 		}
2482 		nbuf = nbuf_mesh;
2483 	}
2484 
2485 	/*
2486 	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
2492 	 */
2493 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2494 
2495 	/*
2496 	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - DSCP-TID override table
2499 	 *
2500 	 * If we need a different DSCP-TID mapping for this vap,
2501 	 * call tid_classify to extract DSCP/ToS from frame and
2502 	 * map to a TID and store in msdu_info. This is later used
2503 	 * to fill in TCL Input descriptor (per-packet TID override).
2504 	 */
2505 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2506 
2507 	/*
2508 	 * Classify the frame and call corresponding
2509 	 * "prepare" function which extracts the segment (TSO)
2510 	 * and fragmentation information (for TSO , SG, ME, or Raw)
2511 	 * into MSDU_INFO structure which is later used to fill
2512 	 * SW and HW descriptors.
2513 	 */
2514 	if (qdf_nbuf_is_tso(nbuf)) {
2515 		dp_verbose_debug("TSO frame %pK", vdev);
2516 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2517 				 qdf_nbuf_len(nbuf));
2518 
2519 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2520 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2521 					 qdf_nbuf_len(nbuf));
2522 			return nbuf;
2523 		}
2524 
2525 		goto send_multiple;
2526 	}
2527 
2528 	/* SG */
2529 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2530 		struct dp_tx_seg_info_s seg_info = {0};
2531 
2532 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2533 		if (!nbuf)
2534 			return NULL;
2535 
2536 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
2537 
2538 		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2539 				qdf_nbuf_len(nbuf));
2540 
2541 		goto send_multiple;
2542 	}
2543 
2544 #ifdef ATH_SUPPORT_IQUE
2545 	/* Mcast to Ucast Conversion*/
2546 	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2547 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2548 					  qdf_nbuf_data(nbuf);
2549 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2550 		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2551 			dp_verbose_debug("Mcast frm for ME %pK", vdev);
2552 
2553 			DP_STATS_INC_PKT(vdev,
2554 					tx_i.mcast_en.mcast_pkt, 1,
2555 					qdf_nbuf_len(nbuf));
2556 			if (dp_tx_prepare_send_me(vdev, nbuf) ==
2557 					QDF_STATUS_SUCCESS) {
2558 				return NULL;
2559 			}
2560 		}
2561 	}
2562 #endif
2563 
2564 	/* RAW */
2565 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2566 		struct dp_tx_seg_info_s seg_info = {0};
2567 
2568 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2569 		if (!nbuf)
2570 			return NULL;
2571 
2572 		dp_verbose_debug("Raw frame %pK", vdev);
2573 
2574 		goto send_multiple;
2575 
2576 	}
2577 
2578 	if (qdf_unlikely(vdev->nawds_enabled)) {
2579 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
2580 					  qdf_nbuf_data(nbuf);
2581 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
2582 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
2583 
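		/*
		 * After the NAWDS copies are sent, mark the original frame
		 * with DP_INVALID_PEER so the regular send below treats it
		 * as host-inspected rather than peer-directed.
		 */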
2584 		peer_id = DP_INVALID_PEER;
2585 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2586 				 1, qdf_nbuf_len(nbuf));
2587 	}
2588 
2589 	/*  Single linear frame */
2590 	/*
2591 	 * If nbuf is a simple linear frame, use send_single function to
2592 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2593 	 * SRNG. There is no need to setup a MSDU extension descriptor.
2594 	 */
2595 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2596 
2597 	return nbuf;
2598 
2599 send_multiple:
2600 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2601 
2602 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2603 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2604 
2605 	return nbuf;
2606 }
2607 
2608 /**
2609  * dp_tx_reinject_handler() - Tx Reinject Handler
2610  * @tx_desc: software descriptor head pointer
2611  * @status : Tx completion status from HTT descriptor
2612  *
2613  * This function reinjects frames back to Target.
2614  * Todo - Host queue needs to be added
2615  *
2616  * Return: none
2617  */
2618 static
2619 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2620 {
2621 	struct dp_vdev *vdev;
2622 	struct dp_peer *peer = NULL;
2623 	uint32_t peer_id = HTT_INVALID_PEER;
2624 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2625 	qdf_nbuf_t nbuf_copy = NULL;
2626 	struct dp_tx_msdu_info_s msdu_info;
2627 	struct dp_soc *soc = NULL;
2628 #ifdef WDS_VENDOR_EXTENSION
2629 	int is_mcast = 0, is_ucast = 0;
2630 	int num_peers_3addr = 0;
2631 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
2632 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
2633 #endif
2634 
2635 	vdev = tx_desc->vdev;
2636 	soc = vdev->pdev->soc;
2637 
2638 	qdf_assert(vdev);
2639 
2640 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2641 
2642 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2643 
2644 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2645 			"%s Tx reinject path", __func__);
2646 
2647 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
2648 			qdf_nbuf_len(tx_desc->nbuf));
2649 
2650 #ifdef WDS_VENDOR_EXTENSION
2651 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
2652 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
2653 	} else {
2654 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
2655 	}
2656 	is_ucast = !is_mcast;
2657 
2658 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2659 		if (peer->bss_peer)
2660 			continue;
2661 
		/* Detect WDS peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All WDS-enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
2667 		 */
2668 		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
2669 			num_peers_3addr = 1;
2670 			break;
2671 		}
2672 	}
2673 #endif
2674 
2675 	if (qdf_unlikely(vdev->mesh_vdev)) {
2676 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
2677 	} else {
2678 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2679 			if ((peer->peer_id != HTT_INVALID_PEER) &&
2680 #ifdef WDS_VENDOR_EXTENSION
2681 			/*
2682 			 * . if 3-addr STA, then send on BSS Peer
2683 			 * . if Peer WDS enabled and accept 4-addr mcast,
2684 			 * send mcast on that peer only
2685 			 * . if Peer WDS enabled and accept 4-addr ucast,
2686 			 * send ucast on that peer only
2687 			 */
2688 			((peer->bss_peer && num_peers_3addr && is_mcast) ||
2689 			 (peer->wds_enabled &&
2690 				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
2691 				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
2692 #else
2693 			((peer->bss_peer &&
2694 			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
2695 #endif
2696 				peer_id = DP_INVALID_PEER;
2697 
2698 				nbuf_copy = qdf_nbuf_copy(nbuf);
2699 
2700 				if (!nbuf_copy) {
2701 					QDF_TRACE(QDF_MODULE_ID_DP,
2702 						QDF_TRACE_LEVEL_DEBUG,
2703 						FL("nbuf copy failed"));
2704 					break;
2705 				}
2706 
2707 				nbuf_copy = dp_tx_send_msdu_single(vdev,
2708 						nbuf_copy,
2709 						&msdu_info,
2710 						peer_id,
2711 						NULL);
2712 
2713 				if (nbuf_copy) {
2714 					QDF_TRACE(QDF_MODULE_ID_DP,
2715 						QDF_TRACE_LEVEL_DEBUG,
2716 						FL("pkt send failed"));
2717 					qdf_nbuf_free(nbuf_copy);
2718 				} else {
2719 					if (peer_id != DP_INVALID_PEER)
2720 						DP_STATS_INC_PKT(peer,
2721 							tx.nawds_mcast,
2722 							1, qdf_nbuf_len(nbuf));
2723 				}
2724 			}
2725 		}
2726 	}
2727 
2728 	qdf_nbuf_free(nbuf);
2729 
2730 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2731 }
2732 
2733 /**
2734  * dp_tx_inspect_handler() - Tx Inspect Handler
2735  * @tx_desc: software descriptor head pointer
2736  * @status : Tx completion status from HTT descriptor
2737  *
2738  * Handles Tx frames sent back to Host for inspection
2739  * (ProxyARP)
2740  *
2741  * Return: none
2742  */
2743 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
2744 {
2745 
2746 	struct dp_soc *soc;
2747 	struct dp_pdev *pdev = tx_desc->pdev;
2748 
2749 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2750 			"%s Tx inspect path",
2751 			__func__);
2752 
2753 	qdf_assert(pdev);
2754 
2755 	soc = pdev->soc;
2756 
2757 	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
2758 			qdf_nbuf_len(tx_desc->nbuf));
2759 
2760 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
2761 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
2762 }
2763 
2764 #ifdef FEATURE_PERPKT_INFO
2765 /**
2766  * dp_get_completion_indication_for_stack() - send completion to stack
2767  * @soc : dp_soc handle
2768  * @pdev: dp_pdev handle
2769  * @peer: dp peer handle
2770  * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: Tx latency value to be filled into the capture header
 *
 * This function indicates whether the buffer needs to be
 * sent to the stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
2776 QDF_STATUS
2777 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2778 				       struct dp_pdev *pdev,
2779 				       struct dp_peer *peer,
2780 				       struct hal_tx_completion_status *ts,
2781 				       qdf_nbuf_t netbuf,
2782 				       uint64_t time_latency)
2783 {
2784 	struct tx_capture_hdr *ppdu_hdr;
2785 	uint16_t peer_id = ts->peer_id;
2786 	uint32_t ppdu_id = ts->ppdu_id;
2787 	uint8_t first_msdu = ts->first_msdu;
2788 	uint8_t last_msdu = ts->last_msdu;
2789 
2790 	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
2791 			 !pdev->latency_capture_enable))
2792 		return QDF_STATUS_E_NOSUPPORT;
2793 
2794 	if (!peer) {
2795 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2796 				FL("Peer Invalid"));
2797 		return QDF_STATUS_E_INVAL;
2798 	}
2799 
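	/*
	 * In m_copy mode, deliver only one MSDU per (ppdu_id, peer_id) pair
	 * to the stack; further completions from the same PPDU and peer are
	 * suppressed.
	 */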
2800 	if (pdev->mcopy_mode) {
2801 		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2802 				(pdev->m_copy_id.tx_peer_id == peer_id)) {
2803 			return QDF_STATUS_E_INVAL;
2804 		}
2805 
2806 		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2807 		pdev->m_copy_id.tx_peer_id = peer_id;
2808 	}
2809 
2810 	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
2811 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2812 				FL("No headroom"));
2813 		return QDF_STATUS_E_NOMEM;
2814 	}
2815 
2816 	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
2817 	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
2818 		     QDF_MAC_ADDR_SIZE);
2819 	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
2820 		     QDF_MAC_ADDR_SIZE);
2821 	ppdu_hdr->ppdu_id = ppdu_id;
2822 	ppdu_hdr->peer_id = peer_id;
2823 	ppdu_hdr->first_msdu = first_msdu;
2824 	ppdu_hdr->last_msdu = last_msdu;
2825 	if (qdf_unlikely(pdev->latency_capture_enable)) {
2826 		ppdu_hdr->tsf = ts->tsf;
2827 		ppdu_hdr->time_latency = time_latency;
2828 	}
2829 
2830 	return QDF_STATUS_SUCCESS;
2831 }
2832 
2833 
2834 /**
2835  * dp_send_completion_to_stack() - send completion to stack
2836  * @soc :  dp_soc handle
2837  * @pdev:  dp_pdev handle
2838  * @peer_id: peer_id of the peer for which completion came
2839  * @ppdu_id: ppdu_id
2840  * @netbuf: Buffer pointer for free
2841  *
2842  * This function is used to send completion to stack
2843  * to free buffer
 *
 * Return: none
 */
2845 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2846 					uint16_t peer_id, uint32_t ppdu_id,
2847 					qdf_nbuf_t netbuf)
2848 {
2849 	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
2850 				netbuf, peer_id,
2851 				WDI_NO_VAL, pdev->pdev_id);
2852 }
2853 #else
2854 static QDF_STATUS
2855 dp_get_completion_indication_for_stack(struct dp_soc *soc,
2856 				       struct dp_pdev *pdev,
2857 				       struct dp_peer *peer,
2858 				       struct hal_tx_completion_status *ts,
2859 				       qdf_nbuf_t netbuf,
2860 				       uint64_t time_latency)
2861 {
2862 	return QDF_STATUS_E_NOSUPPORT;
2863 }
2864 
2865 static void
2866 dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
2867 	uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
2868 {
2869 }
2870 #endif
2871 
2872 /**
2873  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
2874  * @soc: Soc handle
2875  * @desc: software Tx descriptor to be processed
2876  *
2877  * Return: none
2878  */
2879 static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
2880 				       struct dp_tx_desc_s *desc)
2881 {
2882 	struct dp_vdev *vdev = desc->vdev;
2883 	qdf_nbuf_t nbuf = desc->nbuf;
2884 
2885 	/* nbuf already freed in vdev detach path */
2886 	if (!nbuf)
2887 		return;
2888 
2889 	/* If it is TDLS mgmt, don't unmap or free the frame */
2890 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
2891 		return dp_non_std_tx_comp_free_buff(soc, desc, vdev);
2892 
2893 	/* 0 : MSDU buffer, 1 : MLE */
2894 	if (desc->msdu_ext_desc) {
2895 		/* TSO free */
2896 		if (hal_tx_ext_desc_get_tso_enable(
2897 					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO seg before freeing the nbuf */
2899 			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
2900 						desc->tso_num_desc);
2901 			qdf_nbuf_free(nbuf);
2902 			return;
2903 		}
2904 	}
2905 
2906 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2907 				     QDF_DMA_TO_DEVICE, nbuf->len);
2908 
2909 	if (qdf_unlikely(!vdev)) {
2910 		qdf_nbuf_free(nbuf);
2911 		return;
2912 	}
2913 
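	/*
	 * For mesh vdevs, frames that were not sent to the FW are handed
	 * back to the OS shim via osif_tx_free_ext() instead of being
	 * freed here directly.
	 */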
2914 	if (qdf_likely(!vdev->mesh_vdev))
2915 		qdf_nbuf_free(nbuf);
2916 	else {
2917 		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2918 			qdf_nbuf_free(nbuf);
2919 			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
2920 		} else
2921 			vdev->osif_tx_free_ext((nbuf));
2922 	}
2923 }
2924 
2925 #ifdef MESH_MODE_SUPPORT
2926 /**
2927  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
2928  *                                         in mesh meta header
2929  * @tx_desc: software descriptor head pointer
2930  * @ts: pointer to tx completion stats
 *
 * Return: none
2932  */
2933 static
2934 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2935 		struct hal_tx_completion_status *ts)
2936 {
2937 	struct meta_hdr_s *mhdr;
2938 	qdf_nbuf_t netbuf = tx_desc->nbuf;
2939 
2940 	if (!tx_desc->msdu_ext_desc) {
2941 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
2942 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2943 				"netbuf %pK offset %d",
2944 				netbuf, tx_desc->pkt_offset);
2945 			return;
2946 		}
2947 	}
2948 	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 			"netbuf %pK offset %lu", netbuf,
2951 			sizeof(struct meta_hdr_s));
2952 		return;
2953 	}
2954 
2955 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
2956 	mhdr->rssi = ts->ack_frame_rssi;
2957 	mhdr->band = tx_desc->pdev->operating_channel.band;
2958 	mhdr->channel = tx_desc->pdev->operating_channel.num;
2959 }
2960 
2961 #else
2962 static
2963 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
2964 		struct hal_tx_completion_status *ts)
2965 {
2966 }
2967 
2968 #endif
2969 
2970 /**
2971  * dp_tx_compute_delay() - Compute and fill in all timestamps
2972  *				to pass in correct fields
2973  *
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
2979  */
2980 static void dp_tx_compute_delay(struct dp_vdev *vdev,
2981 				struct dp_tx_desc_s *tx_desc,
2982 				uint8_t tid, uint8_t ring_id)
2983 {
2984 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
2985 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
2986 
2987 	if (qdf_likely(!vdev->pdev->delay_stats_flag))
2988 		return;
2989 
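	/*
	 * Three delay components are tracked per packet:
	 *   sw_enqueue_delay    = HW enqueue time  - ingress time
	 *   fwhw_transmit_delay = completion time  - HW enqueue time
	 *   interframe_delay    = ingress time     - previous frame's ingress time
	 */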
2990 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
2991 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
2992 	timestamp_hw_enqueue = tx_desc->timestamp;
2993 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
2994 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
2995 					 timestamp_hw_enqueue);
2996 	interframe_delay = (uint32_t)(timestamp_ingress -
2997 				      vdev->prev_tx_enq_tstamp);
2998 
2999 	/*
3000 	 * Delay in software enqueue
3001 	 */
3002 	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3003 			      CDP_DELAY_STATS_SW_ENQ, ring_id);
3004 	/*
3005 	 * Delay between packet enqueued to HW and Tx completion
3006 	 */
3007 	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3008 			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3009 
3010 	/*
3011 	 * Update interframe delay stats calculated at hardstart receive point.
3012 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for the 1st frame.
	 * On the other hand, this helps avoid an extra per-packet check
3015 	 * of !vdev->prev_tx_enq_tstamp.
3016 	 */
3017 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
3018 			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
3019 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
3020 }
3021 
3022 #ifdef DISABLE_DP_STATS
3023 static
3024 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3025 {
3026 }
3027 #else
3028 static
3029 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
3030 {
3031 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
3032 
3033 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
3034 	if (subtype != QDF_PROTO_INVALID)
3035 		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
3036 }
3037 #endif
3038 
3039 /**
3040  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
3041  *				per wbm ring
3042  *
3043  * @tx_desc: software descriptor head pointer
3044  * @ts: Tx completion status
3045  * @peer: peer handle
3046  * @ring_id: ring number
3047  *
3048  * Return: None
3049  */
3050 static inline void
3051 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
3052 			struct hal_tx_completion_status *ts,
3053 			struct dp_peer *peer, uint8_t ring_id)
3054 {
3055 	struct dp_pdev *pdev = peer->vdev->pdev;
3056 	struct dp_soc *soc = NULL;
3057 	uint8_t mcs, pkt_type;
3058 	uint8_t tid = ts->tid;
3059 	uint32_t length;
3060 	struct cdp_tid_tx_stats *tid_stats;
3061 
3062 	if (!pdev)
3063 		return;
3064 
3065 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3066 		tid = CDP_MAX_DATA_TIDS - 1;
3067 
3068 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3069 	soc = pdev->soc;
3070 
3071 	mcs = ts->mcs;
3072 	pkt_type = ts->pkt_type;
3073 
3074 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
3075 		dp_err("Release source is not from TQM");
3076 		return;
3077 	}
3078 
3079 	length = qdf_nbuf_len(tx_desc->nbuf);
3080 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
3081 
3082 	if (qdf_unlikely(pdev->delay_stats_flag))
3083 		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
3084 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
3085 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
3086 
3087 	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
3088 			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3089 
3090 	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
3091 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
3092 
3093 	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
3094 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
3095 
3096 	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
3097 		     (ts->status == HAL_TX_TQM_RR_FW_REASON1));
3098 
3099 	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
3100 		     (ts->status == HAL_TX_TQM_RR_FW_REASON2));
3101 
3102 	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
3103 		     (ts->status == HAL_TX_TQM_RR_FW_REASON3));
3104 
3105 	/*
3106 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
3107 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
3108 	 * are no completions for failed cases. Hence updating tx_failed from
3109 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
3110 	 * then this has to be removed
3111 	 */
3112 	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
3113 				peer->stats.tx.dropped.fw_rem_notx +
3114 				peer->stats.tx.dropped.fw_rem_tx +
3115 				peer->stats.tx.dropped.age_out +
3116 				peer->stats.tx.dropped.fw_reason1 +
3117 				peer->stats.tx.dropped.fw_reason2 +
3118 				peer->stats.tx.dropped.fw_reason3;
3119 
3120 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
3121 		tid_stats->tqm_status_cnt[ts->status]++;
3122 	}
3123 
3124 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
3125 		dp_update_no_ack_stats(tx_desc->nbuf, peer);
3126 		return;
3127 	}
3128 
3129 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
3130 
3131 	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
3132 	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
3133 
3134 	/*
3135 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
3136 	 * Return from here if HTT PPDU events are enabled.
3137 	 */
3138 	if (!(soc->process_tx_status))
3139 		return;
3140 
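	/*
	 * Per-preamble MCS histogram: MCS values beyond the range supported
	 * by the given preamble type are accumulated in the last
	 * (MAX_MCS - 1) bucket.
	 */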
3141 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3142 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
3143 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3144 			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
3145 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3146 			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
3147 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3148 			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
3149 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3150 			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
3151 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3152 			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
3153 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3154 			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3155 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3156 			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
3157 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
3158 			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3159 	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
3160 			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
3161 
3162 	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
3163 	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
3164 	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
3165 	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
3166 	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
3167 	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
3168 	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
3169 
3170 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
3171 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
3172 			     &peer->stats, ts->peer_id,
3173 			     UPDATE_PEER_STATS, pdev->pdev_id);
3174 #endif
3175 }
3176 
3177 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3178 /**
3179  * dp_tx_flow_pool_lock() - take flow pool lock
3180  * @soc: core txrx main context
3181  * @tx_desc: tx desc
3182  *
3183  * Return: None
3184  */
3185 static inline
3186 void dp_tx_flow_pool_lock(struct dp_soc *soc,
3187 			  struct dp_tx_desc_s *tx_desc)
3188 {
3189 	struct dp_tx_desc_pool_s *pool;
3190 	uint8_t desc_pool_id;
3191 
3192 	desc_pool_id = tx_desc->pool_id;
3193 	pool = &soc->tx_desc[desc_pool_id];
3194 
3195 	qdf_spin_lock_bh(&pool->flow_pool_lock);
3196 }
3197 
3198 /**
3199  * dp_tx_flow_pool_unlock() - release flow pool lock
3200  * @soc: core txrx main context
3201  * @tx_desc: tx desc
3202  *
3203  * Return: None
3204  */
3205 static inline
3206 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
3207 			    struct dp_tx_desc_s *tx_desc)
3208 {
3209 	struct dp_tx_desc_pool_s *pool;
3210 	uint8_t desc_pool_id;
3211 
3212 	desc_pool_id = tx_desc->pool_id;
3213 	pool = &soc->tx_desc[desc_pool_id];
3214 
3215 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
3216 }
3217 #else
3218 static inline
3219 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3220 {
3221 }
3222 
3223 static inline
3224 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
3225 {
3226 }
3227 #endif
3228 
3229 /**
3230  * dp_tx_notify_completion() - Notify tx completion for this desc
3231  * @soc: core txrx main context
3232  * @tx_desc: tx desc
3233  * @netbuf:  buffer
3234  *
3235  * Return: none
3236  */
3237 static inline void dp_tx_notify_completion(struct dp_soc *soc,
3238 					   struct dp_tx_desc_s *tx_desc,
3239 					   qdf_nbuf_t netbuf)
3240 {
3241 	void *osif_dev;
3242 	ol_txrx_completion_fp tx_compl_cbk = NULL;
3243 
3244 	qdf_assert(tx_desc);
3245 
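	/*
	 * Read the OS interface handle and completion callback under the
	 * flow pool lock, then invoke the callback after the lock is
	 * released.
	 */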
3246 	dp_tx_flow_pool_lock(soc, tx_desc);
3247 
3248 	if (!tx_desc->vdev ||
3249 	    !tx_desc->vdev->osif_vdev) {
3250 		dp_tx_flow_pool_unlock(soc, tx_desc);
3251 		return;
3252 	}
3253 
3254 	osif_dev = tx_desc->vdev->osif_vdev;
3255 	tx_compl_cbk = tx_desc->vdev->tx_comp;
3256 	dp_tx_flow_pool_unlock(soc, tx_desc);
3257 
3258 	if (tx_compl_cbk)
3259 		tx_compl_cbk(netbuf, osif_dev);
3260 }
3261 
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
3264  * @tid: tid value
3265  * @txdesc_ts: timestamp from txdesc
3266  * @ppdu_id: ppdu id
3267  *
3268  * Return: none
3269  */
3270 #ifdef FEATURE_PERPKT_INFO
3271 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3272 					       struct dp_peer *peer,
3273 					       uint8_t tid,
3274 					       uint64_t txdesc_ts,
3275 					       uint32_t ppdu_id)
3276 {
3277 	uint64_t delta_ms;
3278 	struct cdp_tx_sojourn_stats *sojourn_stats;
3279 
3280 	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
3281 		return;
3282 
3283 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
3284 			 tid >= CDP_DATA_TID_MAX))
3285 		return;
3286 
3287 	if (qdf_unlikely(!pdev->sojourn_buf))
3288 		return;
3289 
3290 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
3291 		qdf_nbuf_data(pdev->sojourn_buf);
3292 
3293 	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;
3294 
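	/*
	 * Sojourn time is measured from the Tx descriptor timestamp to this
	 * completion; fold it into the per-TID EWMA, publish a one-shot
	 * snapshot through the WDI event, then clear the snapshot fields.
	 */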
3295 	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
3296 				txdesc_ts;
3297 	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
3298 			    delta_ms);
3299 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
3300 	sojourn_stats->num_msdus[tid] = 1;
3301 	sojourn_stats->avg_sojourn_msdu[tid].internal =
3302 		peer->avg_sojourn_msdu[tid].internal;
3303 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
3304 			     pdev->sojourn_buf, HTT_INVALID_PEER,
3305 			     WDI_NO_VAL, pdev->pdev_id);
3306 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
3307 	sojourn_stats->num_msdus[tid] = 0;
3308 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
3309 }
3310 #else
3311 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
3312 					       struct dp_peer *peer,
3313 					       uint8_t tid,
3314 					       uint64_t txdesc_ts,
3315 					       uint32_t ppdu_id)
3316 {
3317 }
3318 #endif
3319 
3320 /**
3321  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
3322  * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: DP peer handle
3325  *
3326  * Return: none
3327  */
3328 static inline void
3329 dp_tx_comp_process_desc(struct dp_soc *soc,
3330 			struct dp_tx_desc_s *desc,
3331 			struct hal_tx_completion_status *ts,
3332 			struct dp_peer *peer)
3333 {
3334 	uint64_t time_latency = 0;
3335 	/*
3336 	 * m_copy/tx_capture modes are not supported for
3337 	 * scatter gather packets
3338 	 */
3339 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
3340 		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
3341 				desc->timestamp);
3342 	}
3343 	if (!(desc->msdu_ext_desc)) {
3344 		if (QDF_STATUS_SUCCESS ==
3345 		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
3346 			return;
3347 		}
3348 
3349 		if (QDF_STATUS_SUCCESS ==
3350 		    dp_get_completion_indication_for_stack(soc,
3351 							   desc->pdev,
3352 							   peer, ts,
3353 							   desc->nbuf,
3354 							   time_latency)) {
3355 			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
3356 						     QDF_DMA_TO_DEVICE,
3357 						     desc->nbuf->len);
3358 			dp_send_completion_to_stack(soc,
3359 						    desc->pdev,
3360 						    ts->peer_id,
3361 						    ts->ppdu_id,
3362 						    desc->nbuf);
3363 			return;
3364 		}
3365 	}
3366 
3367 	dp_tx_comp_free_buf(soc, desc);
3368 }
3369 
3370 #ifdef DISABLE_DP_STATS
3371 /**
3372  * dp_tx_update_connectivity_stats() - update tx connectivity stats
3373  * @soc: core txrx main context
3374  * @tx_desc: tx desc
3375  * @status: tx status
3376  *
3377  * Return: none
3378  */
3379 static inline
3380 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3381 				     struct dp_tx_desc_s *tx_desc,
3382 				     uint8_t status)
3383 {
3384 }
3385 #else
3386 static inline
3387 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
3388 				     struct dp_tx_desc_s *tx_desc,
3389 				     uint8_t status)
3390 {
3391 	void *osif_dev;
3392 	ol_txrx_stats_rx_fp stats_cbk;
3393 	uint8_t pkt_type;
3394 
3395 	qdf_assert(tx_desc);
3396 
3397 	if (!tx_desc->vdev ||
3398 	    !tx_desc->vdev->osif_vdev ||
3399 	    !tx_desc->vdev->stats_cb)
3400 		return;
3401 
3402 	osif_dev = tx_desc->vdev->osif_vdev;
3403 	stats_cbk = tx_desc->vdev->stats_cb;
3404 
3405 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
3406 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
3407 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
3408 			  &pkt_type);
3409 }
3410 #endif
3411 
3412 /**
3413  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
3414  * @soc: DP soc handle
3415  * @tx_desc: software descriptor head pointer
3416  * @ts: Tx completion status
3417  * @peer: peer handle
3418  * @ring_id: ring number
3419  *
3420  * Return: none
3421  */
3422 static inline
3423 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
3424 				  struct dp_tx_desc_s *tx_desc,
3425 				  struct hal_tx_completion_status *ts,
3426 				  struct dp_peer *peer, uint8_t ring_id)
3427 {
3428 	uint32_t length;
3429 	qdf_ether_header_t *eh;
3430 	struct dp_vdev *vdev = tx_desc->vdev;
3431 	qdf_nbuf_t nbuf = tx_desc->nbuf;
3432 	uint8_t dp_status;
3433 
3434 	if (!vdev || !nbuf) {
3435 		dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
3436 		goto out;
3437 	}
3438 
3439 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3440 	length = qdf_nbuf_len(nbuf);
3441 
3442 	dp_status = qdf_dp_get_status_from_htt(ts->status);
3443 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
3444 				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
3445 				 QDF_TRACE_DEFAULT_PDEV_ID,
3446 				 qdf_nbuf_data_addr(nbuf),
3447 				 sizeof(qdf_nbuf_data(nbuf)),
3448 				 tx_desc->id,
3449 				 dp_status));
3450 
3451 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3452 				"-------------------- \n"
3453 				"Tx Completion Stats: \n"
3454 				"-------------------- \n"
3455 				"ack_frame_rssi = %d \n"
3456 				"first_msdu = %d \n"
3457 				"last_msdu = %d \n"
3458 				"msdu_part_of_amsdu = %d \n"
3459 				"rate_stats valid = %d \n"
3460 				"bw = %d \n"
3461 				"pkt_type = %d \n"
3462 				"stbc = %d \n"
3463 				"ldpc = %d \n"
3464 				"sgi = %d \n"
3465 				"mcs = %d \n"
3466 				"ofdma = %d \n"
3467 				"tones_in_ru = %d \n"
3468 				"tsf = %d \n"
3469 				"ppdu_id = %d \n"
3470 				"transmit_cnt = %d \n"
3471 				"tid = %d \n"
3472 				"peer_id = %d\n",
3473 				ts->ack_frame_rssi, ts->first_msdu,
3474 				ts->last_msdu, ts->msdu_part_of_amsdu,
3475 				ts->valid, ts->bw, ts->pkt_type, ts->stbc,
3476 				ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
3477 				ts->tones_in_ru, ts->tsf, ts->ppdu_id,
3478 				ts->transmit_cnt, ts->tid, ts->peer_id);
3479 
3480 	/* Update SoC level stats */
3481 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
3482 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
3483 
3484 	if (!peer) {
3485 		dp_err_rl("peer is null or deletion in progress");
3486 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
3487 		goto out;
3488 	}
3489 
3490 	dp_tx_update_connectivity_stats(soc, tx_desc, ts->status);
3491 
3492 	/* Update per-packet stats for mesh mode */
3493 	if (qdf_unlikely(vdev->mesh_vdev) &&
3494 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
3495 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
3496 
3497 	/* Update peer level stats */
3498 	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
3499 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
3500 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
3501 
3502 			if ((peer->vdev->tx_encap_type ==
3503 				htt_cmn_pkt_type_ethernet) &&
3504 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
3505 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
3506 			}
3507 		}
3508 	} else {
3509 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
3510 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
3511 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
3512 	}
3513 
3514 	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
3515 
3516 #ifdef QCA_SUPPORT_RDK_STATS
3517 	if (soc->wlanstats_enabled)
3518 		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
3519 					    tx_desc->timestamp,
3520 					    ts->ppdu_id);
3521 #endif
3522 
3523 out:
3524 	return;
3525 }
3526 /**
3527  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
3528  * @soc: core txrx main context
3529  * @comp_head: software descriptor head pointer
3530  * @ring_id: ring number
3531  *
3532  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
3533  * and release the software descriptors after processing is complete
3534  *
3535  * Return: none
3536  */
3537 static void
3538 dp_tx_comp_process_desc_list(struct dp_soc *soc,
3539 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
3540 {
3541 	struct dp_tx_desc_s *desc;
3542 	struct dp_tx_desc_s *next;
3543 	struct hal_tx_completion_status ts;
3544 	struct dp_peer *peer;
3545 	qdf_nbuf_t netbuf;
3546 
3547 	desc = comp_head;
3548 
3549 	while (desc) {
3550 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
3551 			struct dp_pdev *pdev = desc->pdev;
3552 
3553 			peer = dp_peer_find_by_id(soc, desc->peer_id);
3554 			if (qdf_likely(peer)) {
3555 				/*
3556 				 * Increment peer statistics
3557 				 * Minimal statistics update done here
3558 				 */
3559 				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
3560 						 desc->length);
3561 
3562 				if (desc->tx_status !=
3563 						HAL_TX_TQM_RR_FRAME_ACKED)
3564 					peer->stats.tx.tx_failed++;
3565 
3566 				dp_peer_unref_del_find_by_id(peer);
3567 			}
3568 
3569 			qdf_assert(pdev);
3570 			dp_tx_outstanding_dec(pdev);
3571 
3572 			/*
3573 			 * Calling a QDF wrapper here would create a significant
3574 			 * performance impact, so the wrapper call is avoided here
3575 			 */
3576 			next = desc->next;
3577 			qdf_mem_unmap_nbytes_single(soc->osdev,
3578 						    desc->dma_addr,
3579 						    QDF_DMA_TO_DEVICE,
3580 						    desc->length);
3581 			qdf_nbuf_free(desc->nbuf);
3582 			dp_tx_desc_free(soc, desc, desc->pool_id);
3583 			desc = next;
3584 			continue;
3585 		}
3586 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
3587 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3588 		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
3589 
3590 		netbuf = desc->nbuf;
3591 		/* check tx complete notification */
3592 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
3593 			dp_tx_notify_completion(soc, desc, netbuf);
3594 
3595 		dp_tx_comp_process_desc(soc, desc, &ts, peer);
3596 
3597 		if (peer)
3598 			dp_peer_unref_del_find_by_id(peer);
3599 
3600 		next = desc->next;
3601 
3602 		dp_tx_desc_release(desc, desc->pool_id);
3603 		desc = next;
3604 	}
3605 
3606 }
3607 
3608 /**
3609  * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
3610  * @tx_desc: software descriptor head pointer
3611  * @status: Tx completion status from HTT descriptor
3612  * @ring_id: ring number
3613  *
3614  * This function will process HTT Tx indication messages from the target
3615  *
3616  * Return: none
3617  */
3618 static
3619 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
3620 				  uint8_t ring_id)
3621 {
3622 	uint8_t tx_status;
3623 	struct dp_pdev *pdev;
3624 	struct dp_vdev *vdev;
3625 	struct dp_soc *soc;
3626 	struct hal_tx_completion_status ts = {0};
3627 	uint32_t *htt_desc = (uint32_t *)status;
3628 	struct dp_peer *peer;
3629 	struct cdp_tid_tx_stats *tid_stats = NULL;
3630 	struct htt_soc *htt_handle;
3631 
3632 	/*
3633 	 * If the descriptor is already freed in vdev_detach,
3634 	 * continue to next descriptor
3635 	 */
3636 	if (!tx_desc->vdev && !tx_desc->flags) {
3637 		QDF_TRACE(QDF_MODULE_ID_DP,
3638 			  QDF_TRACE_LEVEL_INFO,
3639 			  "Descriptor freed in vdev_detach %d",
3640 			  tx_desc->id);
3641 		return;
3642 	}
3643 
3644 	pdev = tx_desc->pdev;
3645 	soc = pdev->soc;
3646 
3647 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3648 		QDF_TRACE(QDF_MODULE_ID_DP,
3649 			  QDF_TRACE_LEVEL_INFO,
3650 			  "pdev in down state %d",
3651 			  tx_desc->id);
3652 		dp_tx_comp_free_buf(soc, tx_desc);
3653 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3654 		return;
3655 	}
3656 
3657 	qdf_assert(tx_desc->pdev);
3658 
3659 	vdev = tx_desc->vdev;
3660 
3661 	if (!vdev)
3662 		return;
3663 	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
3664 	htt_handle = (struct htt_soc *)soc->htt_handle;
3665 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
3666 
3667 	switch (tx_status) {
3668 	case HTT_TX_FW2WBM_TX_STATUS_OK:
3669 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
3670 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
3671 	{
3672 		uint8_t tid;
3673 		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
3674 			ts.peer_id =
3675 				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
3676 						htt_desc[2]);
3677 			ts.tid =
3678 				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
3679 						htt_desc[2]);
3680 		} else {
3681 			ts.peer_id = HTT_INVALID_PEER;
3682 			ts.tid = HTT_INVALID_TID;
3683 		}
3684 		ts.ppdu_id =
3685 			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
3686 					htt_desc[1]);
3687 		ts.ack_frame_rssi =
3688 			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
3689 					htt_desc[1]);
3690 
3691 		ts.tsf = htt_desc[3];
3692 		ts.first_msdu = 1;
3693 		ts.last_msdu = 1;
3694 		tid = ts.tid;
3695 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3696 			tid = CDP_MAX_DATA_TIDS - 1;
3697 
3698 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
3699 
3700 		if (qdf_unlikely(pdev->delay_stats_flag))
3701 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
3702 		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
3703 			tid_stats->htt_status_cnt[tx_status]++;
3704 		}
3705 
3706 		peer = dp_peer_find_by_id(soc, ts.peer_id);
3707 
3708 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
3709 		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
3710 
3711 		/* drop the peer reference only after the peer has been used */
3712 		if (qdf_likely(peer))
3713 			dp_peer_unref_del_find_by_id(peer);
3714 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3714 
3715 		break;
3716 	}
3717 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
3718 	{
3719 		dp_tx_reinject_handler(tx_desc, status);
3720 		break;
3721 	}
3722 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
3723 	{
3724 		dp_tx_inspect_handler(tx_desc, status);
3725 		break;
3726 	}
3727 	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
3728 	{
3729 		dp_tx_mec_handler(vdev, status);
3730 		break;
3731 	}
3732 	default:
3733 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3734 			  "%s Invalid HTT tx_status %d\n",
3735 			  __func__, tx_status);
3736 		break;
3737 	}
3738 }
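
/*
 * Illustrative summary of the HTT status words consumed above (derived from
 * the accessor macros used in dp_tx_process_htt_completion(); the field
 * layout itself is owned by the HTT headers, not by this file):
 *
 *	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
 *	ppdu_id   = HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(htt_desc[1]);
 *	rssi      = HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(htt_desc[1]);
 *	peer_id   = HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(htt_desc[2]);
 *	tid       = HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(htt_desc[2]);
 *	tsf       = htt_desc[3];
 *
 * peer_id and tid are only meaningful when
 * HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2]) is set.
 */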
3739 
3740 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
3741 static inline
3742 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3743 {
3744 	bool limit_hit = false;
3745 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
3746 
3747 	limit_hit =
3748 		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
3749 
3750 	if (limit_hit)
3751 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
3752 
3753 	return limit_hit;
3754 }
3755 
3756 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3757 {
3758 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
3759 }
3760 #else
3761 static inline
3762 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
3763 {
3764 	return false;
3765 }
3766 
3767 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
3768 {
3769 	return false;
3770 }
3771 #endif
3772 
3773 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
3774 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
3775 			    uint32_t quota)
3776 {
3777 	void *tx_comp_hal_desc;
3778 	uint8_t buffer_src;
3779 	uint8_t pool_id;
3780 	uint32_t tx_desc_id;
3781 	struct dp_tx_desc_s *tx_desc = NULL;
3782 	struct dp_tx_desc_s *head_desc = NULL;
3783 	struct dp_tx_desc_s *tail_desc = NULL;
3784 	uint32_t num_processed = 0;
3785 	uint32_t count;
3786 	uint32_t num_avail_for_reap = 0;
3787 	bool force_break = false;
3788 
3789 	DP_HIST_INIT();
3790 
3791 more_data:
3792 	/* Re-initialize local variables to be re-used */
3793 	head_desc = NULL;
3794 	tail_desc = NULL;
3795 	count = 0;
3796 
3797 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
3798 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
3799 		return 0;
3800 	}
3801 
3802 	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
3803 
3804 	if (num_avail_for_reap >= quota)
3805 		num_avail_for_reap = quota;
3806 
3807 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
3808 
3809 	/* Find head descriptor from completion ring */
3810 	while (qdf_likely(num_avail_for_reap)) {
3811 
3812 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
3813 		if (qdf_unlikely(!tx_comp_hal_desc))
3814 			break;
3815 
3816 		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3817 
3818 		/* If this buffer was not released by TQM or FW, then it is
3819 		 * not a Tx completion indication; assert */
3820 		if (qdf_unlikely(buffer_src !=
3821 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
3822 				 (qdf_unlikely(buffer_src !=
3823 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
3824 			uint8_t wbm_internal_error;
3825 
3826 			dp_err_rl(
3827 				"Tx comp release_src != TQM | FW but from %d",
3828 				buffer_src);
3829 			hal_dump_comp_desc(tx_comp_hal_desc);
3830 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
3831 
3832 			/* When WBM sees NULL buffer_addr_info in any of
3833 			 * ingress rings it sends an error indication,
3834 			 * with wbm_internal_error=1, to a specific ring.
3835 			 * The WBM2SW ring used to indicate these errors is
3836 			 * fixed in HW, and that ring is being used as Tx
3837 			 * completion ring. These errors are not related to
3838 			 * Tx completions, and should just be ignored
3839 			 */
3840 			wbm_internal_error = hal_get_wbm_internal_error(
3841 							soc->hal_soc,
3842 							tx_comp_hal_desc);
3843 
3844 			if (wbm_internal_error) {
3845 				dp_err_rl("Tx comp wbm_internal_error!!");
3846 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
3847 
3848 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
3849 								buffer_src)
3850 					dp_handle_wbm_internal_error(
3851 						soc,
3852 						tx_comp_hal_desc,
3853 						hal_tx_comp_get_buffer_type(
3854 							tx_comp_hal_desc));
3855 
3856 			} else {
3857 				dp_err_rl("Tx comp wbm_internal_error false");
3858 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
3859 			}
3860 			continue;
3861 		}
3862 
3863 		/* Get descriptor id */
3864 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
3865 		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
3866 			DP_TX_DESC_ID_POOL_OS;
3867 
3868 		/* Find Tx descriptor */
3869 		tx_desc = dp_tx_desc_find(soc, pool_id,
3870 				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
3871 				DP_TX_DESC_ID_PAGE_OS,
3872 				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
3873 				DP_TX_DESC_ID_OFFSET_OS);
3874 
3875 		/*
3876 		 * If the release source is FW, process the HTT status
3877 		 */
3878 		if (qdf_unlikely(buffer_src ==
3879 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
3880 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
3881 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
3882 					htt_tx_status);
3883 			dp_tx_process_htt_completion(tx_desc,
3884 					htt_tx_status, ring_id);
3885 		} else {
3886 			/*
3887 			 * If fast completion mode is enabled, the extended
3888 			 * metadata from the descriptor is not copied
3889 			 */
3890 			if (qdf_likely(tx_desc->flags &
3891 						DP_TX_DESC_FLAG_SIMPLE)) {
3892 				tx_desc->peer_id =
3893 					hal_tx_comp_get_peer_id(tx_comp_hal_desc);
3894 				tx_desc->tx_status =
3895 					hal_tx_comp_get_tx_status(tx_comp_hal_desc);
3896 				goto add_to_pool;
3897 			}
3898 
3899 			/*
3900 			 * If the descriptor is already freed in vdev_detach,
3901 			 * continue to next descriptor
3902 			 */
3903 			if (qdf_unlikely(!tx_desc->vdev) &&
3904 					 qdf_unlikely(!tx_desc->flags)) {
3905 				QDF_TRACE(QDF_MODULE_ID_DP,
3906 					  QDF_TRACE_LEVEL_INFO,
3907 					  "Descriptor freed in vdev_detach %d",
3908 					  tx_desc_id);
3909 				continue;
3910 			}
3911 
3912 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
3913 				QDF_TRACE(QDF_MODULE_ID_DP,
3914 					  QDF_TRACE_LEVEL_INFO,
3915 					  "pdev in down state %d",
3916 					  tx_desc_id);
3917 
3918 				dp_tx_comp_free_buf(soc, tx_desc);
3919 				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
3920 				goto next_desc;
3921 			}
3922 
3923 			/* Pool id is not matching. Error */
3924 			if (tx_desc->pool_id != pool_id) {
3925 				QDF_TRACE(QDF_MODULE_ID_DP,
3926 					QDF_TRACE_LEVEL_FATAL,
3927 					"Tx Comp pool id %d not matched %d",
3928 					pool_id, tx_desc->pool_id);
3929 
3930 				qdf_assert_always(0);
3931 			}
3932 
3933 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
3934 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
3935 				QDF_TRACE(QDF_MODULE_ID_DP,
3936 					  QDF_TRACE_LEVEL_FATAL,
3937 					  "Txdesc invalid, flgs = %x,id = %d",
3938 					  tx_desc->flags, tx_desc_id);
3939 				qdf_assert_always(0);
3940 			}
3941 
3942 			/* Collect hw completion contents */
3943 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
3944 					      &tx_desc->comp, 1);
3945 add_to_pool:
3946 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
3947 
3948 			/* First ring descriptor on the cycle */
3949 			if (!head_desc) {
3950 				head_desc = tx_desc;
3951 				tail_desc = tx_desc;
3952 			}
3953 
3954 			tail_desc->next = tx_desc;
3955 			tx_desc->next = NULL;
3956 			tail_desc = tx_desc;
3957 		}
3958 next_desc:
3959 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
3960 
3961 		/*
3962 		 * If the processed packet count exceeds the given quota,
3963 		 * stop processing
3964 		 */
3965 
3966 		count++;
3967 
3968 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
3969 			break;
3970 	}
3971 
3972 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3973 
3974 	/* Process the reaped descriptors */
3975 	if (head_desc)
3976 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
3977 
3978 	if (dp_tx_comp_enable_eol_data_check(soc)) {
3979 
3980 		if (num_processed >= quota)
3981 			force_break = true;
3982 
3983 		if (!force_break &&
3984 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
3985 						  hal_ring_hdl)) {
3986 			DP_STATS_INC(soc, tx.hp_oos2, 1);
3987 			if (!hif_exec_should_yield(soc->hif_handle,
3988 						   int_ctx->dp_intr_id))
3989 				goto more_data;
3990 		}
3991 	}
3992 	DP_TX_HIST_STATS_PER_PDEV();
3993 
3994 	return num_processed;
3995 }
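
/*
 * Illustrative sketch (an assumption, not the actual caller, which lives
 * outside this file): dp_tx_comp_handler() is expected to be driven from the
 * DP interrupt/NAPI service path with a per-poll quota, roughly as below.
 * The ring lookup shown here is only an example.
 *
 *	static uint32_t example_service_tx_comp(struct dp_intr *int_ctx,
 *						struct dp_soc *soc,
 *						uint8_t ring_id,
 *						uint32_t budget)
 *	{
 *		hal_ring_handle_t hal_ring_hdl =
 *				soc->tx_comp_ring[ring_id].hal_srng;
 *
 *		return dp_tx_comp_handler(int_ctx, soc, hal_ring_hdl,
 *					  ring_id, budget);
 *	}
 *
 * The return value is the number of completions processed, which the caller
 * subtracts from its remaining budget before servicing other rings.
 */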
3996 
3997 #ifdef FEATURE_WLAN_TDLS
3998 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3999 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
4000 {
4001 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4002 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
4003 
4004 	if (!vdev) {
4005 		dp_err("vdev handle for id %d is NULL", vdev_id);
4006 		return NULL;
4007 	}
4008 
4009 	if (tx_spec & OL_TX_SPEC_NO_FREE)
4010 		vdev->is_tdls_frame = true;
4011 
4012 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
4013 }
4014 #endif
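
/*
 * Illustrative usage sketch (hypothetical caller): a TDLS client that needs
 * the frame back after transmission passes OL_TX_SPEC_NO_FREE, which marks
 * the vdev as carrying a TDLS frame (see vdev->is_tdls_frame above) so the
 * completion path can hand the nbuf back instead of silently freeing it.
 * Everything here other than dp_tx_non_std() and OL_TX_SPEC_NO_FREE is an
 * assumption made for the example.
 *
 *	qdf_nbuf_t rejected;
 *
 *	rejected = dp_tx_non_std(soc_hdl, vdev_id,
 *				 OL_TX_SPEC_NO_FREE, tdls_nbuf);
 *	if (rejected) {
 *		handle the frame(s) that could not be accepted for
 *		transmission; they are returned to the caller
 *	}
 */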
4015 
4016 static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
4017 {
4018 	struct wlan_cfg_dp_soc_ctxt *cfg;
4019 
4020 	struct dp_soc *soc;
4021 
4022 	soc = vdev->pdev->soc;
4023 	if (!soc)
4024 		return;
4025 
4026 	cfg = soc->wlan_cfg_ctx;
4027 	if (!cfg)
4028 		return;
4029 
4030 	if (vdev->opmode == wlan_op_mode_ndi)
4031 		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
4032 	else
4033 		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
4034 }
4035 
4036 /**
4037  * dp_tx_vdev_attach() - attach vdev to dp tx
4038  * @vdev: virtual device instance
4039  *
4040  * Return: QDF_STATUS_SUCCESS: success
4041  *         QDF_STATUS_E_RESOURCES: Error return
4042  */
4043 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
4044 {
4045 	int pdev_id;
4046 	/*
4047 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
4048 	 */
4049 	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
4050 			HTT_TCL_METADATA_TYPE_VDEV_BASED);
4051 
4052 	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
4053 			vdev->vdev_id);
4054 
4055 	pdev_id =
4056 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
4057 						       vdev->pdev->pdev_id);
4058 	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
4059 
4060 	/*
4061 	 * Set HTT Extension Valid bit to 0 by default
4062 	 */
4063 	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
4064 
4065 	dp_tx_vdev_update_search_flags(vdev);
4066 
4067 	dp_tx_vdev_update_feature_flags(vdev);
4068 
4069 	return QDF_STATUS_SUCCESS;
4070 }
4071 
4072 #ifndef FEATURE_WDS
4073 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
4074 {
4075 	return false;
4076 }
4077 #endif
4078 
4079 /**
4080  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
4081  * @vdev: virtual device instance
4082  *
4083  * Return: void
4084  *
4085  */
4086 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
4087 {
4088 	struct dp_soc *soc = vdev->pdev->soc;
4089 
4090 	/*
4091 	 * Enable both AddrY (SA based search) and AddrX (DA based search)
4092 	 * for TDLS link
4093 	 *
4094 	 * Enable AddrY (SA based search) only for non-WDS STA and
4095 	 * ProxySTA VAP (in HKv1) modes.
4096 	 *
4097 	 * In all other VAP modes, only DA based search should be
4098 	 * enabled
4099 	 */
4100 	if (vdev->opmode == wlan_op_mode_sta &&
4101 	    vdev->tdls_link_connected)
4102 		vdev->hal_desc_addr_search_flags =
4103 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
4104 	else if ((vdev->opmode == wlan_op_mode_sta) &&
4105 		 !dp_tx_da_search_override(vdev))
4106 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
4107 	else
4108 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
4109 
4110 	/* Set search type only when peer map v2 messaging is enabled
4111 	 * as we will have the search index (AST hash) only when v2 is
4112 	 * enabled
4113 	 */
4114 	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
4115 		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
4116 	else
4117 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
4118 }
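
/*
 * Quick reference, derived from the logic above (not a separate API):
 *
 *	STA with TDLS link connected   -> ADDRX_EN | ADDRY_EN
 *	STA without DA search override -> ADDRY_EN
 *	all other opmodes              -> ADDRX_EN
 *
 * search_type is HAL_TX_ADDR_INDEX_SEARCH only for STA vdevs on a soc with
 * peer map v2 enabled; otherwise it stays HAL_TX_ADDR_SEARCH_DEFAULT.
 */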
4119 
4120 static inline bool
4121 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
4122 			  struct dp_vdev *vdev,
4123 			  struct dp_tx_desc_s *tx_desc)
4124 {
4125 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
4126 		return false;
4127 
4128 	/*
4129 	 * if vdev is given, then only check whether desc
4130 	 * vdev match. if vdev is NULL, then check whether
4131 	 * desc pdev match.
4132 	 */
4133 	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
4134 }
4135 
4136 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4137 /**
4138  * dp_tx_desc_flush() - release resources associated
4139  *                      to TX Desc
4140  *
4141  * @pdev: Handle to DP pdev structure
4142  * @vdev: virtual device instance
4143  * NULL: no specific Vdev is required; check all allocated TX desc
4144  * on this pdev.
4145  * Non-NULL: only check the allocated TX desc associated to this Vdev.
4146  *
4147  * @force_free:
4148  * true: flush the TX desc.
4149  * false: only reset the Vdev in each allocated TX desc
4150  * that is associated to the current Vdev.
4151  *
4152  * This function walks the TX desc pool to either flush the
4153  * outstanding TX data or reset the Vdev to NULL in each associated TX
4154  * desc.
4155  */
4156 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4157 		      bool force_free)
4158 {
4159 	uint8_t i;
4160 	uint32_t j;
4161 	uint32_t num_desc, page_id, offset;
4162 	uint16_t num_desc_per_page;
4163 	struct dp_soc *soc = pdev->soc;
4164 	struct dp_tx_desc_s *tx_desc = NULL;
4165 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4166 
4167 	if (!vdev && !force_free) {
4168 		dp_err("Reset TX desc vdev, Vdev param is required!");
4169 		return;
4170 	}
4171 
4172 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
4173 		tx_desc_pool = &soc->tx_desc[i];
4174 		if (!(tx_desc_pool->pool_size) ||
4175 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
4176 		    !(tx_desc_pool->desc_pages.cacheable_pages))
4177 			continue;
4178 
4179 		/*
4180 		 * Take the flow pool lock to protect against the pool being
4181 		 * freed when all tx_desc are recycled during TX completion
4182 		 * handling. This is not necessary for a force flush because:
4183 		 * a. a double lock would occur if dp_tx_desc_release is
4184 		 *    also trying to acquire it.
4185 		 * b. dp interrupts have been disabled before the force TX
4186 		 *    desc flush in dp_pdev_deinit().
4187 		 */
4188 		if (!force_free)
4189 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
4190 		num_desc = tx_desc_pool->pool_size;
4191 		num_desc_per_page =
4192 			tx_desc_pool->desc_pages.num_element_per_page;
4193 		for (j = 0; j < num_desc; j++) {
4194 			page_id = j / num_desc_per_page;
4195 			offset = j % num_desc_per_page;
4196 
4197 			if (qdf_unlikely(!(tx_desc_pool->
4198 					 desc_pages.cacheable_pages)))
4199 				break;
4200 
4201 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4202 
4203 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4204 				/*
4205 				 * Free TX desc if force free is
4206 				 * required, otherwise only reset vdev
4207 				 * in this TX desc.
4208 				 */
4209 				if (force_free) {
4210 					dp_tx_comp_free_buf(soc, tx_desc);
4211 					dp_tx_desc_release(tx_desc, i);
4212 				} else {
4213 					tx_desc->vdev = NULL;
4214 				}
4215 			}
4216 		}
4217 		if (!force_free)
4218 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
4219 	}
4220 }
4221 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4222 /**
4223  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
4224  *
4225  * @soc: Handle to DP soc structure
4226  * @tx_desc: pointer of one TX desc
4227  * @desc_pool_id: TX Desc pool id
4228  */
4229 static inline void
4230 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
4231 		      uint8_t desc_pool_id)
4232 {
4233 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
4234 
4235 	tx_desc->vdev = NULL;
4236 
4237 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
4238 }
4239 
4240 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
4241 		      bool force_free)
4242 {
4243 	uint8_t i, num_pool;
4244 	uint32_t j;
4245 	uint32_t num_desc, page_id, offset;
4246 	uint16_t num_desc_per_page;
4247 	struct dp_soc *soc = pdev->soc;
4248 	struct dp_tx_desc_s *tx_desc = NULL;
4249 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
4250 
4251 	if (!vdev && !force_free) {
4252 		dp_err("Reset TX desc vdev, Vdev param is required!");
4253 		return;
4254 	}
4255 
4256 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4257 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4258 
4259 	for (i = 0; i < num_pool; i++) {
4260 		tx_desc_pool = &soc->tx_desc[i];
4261 		if (!tx_desc_pool->desc_pages.cacheable_pages)
4262 			continue;
4263 
4264 		num_desc_per_page =
4265 			tx_desc_pool->desc_pages.num_element_per_page;
4266 		for (j = 0; j < num_desc; j++) {
4267 			page_id = j / num_desc_per_page;
4268 			offset = j % num_desc_per_page;
4269 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
4270 
4271 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
4272 				if (force_free) {
4273 					dp_tx_comp_free_buf(soc, tx_desc);
4274 					dp_tx_desc_release(tx_desc, i);
4275 				} else {
4276 					dp_tx_desc_reset_vdev(soc, tx_desc,
4277 							      i);
4278 				}
4279 			}
4280 		}
4281 	}
4282 }
4283 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4284 
4285 /**
4286  * dp_tx_vdev_detach() - detach vdev from dp tx
4287  * @vdev: virtual device instance
4288  *
4289  * Return: QDF_STATUS_SUCCESS: success
4290  *         QDF_STATUS_E_RESOURCES: Error return
4291  */
4292 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
4293 {
4294 	struct dp_pdev *pdev = vdev->pdev;
4295 
4296 	/* Reset TX desc associated to this Vdev as NULL */
4297 	dp_tx_desc_flush(pdev, vdev, false);
4298 	dp_tx_vdev_multipass_deinit(vdev);
4299 
4300 	return QDF_STATUS_SUCCESS;
4301 }
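
/*
 * Note on the two flush modes used in this file: dp_tx_vdev_detach() above
 * calls dp_tx_desc_flush(pdev, vdev, false), so outstanding TX descriptors
 * only have their vdev reference reset, while dp_tx_pdev_detach() below
 * calls dp_tx_desc_flush(pdev, NULL, true) to force-free everything still
 * outstanding on the pdev.
 */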
4302 
4303 /**
4304  * dp_tx_pdev_init() - initialize pdev in dp tx
4305  * @pdev: physical device instance
4306  *
4307  * Return: QDF_STATUS_SUCCESS: success
4308  *         QDF_STATUS_E_RESOURCES: Error return
4309  */
4310 QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
4311 {
4312 	struct dp_soc *soc = pdev->soc;
4313 
4314 	/* Initialize Flow control counters */
4315 	qdf_atomic_init(&pdev->num_tx_outstanding);
4316 
4317 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4318 		/* Initialize descriptors in TCL Ring */
4319 		hal_tx_init_data_ring(soc->hal_soc,
4320 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
4321 	}
4322 
4323 	return QDF_STATUS_SUCCESS;
4324 }
4325 
4326 /**
4327  * dp_tx_pdev_detach() - detach pdev from dp tx
4328  * @pdev: physical device instance
4329  *
4330  * Return: QDF_STATUS_SUCCESS: success
4331  *         QDF_STATUS_E_RESOURCES: Error return
4332  */
4333 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
4334 {
4335 	/* flush TX outstanding data per pdev */
4336 	dp_tx_desc_flush(pdev, NULL, true);
4337 	dp_tx_me_exit(pdev);
4338 	return QDF_STATUS_SUCCESS;
4339 }
4340 
4341 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4342 /* Pools will be allocated dynamically */
4343 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4344 					   int num_desc)
4345 {
4346 	uint8_t i;
4347 
4348 	for (i = 0; i < num_pool; i++) {
4349 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
4350 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
4351 	}
4352 
4353 	return QDF_STATUS_SUCCESS;
4354 }
4355 
4356 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4357 					  int num_desc)
4358 {
4359 	return QDF_STATUS_SUCCESS;
4360 }
4361 
4362 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4363 {
4364 }
4365 
4366 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4367 {
4368 	uint8_t i;
4369 
4370 	for (i = 0; i < num_pool; i++)
4371 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
4372 }
4373 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
4374 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
4375 					   int num_desc)
4376 {
4377 	uint8_t i, count;
4378 
4379 	/* Allocate software Tx descriptor pools */
4380 	for (i = 0; i < num_pool; i++) {
4381 		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
4382 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4383 				  FL("Tx Desc Pool alloc %d failed %pK"),
4384 				  i, soc);
4385 			goto fail;
4386 		}
4387 	}
4388 	return QDF_STATUS_SUCCESS;
4389 
4390 fail:
4391 	for (count = 0; count < i; count++)
4392 		dp_tx_desc_pool_free(soc, count);
4393 
4394 	return QDF_STATUS_E_NOMEM;
4395 }
4396 
4397 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
4398 					  int num_desc)
4399 {
4400 	uint8_t i;
4401 	for (i = 0; i < num_pool; i++) {
4402 		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
4403 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4404 				  FL("Tx Desc Pool init %d failed %pK"),
4405 				  i, soc);
4406 			return QDF_STATUS_E_NOMEM;
4407 		}
4408 	}
4409 	return QDF_STATUS_SUCCESS;
4410 }
4411 
4412 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
4413 {
4414 	uint8_t i;
4415 
4416 	for (i = 0; i < num_pool; i++)
4417 		dp_tx_desc_pool_deinit(soc, i);
4418 }
4419 
4420 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
4421 {
4422 	uint8_t i;
4423 
4424 	for (i = 0; i < num_pool; i++)
4425 		dp_tx_desc_pool_free(soc, i);
4426 }
4427 
4428 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
4429 
4430 /**
4431  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
4432  * @soc: core txrx main context
4433  * @num_pool: number of pools
4434  *
4435  */
4436 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
4437 {
4438 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
4439 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
4440 }
4441 
4442 /**
4443  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
4444  * @soc: core txrx main context
4445  * @num_pool: number of pools
4446  *
4447  */
4448 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
4449 {
4450 	dp_tx_tso_desc_pool_free(soc, num_pool);
4451 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
4452 }
4453 
4454 /**
4455  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
4456  * @soc: core txrx main context
4457  *
4458  * This function frees all tx related descriptors as below
4459  * 1. Regular TX descriptors (static pools)
4460  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4461  * 3. TSO descriptors
4462  *
4463  */
4464 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
4465 {
4466 	uint8_t num_pool;
4467 
4468 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4469 
4470 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4471 	dp_tx_ext_desc_pool_free(soc, num_pool);
4472 	dp_tx_delete_static_pools(soc, num_pool);
4473 }
4474 
4475 /**
4476  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
4477  * @soc: core txrx main context
4478  *
4479  * This function de-initializes all tx related descriptors as below
4480  * 1. Regular TX descriptors (static pools)
4481  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
4482  * 3. TSO descriptors
4483  *
4484  */
4485 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
4486 {
4487 	uint8_t num_pool;
4488 
4489 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4490 
4491 	dp_tx_flow_control_deinit(soc);
4492 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4493 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4494 	dp_tx_deinit_static_pools(soc, num_pool);
4495 }
4496 
4497 /**
4498  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
4499  * @soc: DP soc handle
4500  * @num_pool: Number of pools
4501  * @num_desc: Number of descriptors
4500  *
4501  * Reserve TSO descriptor buffers
4502  *
4503  * Return: QDF_STATUS_E_FAILURE on failure or
4504  * QDF_STATUS_SUCCESS on success
4505  */
4506 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
4507 					 uint8_t num_pool,
4508 					 uint16_t num_desc)
4509 {
4510 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
4511 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
4512 		return QDF_STATUS_E_FAILURE;
4513 	}
4514 
4515 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
4516 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
4517 		       num_pool, soc);
4518 		return QDF_STATUS_E_FAILURE;
4519 	}
4520 	return QDF_STATUS_SUCCESS;
4521 }
4522 
4523 /**
4524  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
4525  * @soc: DP soc handle
4526  * @num_pool: Number of pools
4527  * @num_desc: Number of descriptors
4528  *
4529  * Initialize TSO descriptor pools
4530  *
4531  * Return: QDF_STATUS_E_FAILURE on failure or
4532  * QDF_STATUS_SUCCESS on success
4533  */
4534 
4535 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
4536 					uint8_t num_pool,
4537 					uint16_t num_desc)
4538 {
4539 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
4540 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
4541 		return QDF_STATUS_E_FAILURE;
4542 	}
4543 
4544 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
4545 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
4546 		       num_pool, soc);
4547 		return QDF_STATUS_E_FAILURE;
4548 	}
4549 	return QDF_STATUS_SUCCESS;
4550 }
4551 
4552 /**
4553  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
4554  * @soc: core txrx main context
4555  *
4556  * This function allocates memory for following descriptor pools
4557  * 1. regular sw tx descriptor pools (static pools)
4558  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4559  * 3. TSO descriptor pools
4560  *
4561  * Return: QDF_STATUS_SUCCESS: success
4562  *         QDF_STATUS_E_RESOURCES: Error return
4563  */
4564 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
4565 {
4566 	uint8_t num_pool;
4567 	uint32_t num_desc;
4568 	uint32_t num_ext_desc;
4569 
4570 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4571 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4572 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4573 
4574 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4575 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
4576 		  __func__, num_pool, num_desc);
4577 
4578 	if ((num_pool > MAX_TXDESC_POOLS) ||
4579 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
4580 		goto fail1;
4581 
4582 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
4583 		goto fail1;
4584 
4585 	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
4586 		goto fail2;
4587 
4588 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4589 		return QDF_STATUS_SUCCESS;
4590 
4591 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4592 		goto fail3;
4593 
4594 	return QDF_STATUS_SUCCESS;
4595 
4596 fail3:
4597 	dp_tx_ext_desc_pool_free(soc, num_pool);
4598 fail2:
4599 	dp_tx_delete_static_pools(soc, num_pool);
4600 fail1:
4601 	return QDF_STATUS_E_RESOURCES;
4602 }
4603 
4604 /**
4605  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
4606  * @soc: core txrx main context
4607  *
4608  * This function initializes the following TX descriptor pools
4609  * 1. regular sw tx descriptor pools (static pools)
4610  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
4611  * 3. TSO descriptor pools
4612  *
4613  * Return: QDF_STATUS_SUCCESS: success
4614  *	   QDF_STATUS_E_RESOURCES: Error return
4615  */
4616 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
4617 {
4618 	uint8_t num_pool;
4619 	uint32_t num_desc;
4620 	uint32_t num_ext_desc;
4621 
4622 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4623 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4624 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4625 
4626 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
4627 		goto fail1;
4628 
4629 	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
4630 		goto fail2;
4631 
4632 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
4633 		return QDF_STATUS_SUCCESS;
4634 
4635 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4636 		goto fail3;
4637 
4638 	dp_tx_flow_control_init(soc);
4639 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
4640 	return QDF_STATUS_SUCCESS;
4641 
4642 fail3:
4643 	dp_tx_ext_desc_pool_deinit(soc, num_pool);
4644 fail2:
4645 	dp_tx_deinit_static_pools(soc, num_pool);
4646 fail1:
4647 	return QDF_STATUS_E_RESOURCES;
4648 }
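
/*
 * Illustrative sketch (an assumption about the caller, which lives outside
 * this file): the soc-level pool routines above are meant to be used in an
 * alloc -> init -> ... -> deinit -> free order, roughly:
 *
 *	if (dp_soc_tx_desc_sw_pools_alloc(soc) != QDF_STATUS_SUCCESS)
 *		bail out;
 *	if (dp_soc_tx_desc_sw_pools_init(soc) != QDF_STATUS_SUCCESS) {
 *		dp_soc_tx_desc_sw_pools_free(soc);
 *		bail out;
 *	}
 *
 *	datapath runs
 *
 *	dp_soc_tx_desc_sw_pools_deinit(soc);
 *	dp_soc_tx_desc_sw_pools_free(soc);
 */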
4649 
4650 /**
4651  * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
4652  * @txrx_soc: dp soc handle
4653  *
4654  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4655  *			QDF_STATUS_E_FAILURE
4656  */
4657 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
4658 {
4659 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4660 	uint8_t num_pool;
4661 	uint32_t num_desc;
4662 	uint32_t num_ext_desc;
4663 
4664 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4665 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
4666 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
4667 
4668 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
4669 		return QDF_STATUS_E_FAILURE;
4670 
4671 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
4672 		return QDF_STATUS_E_FAILURE;
4673 
4674 	return QDF_STATUS_SUCCESS;
4675 }
4676 
4677 /**
4678  * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
4679  * @txrx_soc: dp soc handle
4680  *
4681  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
4682  */
4683 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
4684 {
4685 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4686 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
4687 
4688 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
4689 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
4690 
4691 	return QDF_STATUS_SUCCESS;
4692 }
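
/*
 * Illustrative sketch (assumption): when wlan_cfg_is_tso_desc_attach_defer()
 * is set, dp_soc_tx_desc_sw_pools_alloc()/init() above skip the TSO pools,
 * and a later caller is expected to bring them up on demand, roughly:
 *
 *	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx) &&
 *	    dp_tso_soc_attach((struct cdp_soc_t *)soc) != QDF_STATUS_SUCCESS)
 *		handle the failure;
 *
 *	dp_tso_soc_detach((struct cdp_soc_t *)soc);
 */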
4693 
4694