1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "htt.h"
21 #include "dp_htt.h"
22 #include "hal_hw_headers.h"
23 #include "dp_tx.h"
24 #include "dp_tx_desc.h"
25 #include "dp_peer.h"
26 #include "dp_types.h"
27 #include "hal_tx.h"
28 #include "qdf_mem.h"
29 #include "qdf_nbuf.h"
30 #include "qdf_net_types.h"
31 #include "qdf_module.h"
32 #include <wlan_cfg.h>
33 #include "dp_ipa.h"
34 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
35 #include "if_meta_hdr.h"
36 #endif
37 #include "enet.h"
38 #include "dp_internal.h"
39 #ifdef ATH_SUPPORT_IQUE
40 #include "dp_txrx_me.h"
41 #endif
42 #include "dp_hist.h"
43 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
44 #include <wlan_dp_swlm.h>
45 #endif
46 #ifdef WIFI_MONITOR_SUPPORT
47 #include <dp_mon.h>
48 #endif
49 #ifdef FEATURE_WDS
50 #include "dp_txrx_wds.h"
51 #endif
52 #include "cdp_txrx_cmn_reg.h"
53 #ifdef CONFIG_SAWF
54 #include <dp_sawf.h>
55 #endif
56 
57 /* Flag to skip CCE classify when mesh or tid override enabled */
58 #define DP_TX_SKIP_CCE_CLASSIFY \
59 	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
60 
61 /* TODO Add support in TSO */
62 #define DP_DESC_NUM_FRAG(x) 0
63 
64 /* disable TQM_BYPASS */
65 #define TQM_BYPASS_WAR 0
66 
67 #define DP_RETRY_COUNT 7
68 #ifdef WLAN_PEER_JITTER
69 #define DP_AVG_JITTER_WEIGHT_DENOM 4
70 #define DP_AVG_DELAY_WEIGHT_DENOM 3
71 #endif
72 
73 #ifdef QCA_DP_TX_FW_METADATA_V2
74 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
75 	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
76 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
77 	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
78 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
79 	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
80 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
81 	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
82 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
83 	 HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
84 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
85 	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
86 #define DP_TCL_METADATA_TYPE_PEER_BASED \
87 	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
88 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
89 	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
90 #else
91 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
92 	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
93 #define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
94 	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
95 #define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
96 	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
97 #define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
98 	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
99 #define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
100 	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
101 #define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
102 	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
103 #define DP_TCL_METADATA_TYPE_PEER_BASED \
104 	HTT_TCL_METADATA_TYPE_PEER_BASED
105 #define DP_TCL_METADATA_TYPE_VDEV_BASED \
106 	HTT_TCL_METADATA_TYPE_VDEV_BASED
107 #endif
108 
109 #define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
110 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
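
/*
 * Illustration (values are hypothetical; the real offset/bits come from the
 * target configuration): with LINK_ID_OFFSET = 12 and LINK_ID_BITS = 3, a
 * ppdu_id of 0x5800 gives (0x5800 >> 12) & 0x7 = 0x5 as the HW link id.
 */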
111 
112 /* Mapping between hal encrypt type and cdp_sec_type */
113 uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
114 					  HAL_TX_ENCRYPT_TYPE_WEP_128,
115 					  HAL_TX_ENCRYPT_TYPE_WEP_104,
116 					  HAL_TX_ENCRYPT_TYPE_WEP_40,
117 					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
118 					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
119 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
120 					  HAL_TX_ENCRYPT_TYPE_WAPI,
121 					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
122 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
123 					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
124 					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
125 qdf_export_symbol(sec_type_map);
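
/*
 * Note: sec_type_map[] is indexed by enum cdp_sec_type, so the entries above
 * must stay in the same order as that enum (e.g. the TKIP entry is expected
 * to map cdp_sec_type_tkip to HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC).
 */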
126 
127 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
128 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
129 {
130 	enum dp_tx_event_type type;
131 
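	/*
	 * Flag checks are ordered by precedence: an explicit flush event wins
	 * over completion-error and completion unmap events; anything else is
	 * recorded as a plain descriptor unmap.
	 */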
132 	if (flags & DP_TX_DESC_FLAG_FLUSH)
133 		type = DP_TX_DESC_FLUSH;
134 	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
135 		type = DP_TX_COMP_UNMAP_ERR;
136 	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
137 		type = DP_TX_COMP_UNMAP;
138 	else
139 		type = DP_TX_DESC_UNMAP;
140 
141 	return type;
142 }
143 
144 static inline void
145 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
146 		       qdf_nbuf_t skb, uint32_t sw_cookie,
147 		       enum dp_tx_event_type type)
148 {
149 	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
150 	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
151 	struct dp_tx_desc_event *entry;
152 	uint32_t idx;
153 	uint16_t slot;
154 
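	/*
	 * Completion-path events go to the tx_comp history ring, while
	 * map/unmap/cookie/flush events from the enqueue path go to the
	 * tx_tcl history ring; either ring is skipped if it was never
	 * allocated.
	 */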
155 	switch (type) {
156 	case DP_TX_COMP_UNMAP:
157 	case DP_TX_COMP_UNMAP_ERR:
158 	case DP_TX_COMP_MSDU_EXT:
159 		if (qdf_unlikely(!tx_comp_history->allocated))
160 			return;
161 
162 		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
163 						 &slot,
164 						 DP_TX_COMP_HIST_SLOT_SHIFT,
165 						 DP_TX_COMP_HIST_PER_SLOT_MAX,
166 						 DP_TX_COMP_HISTORY_SIZE);
167 		entry = &tx_comp_history->entry[slot][idx];
168 		break;
169 	case DP_TX_DESC_MAP:
170 	case DP_TX_DESC_UNMAP:
171 	case DP_TX_DESC_COOKIE:
172 	case DP_TX_DESC_FLUSH:
173 		if (qdf_unlikely(!tx_tcl_history->allocated))
174 			return;
175 
176 		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
177 						 &slot,
178 						 DP_TX_TCL_HIST_SLOT_SHIFT,
179 						 DP_TX_TCL_HIST_PER_SLOT_MAX,
180 						 DP_TX_TCL_HISTORY_SIZE);
181 		entry = &tx_tcl_history->entry[slot][idx];
182 		break;
183 	default:
184 		dp_info_rl("Invalid dp_tx_event_type: %d", type);
185 		return;
186 	}
187 
188 	entry->skb = skb;
189 	entry->paddr = paddr;
190 	entry->sw_cookie = sw_cookie;
191 	entry->type = type;
192 	entry->ts = qdf_get_log_timestamp();
193 }
194 
195 static inline void
196 dp_tx_tso_seg_history_add(struct dp_soc *soc,
197 			  struct qdf_tso_seg_elem_t *tso_seg,
198 			  qdf_nbuf_t skb, uint32_t sw_cookie,
199 			  enum dp_tx_event_type type)
200 {
201 	int i;
202 
203 	for (i = 1; i < tso_seg->seg.num_frags; i++) {
204 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
205 				       skb, sw_cookie, type);
206 	}
207 
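	/*
	 * When this is the last segment in the list, frag 0 is also recorded
	 * with a sentinel cookie (0xFFFFFFFF) so the end of the TSO segment
	 * chain can be spotted in the history.
	 */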
208 	if (!tso_seg->next)
209 		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
210 				       skb, 0xFFFFFFFF, type);
211 }
212 
213 static inline void
214 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
215 		      qdf_nbuf_t skb, uint32_t sw_cookie,
216 		      enum dp_tx_event_type type)
217 {
218 	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
219 	uint32_t num_segs = tso_info.num_segs;
220 
221 	while (num_segs) {
222 		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
223 		curr_seg = curr_seg->next;
224 		num_segs--;
225 	}
226 }
227 
228 #else
229 static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
230 {
231 	return DP_TX_DESC_INVAL_EVT;
232 }
233 
234 static inline void
235 dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
236 		       qdf_nbuf_t skb, uint32_t sw_cookie,
237 		       enum dp_tx_event_type type)
238 {
239 }
240 
241 static inline void
242 dp_tx_tso_seg_history_add(struct dp_soc *soc,
243 			  struct qdf_tso_seg_elem_t *tso_seg,
244 			  qdf_nbuf_t skb, uint32_t sw_cookie,
245 			  enum dp_tx_event_type type)
246 {
247 }
248 
249 static inline void
250 dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
251 		      qdf_nbuf_t skb, uint32_t sw_cookie,
252 		      enum dp_tx_event_type type)
253 {
254 }
255 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
256 
257 /**
258  * dp_is_tput_high() - Check if throughput is high
259  *
260  * @soc: core txrx main context
261  *
262  * The check is based on the RTPM tput policy requirement, where runtime PM
263  * is avoided when throughput is high.
264  */
265 static inline int dp_is_tput_high(struct dp_soc *soc)
266 {
267 	return dp_get_rtpm_tput_policy_requirement(soc);
268 }
269 
270 #if defined(FEATURE_TSO)
271 /**
272  * dp_tx_tso_unmap_segment() - Unmap TSO segment
273  *
274  * @soc: core txrx main context
275  * @seg_desc: tso segment descriptor
276  * @num_seg_desc: tso number segment descriptor
277  */
278 static void dp_tx_tso_unmap_segment(
279 		struct dp_soc *soc,
280 		struct qdf_tso_seg_elem_t *seg_desc,
281 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
282 {
283 	TSO_DEBUG("%s: Unmap the tso segment", __func__);
284 	if (qdf_unlikely(!seg_desc)) {
285 		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
286 			 __func__, __LINE__);
287 		qdf_assert(0);
288 	} else if (qdf_unlikely(!num_seg_desc)) {
289 		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
290 			 __func__, __LINE__);
291 		qdf_assert(0);
292 	} else {
293 		bool is_last_seg;
294 		/* no tso segment left to do dma unmap */
295 		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
296 			return;
297 
298 		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
299 					true : false;
300 		qdf_nbuf_unmap_tso_segment(soc->osdev,
301 					   seg_desc, is_last_seg);
302 		num_seg_desc->num_seg.tso_cmn_num_seg--;
303 	}
304 }
305 
306 /**
307  * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
308  *                            back to the freelist
309  *
310  * @soc: soc device handle
311  * @tx_desc: Tx software descriptor
312  */
313 static void dp_tx_tso_desc_release(struct dp_soc *soc,
314 				   struct dp_tx_desc_s *tx_desc)
315 {
316 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
317 	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
318 		dp_tx_err("TSO desc is NULL!");
319 		qdf_assert(0);
320 	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
321 		dp_tx_err("TSO num desc is NULL!");
322 		qdf_assert(0);
323 	} else {
324 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
325 			(struct qdf_tso_num_seg_elem_t *)tx_desc->
326 				msdu_ext_desc->tso_num_desc;
327 
328 		/* Add the tso num segment into the free list */
329 		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
330 			dp_tso_num_seg_free(soc, tx_desc->pool_id,
331 					    tx_desc->msdu_ext_desc->
332 					    tso_num_desc);
333 			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
334 			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
335 		}
336 
337 		/* Add the tso segment into the free list */
338 		dp_tx_tso_desc_free(soc,
339 				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
340 				    tso_desc);
341 		tx_desc->msdu_ext_desc->tso_desc = NULL;
342 	}
343 }
344 #else
345 static void dp_tx_tso_unmap_segment(
346 		struct dp_soc *soc,
347 		struct qdf_tso_seg_elem_t *seg_desc,
348 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
349 
350 {
351 }
352 
353 static void dp_tx_tso_desc_release(struct dp_soc *soc,
354 				   struct dp_tx_desc_s *tx_desc)
355 {
356 }
357 #endif
358 
359 #ifdef WLAN_SUPPORT_PPEDS
360 static inline int
361 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
362 			 uint8_t desc_pool_id)
363 {
364 	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
365 		__dp_tx_outstanding_dec(soc);
366 		dp_tx_desc_free(soc, tx_desc, desc_pool_id);
367 
368 		return 1;
369 	}
370 
371 	return 0;
372 }
373 #else
374 static inline int
375 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
376 			 uint8_t desc_pool_id)
377 {
378 	return 0;
379 }
380 #endif
381 
382 void
383 dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
384 		   uint8_t desc_pool_id)
385 {
386 	struct dp_pdev *pdev = tx_desc->pdev;
387 	uint8_t comp_status = 0;
388 
389 	if (dp_tx_release_ds_tx_desc(soc, tx_desc, desc_pool_id))
390 		return;
391 
392 	qdf_assert(pdev);
393 
394 	soc = pdev->soc;
395 
396 	dp_tx_outstanding_dec(pdev);
397 
398 	if (tx_desc->msdu_ext_desc) {
399 		if (tx_desc->frm_type == dp_tx_frm_tso)
400 			dp_tx_tso_desc_release(soc, tx_desc);
401 
402 		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
403 			dp_tx_me_free_buf(tx_desc->pdev,
404 					  tx_desc->msdu_ext_desc->me_buffer);
405 
406 		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
407 
408 		tx_desc->msdu_ext_desc = NULL;
409 	}
410 
411 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
412 		qdf_atomic_dec(&soc->num_tx_exception);
413 
414 	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
415 				tx_desc->buffer_src)
416 		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
417 							     soc->hal_soc);
418 	else
419 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
420 	if (soc->dp_debug_log_en) {
421 		dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
422 			    tx_desc->id, comp_status,
423 			    qdf_atomic_read(&pdev->num_tx_outstanding));
424 	}
425 
426 	if (tx_desc->flags & DP_TX_DESC_FLAG_SPECIAL)
427 		dp_tx_spcl_desc_free(soc, tx_desc, desc_pool_id);
428 	else
429 		dp_tx_desc_free(soc, tx_desc, desc_pool_id);
430 	return;
431 }
432 
433 /**
434  * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
435  * @vdev: DP vdev Handle
436  * @nbuf: skb
437  * @msdu_info: msdu_info required to create HTT metadata
438  *
439  * Prepares and fills HTT metadata in the frame pre-header for special frames
440  * that should be transmitted using varying transmit parameters.
441  * There are 2 VDEV modes that currently need this special metadata -
442  *  1) Mesh Mode
443  *  2) DSRC Mode
444  *
445  * Return: HTT metadata size
446  *
447  */
448 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
449 					  struct dp_tx_msdu_info_s *msdu_info)
450 {
451 	uint32_t *meta_data = msdu_info->meta_data;
452 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
453 				(struct htt_tx_msdu_desc_ext2_t *) meta_data;
454 
455 	uint8_t htt_desc_size;
456 
457 	/* Size rounded up to a multiple of 8 bytes */
458 	uint8_t htt_desc_size_aligned;
459 
460 	uint8_t *hdr = NULL;
461 
462 	/*
463 	 * Metadata - HTT MSDU Extension header
464 	 */
465 	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
466 	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
467 
468 	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
469 	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
470 							   meta_data[0]) ||
471 	    msdu_info->exception_fw) {
472 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
473 				 htt_desc_size_aligned)) {
474 			nbuf = qdf_nbuf_realloc_headroom(nbuf,
475 							 htt_desc_size_aligned);
476 			if (!nbuf) {
477 				/*
478 				 * qdf_nbuf_realloc_headroom won't do skb_clone
479 				 * as skb_realloc_headroom does, so no free is
480 				 * needed here.
481 				 */
482 				DP_STATS_INC(vdev,
483 					     tx_i[msdu_info->xmit_type].dropped.headroom_insufficient,
484 					     1);
485 				qdf_print(" %s[%d] skb_realloc_headroom failed",
486 					  __func__, __LINE__);
487 				return 0;
488 			}
489 		}
490 		/* Fill and add HTT metaheader */
491 		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
492 		if (!hdr) {
493 			dp_tx_err("Error in filling HTT metadata");
494 
495 			return 0;
496 		}
497 		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
498 
499 	} else if (vdev->opmode == wlan_op_mode_ocb) {
500 		/* Todo - Add support for DSRC */
501 	}
502 
503 	return htt_desc_size_aligned;
504 }
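
/*
 * A minimal usage sketch (mirroring dp_tx_prepare_desc_single() further down):
 * the aligned size returned here is folded into the descriptor's pkt_offset so
 * the HW skips the metadata that was pushed into the nbuf headroom.
 *
 *	htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, msdu_info);
 *	if (htt_hdr_size)
 *		tx_desc->pkt_offset = align_pad + htt_hdr_size;
 */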
505 
506 /**
507  * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
508  * @tso_seg: TSO segment to process
509  * @ext_desc: Pointer to MSDU extension descriptor
510  *
511  * Return: void
512  */
513 #if defined(FEATURE_TSO)
514 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
515 		void *ext_desc)
516 {
517 	uint8_t num_frag;
518 	uint32_t tso_flags;
519 
520 	/*
521 	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
522 	 * tcp_flag_mask
523 	 *
524 	 * Checksum enable flags are set in TCL descriptor and not in Extension
525 	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
526 	 */
527 	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
528 
529 	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
530 
531 	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
532 		tso_seg->tso_flags.ip_len);
533 
534 	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
535 	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
536 
537 	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
538 		uint32_t lo = 0;
539 		uint32_t hi = 0;
540 
541 		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
542 				  (tso_seg->tso_frags[num_frag].length));
543 
544 		qdf_dmaaddr_to_32s(
545 			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
546 		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
547 			tso_seg->tso_frags[num_frag].length);
548 	}
549 
550 	return;
551 }
552 #else
553 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
554 		void *ext_desc)
555 {
556 	return;
557 }
558 #endif
559 
560 #if defined(FEATURE_TSO)
561 /**
562  * dp_tx_free_tso_seg_list() - Loop through the tso segments
563  *                             allocated and free them
564  * @soc: soc handle
565  * @free_seg: list of tso segments
566  * @msdu_info: msdu descriptor
567  *
568  * Return: void
569  */
570 static void dp_tx_free_tso_seg_list(
571 		struct dp_soc *soc,
572 		struct qdf_tso_seg_elem_t *free_seg,
573 		struct dp_tx_msdu_info_s *msdu_info)
574 {
575 	struct qdf_tso_seg_elem_t *next_seg;
576 
577 	while (free_seg) {
578 		next_seg = free_seg->next;
579 		dp_tx_tso_desc_free(soc,
580 				    msdu_info->tx_queue.desc_pool_id,
581 				    free_seg);
582 		free_seg = next_seg;
583 	}
584 }
585 
586 /**
587  * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
588  *                                 allocated and free them
589  * @soc:  soc handle
590  * @free_num_seg: list of tso number segments
591  * @msdu_info: msdu descriptor
592  *
593  * Return: void
594  */
595 static void dp_tx_free_tso_num_seg_list(
596 		struct dp_soc *soc,
597 		struct qdf_tso_num_seg_elem_t *free_num_seg,
598 		struct dp_tx_msdu_info_s *msdu_info)
599 {
600 	struct qdf_tso_num_seg_elem_t *next_num_seg;
601 
602 	while (free_num_seg) {
603 		next_num_seg = free_num_seg->next;
604 		dp_tso_num_seg_free(soc,
605 				    msdu_info->tx_queue.desc_pool_id,
606 				    free_num_seg);
607 		free_num_seg = next_num_seg;
608 	}
609 }
610 
611 /**
612  * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
613  *                              do dma unmap for each segment
614  * @soc: soc handle
615  * @free_seg: list of tso segments
616  * @num_seg_desc: tso number segment descriptor
617  *
618  * Return: void
619  */
620 static void dp_tx_unmap_tso_seg_list(
621 		struct dp_soc *soc,
622 		struct qdf_tso_seg_elem_t *free_seg,
623 		struct qdf_tso_num_seg_elem_t *num_seg_desc)
624 {
625 	struct qdf_tso_seg_elem_t *next_seg;
626 
627 	if (qdf_unlikely(!num_seg_desc)) {
628 		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
629 		return;
630 	}
631 
632 	while (free_seg) {
633 		next_seg = free_seg->next;
634 		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
635 		free_seg = next_seg;
636 	}
637 }
638 
639 #ifdef FEATURE_TSO_STATS
640 /**
641  * dp_tso_get_stats_idx() - Retrieve the tso packet id
642  * @pdev: pdev handle
643  *
644  * Return: id
645  */
646 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
647 {
648 	uint32_t stats_idx;
649 
650 	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
651 						% CDP_MAX_TSO_PACKETS);
652 	return stats_idx;
653 }
654 #else
655 static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
656 {
657 	return 0;
658 }
659 #endif /* FEATURE_TSO_STATS */
660 
661 /**
662  * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
663  *				     free the tso segments descriptor and
664  *				     tso num segments descriptor
665  * @soc:  soc handle
666  * @msdu_info: msdu descriptor
667  * @tso_seg_unmap: flag to show if dma unmap is necessary
668  *
669  * Return: void
670  */
671 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
672 					  struct dp_tx_msdu_info_s *msdu_info,
673 					  bool tso_seg_unmap)
674 {
675 	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
676 	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
677 	struct qdf_tso_num_seg_elem_t *tso_num_desc =
678 					tso_info->tso_num_seg_list;
679 
680 	/* do dma unmap for each segment */
681 	if (tso_seg_unmap)
682 		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
683 
684 	/* free all tso number segment descriptors (typically there is only one) */
685 	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
686 
687 	/* free all tso segment descriptor */
688 	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
689 }
690 
691 /**
692  * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
693  * @vdev: virtual device handle
694  * @msdu: network buffer
695  * @msdu_info: meta data associated with the msdu
696  *
697  * Return: QDF_STATUS_SUCCESS success
698  */
699 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
700 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
701 {
702 	struct qdf_tso_seg_elem_t *tso_seg;
703 	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
704 	struct dp_soc *soc = vdev->pdev->soc;
705 	struct dp_pdev *pdev = vdev->pdev;
706 	struct qdf_tso_info_t *tso_info;
707 	struct qdf_tso_num_seg_elem_t *tso_num_seg;
708 	tso_info = &msdu_info->u.tso_info;
709 	tso_info->curr_seg = NULL;
710 	tso_info->tso_seg_list = NULL;
711 	tso_info->num_segs = num_seg;
712 	msdu_info->frm_type = dp_tx_frm_tso;
713 	tso_info->tso_num_seg_list = NULL;
714 
715 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
716 
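	/*
	 * Pre-allocate one TSO segment descriptor per segment reported by the
	 * network stack; the segments themselves are filled in later by
	 * qdf_nbuf_get_tso_info().
	 */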
717 	while (num_seg) {
718 		tso_seg = dp_tx_tso_desc_alloc(
719 				soc, msdu_info->tx_queue.desc_pool_id);
720 		if (tso_seg) {
721 			tso_seg->next = tso_info->tso_seg_list;
722 			tso_info->tso_seg_list = tso_seg;
723 			num_seg--;
724 		} else {
725 			dp_err_rl("Failed to alloc tso seg desc");
726 			DP_STATS_INC_PKT(vdev->pdev,
727 					 tso_stats.tso_no_mem_dropped, 1,
728 					 qdf_nbuf_len(msdu));
729 			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
730 
731 			return QDF_STATUS_E_NOMEM;
732 		}
733 	}
734 
735 	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
736 
737 	tso_num_seg = dp_tso_num_seg_alloc(soc,
738 			msdu_info->tx_queue.desc_pool_id);
739 
740 	if (tso_num_seg) {
741 		tso_num_seg->next = tso_info->tso_num_seg_list;
742 		tso_info->tso_num_seg_list = tso_num_seg;
743 	} else {
744 		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
745 			 __func__);
746 		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
747 
748 		return QDF_STATUS_E_NOMEM;
749 	}
750 
751 	msdu_info->num_seg =
752 		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
753 
754 	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
755 			msdu_info->num_seg);
756 
757 	if (!(msdu_info->num_seg)) {
758 		/*
759 		 * Free allocated TSO seg desc and number seg desc,
760 		 * do unmap for segments if dma map has done.
761 		 */
762 		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
763 		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
764 
765 		return QDF_STATUS_E_INVAL;
766 	}
767 	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
768 			      msdu, 0, DP_TX_DESC_MAP);
769 
770 	tso_info->curr_seg = tso_info->tso_seg_list;
771 
772 	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
773 	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
774 			     msdu, msdu_info->num_seg);
775 	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
776 				    tso_info->msdu_stats_idx);
777 	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
778 	return QDF_STATUS_SUCCESS;
779 }
780 #else
781 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
782 		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
783 {
784 	return QDF_STATUS_E_NOMEM;
785 }
786 #endif
787 
788 QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
789 			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
790 			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
791 
792 /**
793  * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
794  * @vdev: DP Vdev handle
795  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
796  * @desc_pool_id: Descriptor Pool ID
797  *
798  * Return: Pointer to MSDU extension descriptor on success, NULL on failure
799  */
800 static
801 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
802 		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
803 {
804 	uint8_t i;
805 	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
806 	struct dp_tx_seg_info_s *seg_info;
807 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
808 	struct dp_soc *soc = vdev->pdev->soc;
809 
810 	/* Allocate an extension descriptor */
811 	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
812 	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
813 
814 	if (!msdu_ext_desc) {
815 		DP_STATS_INC(vdev,
816 			     tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
817 		return NULL;
818 	}
819 
820 	if (msdu_info->exception_fw &&
821 			qdf_unlikely(vdev->mesh_vdev)) {
822 		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
823 				&msdu_info->meta_data[0],
824 				sizeof(struct htt_tx_msdu_desc_ext2_t));
825 		qdf_atomic_inc(&soc->num_tx_exception);
826 		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
827 	}
828 
829 	switch (msdu_info->frm_type) {
830 	case dp_tx_frm_sg:
831 	case dp_tx_frm_me:
832 	case dp_tx_frm_raw:
833 		seg_info = msdu_info->u.sg_info.curr_seg;
834 		/* Update the buffer pointers in MSDU Extension Descriptor */
835 		for (i = 0; i < seg_info->frag_cnt; i++) {
836 			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
837 				seg_info->frags[i].paddr_lo,
838 				seg_info->frags[i].paddr_hi,
839 				seg_info->frags[i].len);
840 		}
841 
842 		break;
843 
844 	case dp_tx_frm_tso:
845 		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
846 				&cached_ext_desc[0]);
847 		break;
848 
849 
850 	default:
851 		break;
852 	}
853 
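	/*
	 * The extension descriptor is composed in the cached buffer above and
	 * only then synced into the DMA-able memory of msdu_ext_desc.
	 */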
854 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
855 			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
856 
857 	hal_tx_ext_desc_sync(&cached_ext_desc[0],
858 			msdu_ext_desc->vaddr);
859 
860 	return msdu_ext_desc;
861 }
862 
863 /**
864  * dp_tx_trace_pkt() - Trace TX packet at DP layer
865  * @soc: datapath SOC
866  * @skb: skb to be traced
867  * @msdu_id: msdu_id of the packet
868  * @vdev_id: vdev_id of the packet
869  * @op_mode: Vdev Operation mode
870  *
871  * Return: None
872  */
873 #ifdef DP_DISABLE_TX_PKT_TRACE
874 static void dp_tx_trace_pkt(struct dp_soc *soc,
875 			    qdf_nbuf_t skb, uint16_t msdu_id,
876 			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
877 {
878 }
879 #else
880 static void dp_tx_trace_pkt(struct dp_soc *soc,
881 			    qdf_nbuf_t skb, uint16_t msdu_id,
882 			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
883 {
884 	if (dp_is_tput_high(soc))
885 		return;
886 
887 	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
888 	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
889 	DPTRACE(qdf_dp_trace_ptr(skb,
890 				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
891 				 QDF_TRACE_DEFAULT_PDEV_ID,
892 				 qdf_nbuf_data_addr(skb),
893 				 sizeof(qdf_nbuf_data(skb)),
894 				 msdu_id, vdev_id, 0,
895 				 op_mode));
896 
897 	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID,
898 			     op_mode);
899 
900 	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
901 				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
902 				      msdu_id, QDF_TX));
903 }
904 #endif
905 
906 #ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
907 /**
908  * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
909  *				      exception by the upper layer (OS_IF)
910  * @soc: DP soc handle
911  * @nbuf: packet to be transmitted
912  *
913  * Return: 1 if the packet is marked as exception,
914  *	   0, if the packet is not marked as exception.
915  */
916 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
917 						 qdf_nbuf_t nbuf)
918 {
919 	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
920 }
921 #else
922 static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
923 						 qdf_nbuf_t nbuf)
924 {
925 	return 0;
926 }
927 #endif
928 
929 #ifdef DP_TRAFFIC_END_INDICATION
930 /**
931  * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
932  *                                          as indication to fw to inform that
933  *                                          data stream has ended
934  * @vdev: DP vdev handle
935  * @nbuf: original buffer from network stack
936  *
937  * Return: NULL on failure,
938  *         nbuf on success
939  */
940 static inline qdf_nbuf_t
941 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
942 				     qdf_nbuf_t nbuf)
943 {
944 	/* Packet length should be enough to copy up to the L3 header */
945 	uint8_t end_nbuf_len = 64;
946 	uint8_t htt_desc_size_aligned;
947 	uint8_t htt_desc_size;
948 	qdf_nbuf_t end_nbuf;
949 
950 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
951 			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
952 		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
953 		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
954 
955 		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
956 		if (!end_nbuf) {
957 			end_nbuf = qdf_nbuf_alloc(NULL,
958 						  (htt_desc_size_aligned +
959 						  end_nbuf_len),
960 						  htt_desc_size_aligned,
961 						  8, false);
962 			if (!end_nbuf) {
963 				dp_err("Packet allocation failed");
964 				goto out;
965 			}
966 		} else {
967 			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
968 		}
969 		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
970 			     end_nbuf_len);
971 		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
972 
973 		return end_nbuf;
974 	}
975 out:
976 	return NULL;
977 }
978 
979 /**
980  * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
981  *                                           via exception path.
982  * @vdev: DP vdev handle
983  * @end_nbuf: skb to send as indication
984  * @msdu_info: msdu_info of original nbuf
985  * @peer_id: peer id
986  *
987  * Return: None
988  */
989 static inline void
990 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
991 				      qdf_nbuf_t end_nbuf,
992 				      struct dp_tx_msdu_info_s *msdu_info,
993 				      uint16_t peer_id)
994 {
995 	struct dp_tx_msdu_info_s e_msdu_info = {0};
996 	qdf_nbuf_t nbuf;
997 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
998 		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
999 	e_msdu_info.tx_queue = msdu_info->tx_queue;
1000 	e_msdu_info.tid = msdu_info->tid;
1001 	e_msdu_info.exception_fw = 1;
1002 	e_msdu_info.xmit_type = msdu_info->xmit_type;
1003 	desc_ext->host_tx_desc_pool = 1;
1004 	desc_ext->traffic_end_indication = 1;
1005 	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
1006 				      peer_id, NULL);
1007 	if (nbuf) {
1008 		dp_err("Traffic end indication packet tx failed");
1009 		qdf_nbuf_free(nbuf);
1010 	}
1011 }
1012 
1013 /**
1014  * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
1015  *                                                mark it as a traffic end
1016  *                                                indication packet.
1017  * @tx_desc: Tx descriptor pointer
1018  * @msdu_info: msdu_info structure pointer
1019  *
1020  * Return: None
1021  */
1022 static inline void
1023 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1024 					   struct dp_tx_msdu_info_s *msdu_info)
1025 {
1026 	struct htt_tx_msdu_desc_ext2_t *desc_ext =
1027 		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
1028 
1029 	if (qdf_unlikely(desc_ext->traffic_end_indication))
1030 		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
1031 }
1032 
1033 /**
1034  * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue, instead of freeing,
1035  *                                              the packet associated with a
1036  *                                              traffic end indication flagged
1037  *                                              descriptor.
1038  * @soc: dp soc handle
1039  * @desc: Tx descriptor pointer
1040  * @nbuf: buffer pointer
1041  *
1042  * Return: True if packet gets enqueued else false
1043  */
1044 static bool
1045 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1046 					 struct dp_tx_desc_s *desc,
1047 					 qdf_nbuf_t nbuf)
1048 {
1049 	struct dp_vdev *vdev = NULL;
1050 
1051 	if (qdf_unlikely((desc->flags &
1052 			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
1053 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
1054 					     DP_MOD_ID_TX_COMP);
1055 		if (vdev) {
1056 			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
1057 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
1058 			return true;
1059 		}
1060 	}
1061 	return false;
1062 }
1063 
1064 /**
1065  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1066  *                                             enable/disable status
1067  * @vdev: dp vdev handle
1068  *
1069  * Return: True if the feature is enabled, else false
1070  */
1071 static inline bool
1072 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1073 {
1074 	return qdf_unlikely(vdev->traffic_end_ind_en);
1075 }
1076 
1077 static inline qdf_nbuf_t
1078 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1079 			       struct dp_tx_msdu_info_s *msdu_info,
1080 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1081 {
1082 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1083 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1084 
1085 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1086 
1087 	if (qdf_unlikely(end_nbuf))
1088 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1089 						      msdu_info, peer_id);
1090 	return nbuf;
1091 }
1092 #else
1093 static inline qdf_nbuf_t
1094 dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1095 				     qdf_nbuf_t nbuf)
1096 {
1097 	return NULL;
1098 }
1099 
1100 static inline void
1101 dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1102 				      qdf_nbuf_t end_nbuf,
1103 				      struct dp_tx_msdu_info_s *msdu_info,
1104 				      uint16_t peer_id)
1105 {}
1106 
1107 static inline void
1108 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1109 					   struct dp_tx_msdu_info_s *msdu_info)
1110 {}
1111 
1112 static inline bool
1113 dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1114 					 struct dp_tx_desc_s *desc,
1115 					 qdf_nbuf_t nbuf)
1116 {
1117 	return false;
1118 }
1119 
1120 static inline bool
1121 dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1122 {
1123 	return false;
1124 }
1125 
1126 static inline qdf_nbuf_t
1127 dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1128 			       struct dp_tx_msdu_info_s *msdu_info,
1129 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1130 {
1131 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1132 }
1133 #endif
1134 
1135 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1136 static bool
1137 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1138 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1139 {
1140 	if (soc->features.wds_ext_ast_override_enable &&
1141 	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
1142 		return true;
1143 
1144 	return false;
1145 }
1146 #else
1147 static bool
1148 dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1149 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1150 {
1151 	return false;
1152 }
1153 #endif
1154 
1155 /**
1156  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1157  * @vdev: DP vdev handle
1158  * @nbuf: skb
1159  * @desc_pool_id: Descriptor pool ID
1160  * @msdu_info: Metadata to the fw
1161  * @tx_exc_metadata: Handle that holds exception path metadata
1162  *
1163  * Allocate and prepare Tx descriptor with msdu information.
1164  *
1165  * Return: Pointer to Tx Descriptor on success,
1166  *         NULL on failure
1167  */
1168 static
1169 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1170 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1171 		struct dp_tx_msdu_info_s *msdu_info,
1172 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1173 {
1174 	uint8_t align_pad;
1175 	uint8_t is_exception = 0;
1176 	uint8_t htt_hdr_size;
1177 	struct dp_tx_desc_s *tx_desc;
1178 	struct dp_pdev *pdev = vdev->pdev;
1179 	struct dp_soc *soc = pdev->soc;
1180 	uint8_t xmit_type = msdu_info->xmit_type;
1181 
1182 	if (dp_tx_limit_check(vdev, nbuf))
1183 		return NULL;
1184 
1185 	/* Allocate software Tx descriptor */
1186 	if (nbuf->protocol == QDF_NBUF_TRAC_EAPOL_ETH_TYPE)
1187 		tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id);
1188 	else
1189 		tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1190 
1191 	if (qdf_unlikely(!tx_desc)) {
1192 		DP_STATS_INC(vdev,
1193 			     tx_i[xmit_type].dropped.desc_na.num, 1);
1194 		DP_STATS_INC(vdev,
1195 			     tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
1196 			     1);
1197 		return NULL;
1198 	}
1199 
1200 	dp_tx_outstanding_inc(pdev);
1201 
1202 	/* Initialize the SW tx descriptor */
1203 	tx_desc->nbuf = nbuf;
1204 	tx_desc->frm_type = dp_tx_frm_std;
1205 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1206 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1207 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1208 	tx_desc->vdev_id = vdev->vdev_id;
1209 	tx_desc->pdev = pdev;
1210 	tx_desc->msdu_ext_desc = NULL;
1211 	tx_desc->pkt_offset = 0;
1212 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1213 
1214 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1215 			vdev->qdf_opmode);
1216 
1217 	if (qdf_unlikely(vdev->multipass_en)) {
1218 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1219 			goto failure;
1220 	}
1221 
1222 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1223 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1224 		is_exception = 1;
1225 
1226 	/* For BE chipsets, when WDS extension is enabled, do not mark the
1227 	 * descriptor for FW exception; AST index based search is used instead.
1228 	 */
1229 	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
1230 		return tx_desc;
1231 
1232 	/*
1233 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1234 	 * transmitted using varying transmit parameters (tx spec) which include
1235 	 * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
1236 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1237 	 * These frames are sent as exception packets to firmware.
1238 	 *
1239 	 * HW requirement is that metadata should always point to a
1240 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1241 	 *  HTT metadata must be a multiple of 8 bytes so that, together with
1242 	 *  the align_pad, the start address stays 8-byte aligned.
1243 	 *
1244 	 *  |-----------------------------|
1245 	 *  |                             |
1246 	 *  |-----------------------------| <-----Buffer Pointer Address given
1247 	 *  |                             |  ^    in HW descriptor (aligned)
1248 	 *  |       HTT Metadata          |  |
1249 	 *  |                             |  |
1250 	 *  |                             |  | Packet Offset given in descriptor
1251 	 *  |                             |  |
1252 	 *  |-----------------------------|  |
1253 	 *  |       Alignment Pad         |  v
1254 	 *  |-----------------------------| <----- Actual buffer start address
1255 	 *  |        SKB Data             |           (Unaligned)
1256 	 *  |                             |
1257 	 *  |                             |
1258 	 *  |                             |
1259 	 *  |                             |
1260 	 *  |                             |
1261 	 *  |-----------------------------|
1262 	 */
1263 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1264 				(vdev->opmode == wlan_op_mode_ocb) ||
1265 				(tx_exc_metadata &&
1266 				tx_exc_metadata->is_tx_sniffer)) {
1267 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1268 
1269 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1270 			DP_STATS_INC(vdev,
1271 				     tx_i[xmit_type].dropped.headroom_insufficient,
1272 				     1);
1273 			goto failure;
1274 		}
1275 
1276 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1277 			dp_tx_err("qdf_nbuf_push_head failed");
1278 			goto failure;
1279 		}
1280 
1281 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1282 				msdu_info);
1283 		if (htt_hdr_size == 0)
1284 			goto failure;
1285 
1286 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1287 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1288 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1289 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1290 							   msdu_info);
1291 		is_exception = 1;
1292 		tx_desc->length -= tx_desc->pkt_offset;
1293 	}
1294 
1295 #if !TQM_BYPASS_WAR
1296 	if (is_exception || tx_exc_metadata)
1297 #endif
1298 	{
1299 		/* Temporary WAR due to TQM VP issues */
1300 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1301 		qdf_atomic_inc(&soc->num_tx_exception);
1302 	}
1303 
1304 	return tx_desc;
1305 
1306 failure:
1307 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1308 	return NULL;
1309 }
1310 
1311 /**
1312  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
1313  *                        frame
1314  * @vdev: DP vdev handle
1315  * @nbuf: skb
1316  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1317  * @desc_pool_id : Descriptor Pool ID
1318  *
1319  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1320  * information. For frames with fragments, allocate and prepare
1321  * an MSDU extension descriptor
1322  *
1323  * Return: Pointer to Tx Descriptor on success,
1324  *         NULL on failure
1325  */
1326 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1327 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1328 		uint8_t desc_pool_id)
1329 {
1330 	struct dp_tx_desc_s *tx_desc;
1331 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1332 	struct dp_pdev *pdev = vdev->pdev;
1333 	struct dp_soc *soc = pdev->soc;
1334 
1335 	if (dp_tx_limit_check(vdev, nbuf))
1336 		return NULL;
1337 
1338 	/* Allocate software Tx descriptor */
1339 	if (nbuf->protocol == QDF_NBUF_TRAC_EAPOL_ETH_TYPE)
1340 		tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id);
1341 	else
1342 		tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1343 
1344 	if (!tx_desc) {
1345 		DP_STATS_INC(vdev,
1346 			     tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
1347 		return NULL;
1348 	}
1349 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1350 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1351 
1352 	dp_tx_outstanding_inc(pdev);
1353 
1354 	/* Initialize the SW tx descriptor */
1355 	tx_desc->nbuf = nbuf;
1356 	tx_desc->frm_type = msdu_info->frm_type;
1357 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1358 	tx_desc->vdev_id = vdev->vdev_id;
1359 	tx_desc->pdev = pdev;
1360 	tx_desc->pkt_offset = 0;
1361 
1362 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1363 			vdev->qdf_opmode);
1364 
1365 	/* Handle scattered frames - TSO/SG/ME */
1366 	/* Allocate and prepare an extension descriptor for scattered frames */
1367 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1368 	if (!msdu_ext_desc) {
1369 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1370 		goto failure;
1371 	}
1372 
1373 #if !TQM_BYPASS_WAR
1374 	if (qdf_unlikely(msdu_info->exception_fw) ||
1375 	    dp_tx_is_nbuf_marked_exception(soc, nbuf))
1376 #endif
1377 	{
1378 		/* Temporary WAR due to TQM VP issues */
1379 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1380 		qdf_atomic_inc(&soc->num_tx_exception);
1381 	}
1382 
1383 
1384 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1385 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1386 
1387 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1388 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1389 
1390 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1391 
1392 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1393 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1394 	else
1395 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1396 
1397 	return tx_desc;
1398 failure:
1399 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1400 	return NULL;
1401 }
1402 
1403 /**
1404  * dp_tx_prepare_raw() - Prepare RAW packet TX
1405  * @vdev: DP vdev handle
1406  * @nbuf: buffer pointer
1407  * @seg_info: Pointer to Segment info Descriptor to be prepared
1408  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1409  *     descriptor
1410  *
1411  * Return: nbuf on success, NULL on failure
1412  */
1413 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1414 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1415 {
1416 	qdf_nbuf_t curr_nbuf = NULL;
1417 	uint16_t total_len = 0;
1418 	qdf_dma_addr_t paddr;
1419 	int32_t i;
1420 	int32_t mapped_buf_num = 0;
1421 
1422 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1423 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1424 
1425 	DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].raw.raw_pkt,
1426 			 1, qdf_nbuf_len(nbuf));
1427 
1428 	/* Continue only if frames are of DATA type */
1429 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1430 		DP_STATS_INC(vdev,
1431 			     tx_i[msdu_info->xmit_type].raw.invalid_raw_pkt_datatype,
1432 			     1);
1433 		dp_tx_debug("Pkt. recd is not of data type");
1434 		goto error;
1435 	}
1436 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1437 	if (vdev->raw_mode_war &&
1438 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1439 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1440 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1441 
1442 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1443 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1444 		/*
1445 		 * Number of nbuf's must not exceed the size of the frags
1446 		 * array in seg_info.
1447 		 */
1448 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1449 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1450 			DP_STATS_INC(vdev,
1451 				     tx_i[msdu_info->xmit_type].raw.num_frags_overflow_err,
1452 				     1);
1453 			goto error;
1454 		}
1455 		if (QDF_STATUS_SUCCESS !=
1456 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1457 						   curr_nbuf,
1458 						   QDF_DMA_TO_DEVICE,
1459 						   curr_nbuf->len)) {
1460 			dp_tx_err("%s dma map error ", __func__);
1461 			DP_STATS_INC(vdev,
1462 				     tx_i[msdu_info->xmit_type].raw.dma_map_error,
1463 				     1);
1464 			goto error;
1465 		}
1466 		/* Update the count of mapped nbufs */
1467 		mapped_buf_num++;
1468 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1469 		seg_info->frags[i].paddr_lo = paddr;
1470 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
1471 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1472 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1473 		total_len += qdf_nbuf_len(curr_nbuf);
1474 	}
1475 
1476 	seg_info->frag_cnt = i;
1477 	seg_info->total_len = total_len;
1478 	seg_info->next = NULL;
1479 
1480 	sg_info->curr_seg = seg_info;
1481 
1482 	msdu_info->frm_type = dp_tx_frm_raw;
1483 	msdu_info->num_seg = 1;
1484 
1485 	return nbuf;
1486 
1487 error:
1488 	i = 0;
1489 	while (nbuf) {
1490 		curr_nbuf = nbuf;
1491 		if (i < mapped_buf_num) {
1492 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1493 						     QDF_DMA_TO_DEVICE,
1494 						     curr_nbuf->len);
1495 			i++;
1496 		}
1497 		nbuf = qdf_nbuf_next(nbuf);
1498 		qdf_nbuf_free(curr_nbuf);
1499 	}
1500 	return NULL;
1501 
1502 }
1503 
1504 /**
1505  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1506  * @soc: DP soc handle
1507  * @nbuf: Buffer pointer
1508  *
1509  * unmap the chain of nbufs that belong to this RAW frame.
1510  *
1511  * Return: None
1512  */
1513 static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1514 				    qdf_nbuf_t nbuf)
1515 {
1516 	qdf_nbuf_t cur_nbuf = nbuf;
1517 
1518 	do {
1519 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1520 					     QDF_DMA_TO_DEVICE,
1521 					     cur_nbuf->len);
1522 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1523 	} while (cur_nbuf);
1524 }
1525 
1526 #ifdef VDEV_PEER_PROTOCOL_COUNT
1527 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1528 					       qdf_nbuf_t nbuf)
1529 {
1530 	qdf_nbuf_t nbuf_local;
1531 	struct dp_vdev *vdev_local = vdev_hdl;
1532 
1533 	do {
1534 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1535 			break;
1536 		nbuf_local = nbuf;
1537 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1538 			 htt_cmn_pkt_type_raw))
1539 			break;
1540 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1541 			break;
1542 		else if (qdf_nbuf_is_tso((nbuf_local)))
1543 			break;
1544 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1545 						       (nbuf_local),
1546 						       NULL, 1, 0);
1547 	} while (0);
1548 }
1549 #endif
1550 
1551 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1552 void dp_tx_update_stats(struct dp_soc *soc,
1553 			struct dp_tx_desc_s *tx_desc,
1554 			uint8_t ring_id)
1555 {
1556 	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
1557 
1558 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1559 }
1560 
1561 int
1562 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1563 			 struct dp_tx_desc_s *tx_desc,
1564 			 uint8_t tid,
1565 			 struct dp_tx_msdu_info_s *msdu_info,
1566 			 uint8_t ring_id)
1567 {
1568 	struct dp_swlm *swlm = &soc->swlm;
1569 	union swlm_data swlm_query_data;
1570 	struct dp_swlm_tcl_data tcl_data;
1571 	QDF_STATUS status;
1572 	int ret;
1573 
1574 	if (!swlm->is_enabled)
1575 		return msdu_info->skip_hp_update;
1576 
1577 	tcl_data.nbuf = tx_desc->nbuf;
1578 	tcl_data.tid = tid;
1579 	tcl_data.ring_id = ring_id;
1580 	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
1581 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1582 	swlm_query_data.tcl_data = &tcl_data;
1583 
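	/*
	 * Consult the SW latency manager: a non-zero return below means the
	 * TCL head-pointer update may be coalesced (deferred), zero means it
	 * should be flushed immediately.
	 */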
1584 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1585 	if (QDF_IS_STATUS_ERROR(status)) {
1586 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1587 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1588 		return 0;
1589 	}
1590 
1591 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1592 	if (ret) {
1593 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1594 	} else {
1595 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1596 	}
1597 
1598 	return ret;
1599 }
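
/*
 * Illustrative note (added, not compiled): the integer returned above is the
 * "coalesce" decision consumed later in this file. A non-zero value lets the
 * caller skip the TCL head-pointer update for now (the ring is only reaped),
 * while 0 flushes it immediately. A minimal caller sketch, assuming tx_desc
 * and msdu_info were already prepared:
 *
 *	int coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *						msdu_info, ring_id);
 *	...enqueue to TCL...
 *	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
 */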
1600 
1601 void
1602 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1603 		      int coalesce)
1604 {
1605 	if (coalesce)
1606 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1607 	else
1608 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1609 }
1610 
1611 static inline void
1612 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1613 {
1614 	if (((i + 1) < msdu_info->num_seg))
1615 		msdu_info->skip_hp_update = 1;
1616 	else
1617 		msdu_info->skip_hp_update = 0;
1618 }
1619 
1620 static inline void
1621 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1622 {
1623 	hal_ring_handle_t hal_ring_hdl =
1624 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1625 
1626 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1627 		dp_err("Fillmore: SRNG access start failed");
1628 		return;
1629 	}
1630 
1631 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1632 }
1633 
1634 static inline void
1635 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1636 			 QDF_STATUS status,
1637 			 struct dp_tx_msdu_info_s *msdu_info)
1638 {
1639 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1640 		dp_flush_tcp_hp(soc,
1641 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1642 	}
1643 }
1644 #else
1645 static inline void
1646 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1647 {
1648 }
1649 
1650 static inline void
1651 dp_tx_check_and_flush_hp(struct dp_soc *soc,
1652 			 QDF_STATUS status,
1653 			 struct dp_tx_msdu_info_s *msdu_info)
1654 {
1655 }
1656 #endif
1657 
1658 #ifdef FEATURE_RUNTIME_PM
1659 void
1660 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1661 			      hal_ring_handle_t hal_ring_hdl,
1662 			      int coalesce)
1663 {
1664 	int ret;
1665 
1666 	/*
1667 	 * Avoid runtime get and put APIs under high throughput scenarios.
1668 	 */
1669 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1670 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1671 		return;
1672 	}
1673 
1674 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1675 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1676 		if (hif_system_pm_state_check(soc->hif_handle)) {
1677 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1678 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1679 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1680 		} else {
1681 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1682 		}
1683 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1684 	} else {
1685 		dp_runtime_get(soc);
1686 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1687 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1688 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1689 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1690 		dp_runtime_put(soc);
1691 	}
1692 }
1693 #else
1694 
1695 #ifdef DP_POWER_SAVE
1696 void
1697 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1698 			      hal_ring_handle_t hal_ring_hdl,
1699 			      int coalesce)
1700 {
1701 	if (hif_system_pm_state_check(soc->hif_handle)) {
1702 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1703 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1704 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1705 	} else {
1706 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1707 	}
1708 }
1709 #endif
1710 #endif
1711 
1712 /**
1713  * dp_tx_get_tid() - Obtain TID to be used for this frame
1714  * @vdev: DP vdev handle
1715  * @nbuf: skb
1716  * @msdu_info: msdu descriptor
1717  *
1718  * Extract the DSCP or PCP information from frame and map into TID value.
1719  *
1720  * Return: void
1721  */
1722 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1723 			  struct dp_tx_msdu_info_s *msdu_info)
1724 {
1725 	uint8_t tos = 0, dscp_tid_override = 0;
1726 	uint8_t *hdr_ptr, *L3datap;
1727 	uint8_t is_mcast = 0;
1728 	qdf_ether_header_t *eh = NULL;
1729 	qdf_ethervlan_header_t *evh = NULL;
1730 	uint16_t   ether_type;
1731 	qdf_llc_t *llcHdr;
1732 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1733 
1734 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1735 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1736 		eh = (qdf_ether_header_t *)nbuf->data;
1737 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1738 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1739 	} else {
1740 		qdf_dot3_qosframe_t *qos_wh =
1741 			(qdf_dot3_qosframe_t *) nbuf->data;
1742 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1743 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1744 		return;
1745 	}
1746 
1747 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1748 	ether_type = eh->ether_type;
1749 
1750 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1751 	/*
1752 	 * Check if packet is dot3 or eth2 type.
1753 	 */
1754 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1755 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1756 				sizeof(*llcHdr));
1757 
1758 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1759 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1760 				sizeof(*llcHdr);
1761 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1762 					+ sizeof(*llcHdr) +
1763 					sizeof(qdf_net_vlanhdr_t));
1764 		} else {
1765 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1766 				sizeof(*llcHdr);
1767 		}
1768 	} else {
1769 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1770 			evh = (qdf_ethervlan_header_t *) eh;
1771 			ether_type = evh->ether_type;
1772 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1773 		}
1774 	}
1775 
1776 	/*
1777 	 * Find priority from IP TOS DSCP field
1778 	 */
1779 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1780 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1781 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1782 			/* Only for unicast frames */
1783 			if (!is_mcast) {
1784 				/* send it on VO queue */
1785 				msdu_info->tid = DP_VO_TID;
1786 			}
1787 		} else {
1788 			/*
1789 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1790 			 * from TOS byte.
1791 			 */
1792 			tos = ip->ip_tos;
1793 			dscp_tid_override = 1;
1794 
1795 		}
1796 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1797 		/* TODO
1798 		 * use flowlabel
1799 		 * IGMP/MLD cases to be handled in phase 2
1800 		 */
1801 		unsigned long ver_pri_flowlabel;
1802 		unsigned long pri;
1803 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1804 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1805 			DP_IPV6_PRIORITY_SHIFT;
1806 		tos = pri;
1807 		dscp_tid_override = 1;
1808 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1809 		msdu_info->tid = DP_VO_TID;
1810 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1811 		/* Only for unicast frames */
1812 		if (!is_mcast) {
1813 			/* send ucast arp on VO queue */
1814 			msdu_info->tid = DP_VO_TID;
1815 		}
1816 	}
1817 
1818 	/*
1819 	 * Assign all MCAST packets to BE
1820 	 */
1821 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1822 		if (is_mcast) {
1823 			tos = 0;
1824 			dscp_tid_override = 1;
1825 		}
1826 	}
1827 
1828 	if (dscp_tid_override == 1) {
1829 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1830 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1831 	}
1832 
1833 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1834 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1835 
1836 	return;
1837 }
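
/*
 * Worked example (added, illustrative only): for an IPv4 packet whose TOS
 * byte is 0xB8 (DSCP 46 / EF), dp_tx_get_tid() above computes
 *
 *	tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;	->  46
 *	msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46];
 *
 * so the final TID comes from the per-pdev DSCP-TID map selected by the
 * vdev; with a typical default map this resolves to a voice TID, but the
 * exact value depends on how the map was provisioned.
 */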
1838 
1839 /**
1840  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1841  * @vdev: DP vdev handle
1842  * @nbuf: skb
1843  * @msdu_info: msdu descriptor
1844  *
1845  * Software based TID classification is required when more than 2 DSCP-TID
1846  * mapping tables are needed.
1847  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1848  *
1849  * Return: void
1850  */
1851 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1852 				      struct dp_tx_msdu_info_s *msdu_info)
1853 {
1854 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1855 
1856 	/*
1857 	 * skip_sw_tid_classification flag will be set in the below cases:
1858 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1859 	 * 2. hlos_tid_override enabled for vdev
1860 	 * 3. mesh mode enabled for vdev
1861 	 */
1862 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1863 		/* Update tid in msdu_info from skb priority */
1864 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1865 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1866 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1867 
1868 			if (tid == DP_TX_INVALID_QOS_TAG)
1869 				return;
1870 
1871 			msdu_info->tid = tid;
1872 			return;
1873 		}
1874 		return;
1875 	}
1876 
1877 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1878 }
1879 
1880 #ifdef FEATURE_WLAN_TDLS
1881 /**
1882  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1883  * @soc: datapath SOC
1884  * @vdev: datapath vdev
1885  * @tx_desc: TX descriptor
1886  *
1887  * Return: None
1888  */
1889 static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1890 				    struct dp_vdev *vdev,
1891 				    struct dp_tx_desc_s *tx_desc)
1892 {
1893 	if (vdev) {
1894 		if (vdev->is_tdls_frame) {
1895 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1896 			vdev->is_tdls_frame = false;
1897 		}
1898 	}
1899 }
1900 
1901 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1902 {
1903 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1904 
1905 	switch (soc->arch_id) {
1906 	case CDP_ARCH_TYPE_LI:
1907 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1908 		break;
1909 
1910 	case CDP_ARCH_TYPE_BE:
1911 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1912 		break;
1913 
1914 	case CDP_ARCH_TYPE_RH:
1915 		{
1916 			uint32_t *msg_word = (uint32_t *)htt_desc;
1917 
1918 			tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
1919 							*(msg_word + 3));
1920 		}
1921 		break;
1922 	default:
1923 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1924 		QDF_BUG(0);
1925 	}
1926 
1927 	return tx_status;
1928 }
1929 
1930 /**
1931  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1932  * @soc: dp_soc handle
1933  * @tx_desc: TX descriptor
1934  *
1935  * Return: None
1936  */
1937 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1938 					 struct dp_tx_desc_s *tx_desc)
1939 {
1940 	uint8_t tx_status = 0;
1941 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1942 
1943 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1944 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1945 						     DP_MOD_ID_TDLS);
1946 
1947 	if (qdf_unlikely(!vdev)) {
1948 		dp_err_rl("vdev is null!");
1949 		goto error;
1950 	}
1951 
1952 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1953 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1954 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1955 
1956 	if (vdev->tx_non_std_data_callback.func) {
1957 		qdf_nbuf_set_next(nbuf, NULL);
1958 		vdev->tx_non_std_data_callback.func(
1959 				vdev->tx_non_std_data_callback.ctxt,
1960 				nbuf, tx_status);
1961 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1962 		return;
1963 	} else {
1964 		dp_err_rl("callback func is null");
1965 	}
1966 
1967 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1968 error:
1969 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1970 	qdf_nbuf_free(nbuf);
1971 }
1972 
1973 /**
1974  * dp_tx_msdu_single_map() - do nbuf map
1975  * @vdev: DP vdev handle
1976  * @tx_desc: DP TX descriptor pointer
1977  * @nbuf: skb pointer
1978  *
1979  * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
1980  * operation done in other components.
1981  *
1982  * Return: QDF_STATUS
1983  */
1984 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1985 					       struct dp_tx_desc_s *tx_desc,
1986 					       qdf_nbuf_t nbuf)
1987 {
1988 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1989 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1990 						  nbuf,
1991 						  QDF_DMA_TO_DEVICE,
1992 						  nbuf->len);
1993 	else
1994 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1995 					   QDF_DMA_TO_DEVICE);
1996 }
1997 #else
1998 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1999 					   struct dp_vdev *vdev,
2000 					   struct dp_tx_desc_s *tx_desc)
2001 {
2002 }
2003 
2004 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
2005 						struct dp_tx_desc_s *tx_desc)
2006 {
2007 }
2008 
2009 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
2010 					       struct dp_tx_desc_s *tx_desc,
2011 					       qdf_nbuf_t nbuf)
2012 {
2013 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
2014 					  nbuf,
2015 					  QDF_DMA_TO_DEVICE,
2016 					  nbuf->len);
2017 }
2018 #endif
2019 
2020 static inline
2021 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
2022 				      struct dp_tx_desc_s *tx_desc,
2023 				      qdf_nbuf_t nbuf)
2024 {
2025 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2026 
2027 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
2028 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
2029 		return 0;
2030 
2031 	return qdf_nbuf_mapped_paddr_get(nbuf);
2032 }
2033 
2034 static inline
2035 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2036 {
2037 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
2038 					   desc->nbuf,
2039 					   desc->dma_addr,
2040 					   QDF_DMA_TO_DEVICE,
2041 					   desc->length);
2042 }
2043 
2044 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
2045 static inline bool
2046 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2047 {
2048 	struct net_device *ingress_dev;
2049 	skb_frag_t *frag;
2050 	uint16_t buf_len = 0;
2051 	uint16_t linear_data_len = 0;
2052 	uint8_t *payload_addr = NULL;
2053 
2054 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2055 
2056 	if (!ingress_dev)
2057 		return false;
2058 
2059 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2060 		qdf_net_if_release_dev((struct qdf_net_if *)ingress_dev);
2061 		frag = &(skb_shinfo(nbuf)->frags[0]);
2062 		buf_len = skb_frag_size(frag);
2063 		payload_addr = (uint8_t *)skb_frag_address(frag);
2064 		linear_data_len = skb_headlen(nbuf);
2065 
2066 		buf_len += linear_data_len;
2067 		payload_addr = payload_addr - linear_data_len;
2068 		memcpy(payload_addr, nbuf->data, linear_data_len);
2069 
2070 		msdu_info->frm_type = dp_tx_frm_rmnet;
2071 		msdu_info->buf_len = buf_len;
2072 		msdu_info->payload_addr = payload_addr;
2073 
2074 		return true;
2075 	}
2076 	qdf_net_if_release_dev((struct qdf_net_if *)ingress_dev);
2077 	return false;
2078 }
2079 
2080 static inline
2081 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2082 				    struct dp_tx_desc_s *tx_desc)
2083 {
2084 	qdf_dma_addr_t paddr;
2085 
2086 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2087 	tx_desc->length  = msdu_info->buf_len;
2088 
2089 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2090 				 (void *)(msdu_info->payload_addr +
2091 					  msdu_info->buf_len));
2092 
2093 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2094 	return paddr;
2095 }
2096 #else
2097 static inline bool
2098 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2099 {
2100 	return false;
2101 }
2102 
2103 static inline
2104 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2105 				    struct dp_tx_desc_s *tx_desc)
2106 {
2107 	return 0;
2108 }
2109 #endif
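
/*
 * Sketch of the rmnet fast path above (added, illustrative; assumes the skb
 * layout produced by IFF_PHONY_HEADROOM ingress devices): the linear header
 * bytes are copied into the headroom immediately in front of frag 0 so one
 * contiguous buffer can be described to hardware:
 *
 *	payload_addr = skb_frag_address(frag) - skb_headlen(nbuf);
 *	memcpy(payload_addr, nbuf->data, skb_headlen(nbuf));
 *	buf_len = skb_frag_size(frag) + skb_headlen(nbuf);
 *
 * dp_tx_rmnet_nbuf_map() then cache-cleans [payload_addr, payload_addr +
 * buf_len) and returns its physical address instead of doing a regular
 * DMA map, marking the descriptor with DP_TX_DESC_FLAG_RMNET.
 */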
2110 
2111 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2112 static inline
2113 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2114 			      struct dp_tx_desc_s *tx_desc,
2115 			      qdf_nbuf_t nbuf)
2116 {
2117 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_FAST)) {
2118 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2119 					 (void *)(nbuf->data + nbuf->len));
2120 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2121 	} else {
2122 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2123 	}
2124 }
2125 
2126 static inline
2127 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2128 		      struct dp_tx_desc_s *desc)
2129 {
2130 	if (qdf_unlikely(!(desc->flags &
2131 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2132 		return dp_tx_nbuf_unmap_regular(soc, desc);
2133 }
2134 #else
2135 static inline
2136 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2137 			      struct dp_tx_desc_s *tx_desc,
2138 			      qdf_nbuf_t nbuf)
2139 {
2140 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2141 }
2142 
2143 static inline
2144 void dp_tx_nbuf_unmap(struct dp_soc *soc,
2145 		      struct dp_tx_desc_s *desc)
2146 {
2147 	return dp_tx_nbuf_unmap_regular(soc, desc);
2148 }
2149 #endif
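
/*
 * Summary of the map/unmap selection above (added, illustrative): with
 * QCA_DP_TX_NBUF_NO_MAP_UNMAP on non-x86 targets and DP_TX_DESC_FLAG_FAST
 * set, the "map" is only a cache clean plus virt-to-phys conversion, so the
 * completion-side unmap is skipped for SIMPLE/RMNET descriptors; otherwise
 * the regular qdf_nbuf map/unmap pair is used. A rough decision sketch:
 *
 *	paddr = (tx_desc->flags & DP_TX_DESC_FLAG_FAST) ?
 *			qdf_mem_virt_to_phys(nbuf->data) :
 *			dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
 */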
2150 
2151 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2152 static inline
2153 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2154 {
2155 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE))) {
2156 		dp_tx_nbuf_unmap(soc, desc);
2157 		desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2158 	}
2159 }
2160 
2161 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2162 {
2163 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2164 		dp_tx_nbuf_unmap(soc, desc);
2165 }
2166 #else
2167 static inline
2168 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2169 {
2170 }
2171 
2172 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2173 {
2174 	dp_tx_nbuf_unmap(soc, desc);
2175 }
2176 #endif
2177 
2178 #ifdef MESH_MODE_SUPPORT
2179 /**
2180  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2181  * @soc: datapath SOC
2182  * @vdev: datapath vdev
2183  * @tx_desc: TX descriptor
2184  *
2185  * Return: None
2186  */
2187 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2188 					   struct dp_vdev *vdev,
2189 					   struct dp_tx_desc_s *tx_desc)
2190 {
2191 	if (qdf_unlikely(vdev->mesh_vdev))
2192 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2193 }
2194 
2195 /**
2196  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2197  * @soc: dp_soc handle
2198  * @tx_desc: TX descriptor
2199  * @delayed_free: delay the nbuf free
2200  *
2201  * Return: nbuf to be freed later
2202  */
2203 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2204 						   struct dp_tx_desc_s *tx_desc,
2205 						   bool delayed_free)
2206 {
2207 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2208 	struct dp_vdev *vdev = NULL;
2209 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
2210 
2211 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2212 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2213 		if (vdev)
2214 			DP_STATS_INC(vdev,
2215 				     tx_i[xmit_type].mesh.completion_fw, 1);
2216 
2217 		if (delayed_free)
2218 			return nbuf;
2219 
2220 		qdf_nbuf_free(nbuf);
2221 	} else {
2222 		if (vdev && vdev->osif_tx_free_ext) {
2223 			vdev->osif_tx_free_ext((nbuf));
2224 		} else {
2225 			if (delayed_free)
2226 				return nbuf;
2227 
2228 			qdf_nbuf_free(nbuf);
2229 		}
2230 	}
2231 
2232 	if (vdev)
2233 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2234 
2235 	return NULL;
2236 }
2237 #else
2238 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2239 					   struct dp_vdev *vdev,
2240 					   struct dp_tx_desc_s *tx_desc)
2241 {
2242 }
2243 
2244 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2245 						   struct dp_tx_desc_s *tx_desc,
2246 						   bool delayed_free)
2247 {
2248 	return NULL;
2249 }
2250 #endif
2251 
2252 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2253 {
2254 	struct dp_pdev *pdev = NULL;
2255 	struct dp_ast_entry *src_ast_entry = NULL;
2256 	struct dp_ast_entry *dst_ast_entry = NULL;
2257 	struct dp_soc *soc = NULL;
2258 
2259 	qdf_assert(vdev);
2260 	pdev = vdev->pdev;
2261 	qdf_assert(pdev);
2262 	soc = pdev->soc;
2263 
2264 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2265 				(soc, dstmac, vdev->pdev->pdev_id);
2266 
2267 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2268 				(soc, srcmac, vdev->pdev->pdev_id);
2269 	if (dst_ast_entry && src_ast_entry) {
2270 		if (dst_ast_entry->peer_id ==
2271 				src_ast_entry->peer_id)
2272 			return 1;
2273 	}
2274 
2275 	return 0;
2276 }
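
/*
 * Example of the check above (added, illustrative): if both the source and
 * destination MAC addresses resolve, via the per-pdev AST hash, to entries
 * owned by the same peer_id, the frame would simply loop back to the station
 * it came from, so the caller is expected to drop it:
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost))
 *		return nbuf;	(hand the frame back undelivered; the exact
 *				 handling is caller specific, shown only as
 *				 a sketch)
 */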
2277 
2278 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2279 	defined(WLAN_MCAST_MLO)
2280 /* MLO peer id for reinject */
2281 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
2282 /* MLO vdev id inc offset */
2283 #define DP_MLO_VDEV_ID_OFFSET 0x80
2284 
2285 #ifdef QCA_SUPPORT_WDS_EXTENDED
2286 static inline bool
2287 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2288 {
2289 	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
2290 		return true;
2291 
2292 	return false;
2293 }
2294 #else
2295 static inline bool
2296 dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2297 {
2298 	return false;
2299 }
2300 #endif
2301 
2302 static inline void
2303 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2304 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2305 {
2306 	/* when WDS ext is enabled, do not set the TO_FW bit */
2307 	if (dp_tx_wds_ext_check(tx_exc_metadata))
2308 		return;
2309 
2310 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2311 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2312 		qdf_atomic_inc(&soc->num_tx_exception);
2313 	}
2314 }
2315 
2316 static inline void
2317 dp_tx_update_mcast_param(uint16_t peer_id,
2318 			 uint16_t *htt_tcl_metadata,
2319 			 struct dp_vdev *vdev,
2320 			 struct dp_tx_msdu_info_s *msdu_info)
2321 {
2322 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2323 		*htt_tcl_metadata = 0;
2324 		DP_TX_TCL_METADATA_TYPE_SET(
2325 				*htt_tcl_metadata,
2326 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2327 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2328 						    msdu_info->gsn);
2329 
2330 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2331 		HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2332 							*htt_tcl_metadata, 1);
2333 	} else {
2334 		msdu_info->vdev_id = vdev->vdev_id;
2335 	}
2336 }
2337 #else
2338 static inline void
2339 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2340 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2341 {
2342 }
2343 
2344 static inline void
2345 dp_tx_update_mcast_param(uint16_t peer_id,
2346 			 uint16_t *htt_tcl_metadata,
2347 			 struct dp_vdev *vdev,
2348 			 struct dp_tx_msdu_info_s *msdu_info)
2349 {
2350 }
2351 #endif
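
/*
 * Illustrative effect of dp_tx_update_mcast_param() above for an MLO
 * multicast reinject (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID): the TCL
 * metadata is rebuilt as global-sequence based and the vdev_id handed to
 * firmware is offset into the MLO range:
 *
 *	DP_TX_TCL_METADATA_TYPE_SET(meta,
 *			HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
 *	HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(meta, msdu_info->gsn);
 *	msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
 *
 * For any other peer_id the metadata is left untouched and the real vdev_id
 * is used.
 */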
2352 
2353 #ifdef DP_TX_SW_DROP_STATS_INC
2354 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2355 				 qdf_nbuf_t nbuf,
2356 				 enum cdp_tx_sw_drop drop_code)
2357 {
2358 	/* EAPOL Drop stats */
2359 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2360 		switch (drop_code) {
2361 		case TX_DESC_ERR:
2362 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2363 			break;
2364 		case TX_HAL_RING_ACCESS_ERR:
2365 			DP_STATS_INC(pdev,
2366 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2367 			break;
2368 		case TX_DMA_MAP_ERR:
2369 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2370 			break;
2371 		case TX_HW_ENQUEUE:
2372 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2373 			break;
2374 		case TX_SW_ENQUEUE:
2375 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2376 			break;
2377 		default:
2378 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2379 			break;
2380 		}
2381 	}
2382 }
2383 #else
2384 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2385 				 qdf_nbuf_t nbuf,
2386 				 enum cdp_tx_sw_drop drop_code)
2387 {
2388 }
2389 #endif
2390 
2391 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
2392 /**
2393  * dp_tx_latency_stats_enabled() - check enablement of transmit latency
2394  * statistics
2395  * @vdev: DP vdev handle
2396  *
2397  * Return: true if transmit latency statistics is enabled, false otherwise.
2398  */
2399 static inline bool dp_tx_latency_stats_enabled(struct dp_vdev *vdev)
2400 {
2401 	return qdf_atomic_read(&vdev->tx_latency_cfg.enabled);
2402 }
2403 
2404 /**
2405  * dp_tx_latency_stats_report_enabled() - check enablement of async report
2406  * for transmit latency statistics
2407  * @vdev: DP vdev handle
2408  *
2409  * Return: true if transmit latency statistics is enabled, false otherwise.
2410  */
2411 static inline bool dp_tx_latency_stats_report_enabled(struct dp_vdev *vdev)
2412 {
2413 	return qdf_atomic_read(&vdev->tx_latency_cfg.report);
2414 }
2415 
2416 /**
2417  * dp_tx_get_driver_ingress_ts() - get driver ingress timestamp from nbuf
2418  * @vdev: DP vdev handle
2419  * @msdu_info: pointer to MSDU Descriptor
2420  * @nbuf: original buffer from network stack
2421  *
2422  * Return: None
2423  */
2424 static inline void
2425 dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
2426 			    struct dp_tx_msdu_info_s *msdu_info,
2427 			    qdf_nbuf_t nbuf)
2428 {
2429 	if (!dp_tx_latency_stats_enabled(vdev))
2430 		return;
2431 
2432 	msdu_info->driver_ingress_ts = qdf_nbuf_get_tx_ts(nbuf, true);
2433 }
2434 
2435 /**
2436  * dp_tx_update_ts_on_enqueued() - set driver ingress/egress timestamp in
2437  * tx descriptor
2438  * @vdev: DP vdev handle
2439  * @msdu_info: pointer to MSDU Descriptor
2440  * @tx_desc: pointer to tx descriptor
2441  *
2442  * Return: None
2443  */
2444 static inline void
2445 dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
2446 			    struct dp_tx_msdu_info_s *msdu_info,
2447 			    struct dp_tx_desc_s *tx_desc)
2448 {
2449 	if (!dp_tx_latency_stats_enabled(vdev))
2450 		return;
2451 
2452 	tx_desc->driver_ingress_ts = msdu_info->driver_ingress_ts;
2453 	tx_desc->driver_egress_ts = qdf_ktime_real_get();
2454 }
2455 
2456 /**
2457  * dp_tx_latency_stats_update_bucket() - update transmit latency statistics
2458  * for specified type
2459  * @vdev: DP vdev handle
2460  * @tx_latency: pointer to transmit latency stats
2461  * @idx: index of the statistics
2462  * @type: transmit latency type
2463  * @value: latency to be recorded
2464  *
2465  * Return: None
2466  */
2467 static inline void
2468 dp_tx_latency_stats_update_bucket(struct dp_vdev *vdev,
2469 				  struct dp_tx_latency *tx_latency,
2470 				  int idx, enum cdp_tx_latency_type type,
2471 				  uint32_t value)
2472 {
2473 	int32_t granularity;
2474 	int lvl;
2475 
2476 	granularity =
2477 		qdf_atomic_read(&vdev->tx_latency_cfg.granularity[type]);
2478 	if (qdf_unlikely(!granularity))
2479 		return;
2480 
2481 	lvl = value / granularity;
2482 	if (lvl >= CDP_TX_LATENCY_DISTR_LV_MAX)
2483 		lvl = CDP_TX_LATENCY_DISTR_LV_MAX - 1;
2484 
2485 	qdf_atomic_inc(&tx_latency->stats[idx][type].msdus_accum);
2486 	qdf_atomic_add(value, &tx_latency->stats[idx][type].latency_accum);
2487 	qdf_atomic_inc(&tx_latency->stats[idx][type].distribution[lvl]);
2488 }
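
/*
 * Worked example for the bucketing above (added, illustrative): with a
 * configured granularity of 500 us for a given latency type, a measured
 * value of 1200 us lands in level 1200 / 500 = 2 (capped at
 * CDP_TX_LATENCY_DISTR_LV_MAX - 1), so under the index currently being
 * filled the function does:
 *
 *	msdus_accum     += 1
 *	latency_accum   += 1200
 *	distribution[2] += 1
 */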
2489 
2490 /**
2491  * dp_tx_latency_stats_update() - update transmit latency statistics on
2492  * msdu transmit completed
2493  * @soc: dp soc handle
2494  * @txrx_peer: txrx peer handle
2495  * @tx_desc: pointer to tx descriptor
2496  * @ts: tx completion status
2497  * @link_id: link id
2498  *
2499  * Return: None
2500  */
2501 static inline void
2502 dp_tx_latency_stats_update(struct dp_soc *soc,
2503 			   struct dp_txrx_peer *txrx_peer,
2504 			   struct dp_tx_desc_s *tx_desc,
2505 			   struct hal_tx_completion_status *ts,
2506 			   uint8_t link_id)
2507 {
2508 	uint32_t driver_latency, ring_buf_latency, hw_latency;
2509 	QDF_STATUS status = QDF_STATUS_E_INVAL;
2510 	int64_t current_ts, ingress, egress;
2511 	struct dp_vdev *vdev = txrx_peer->vdev;
2512 	struct dp_tx_latency *tx_latency;
2513 	uint8_t idx;
2514 
2515 	if (!dp_tx_latency_stats_enabled(vdev))
2516 		return;
2517 
2518 	if (!tx_desc->driver_ingress_ts || !tx_desc->driver_egress_ts)
2519 		return;
2520 
2521 	status = dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, &hw_latency);
2522 	if (QDF_IS_STATUS_ERROR(status))
2523 		return;
2524 
2525 	ingress = qdf_ktime_to_us(tx_desc->driver_ingress_ts);
2526 	egress = qdf_ktime_to_us(tx_desc->driver_egress_ts);
2527 	driver_latency = (uint32_t)(egress - ingress);
2528 
2529 	current_ts = qdf_ktime_to_us(qdf_ktime_real_get());
2530 	ring_buf_latency = (uint32_t)(current_ts - egress);
2531 
2532 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2533 	idx = tx_latency->cur_idx;
2534 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2535 					  CDP_TX_LATENCY_TYPE_DRIVER,
2536 					  driver_latency);
2537 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2538 					  CDP_TX_LATENCY_TYPE_RING_BUF,
2539 					  ring_buf_latency);
2540 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2541 					  CDP_TX_LATENCY_TYPE_HW, hw_latency);
2542 }
2543 
2544 /**
2545  * dp_tx_latency_stats_clear_bucket() - clear specified transmit latency
2546  * statistics for specified type
2547  * @tx_latency: pointer to transmit latency stats
2548  * @idx: index of the statistics
2549  * @type: transmit latency type
2550  *
2551  * Return: None
2552  */
2553 static inline void
2554 dp_tx_latency_stats_clear_bucket(struct dp_tx_latency *tx_latency,
2555 				 int idx, enum cdp_tx_latency_type type)
2556 {
2557 	int lvl;
2558 	struct dp_tx_latency_stats *stats;
2559 
2560 	stats = &tx_latency->stats[idx][type];
2561 	qdf_atomic_init(&stats->msdus_accum);
2562 	qdf_atomic_init(&stats->latency_accum);
2563 	for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
2564 		qdf_atomic_init(&stats->distribution[lvl]);
2565 }
2566 
2567 /**
2568  * dp_tx_latency_stats_clear_buckets() - clear specified transmit latency
2569  * statistics
2570  * @tx_latency: pointer to transmit latency stats
2571  * @idx: index of the statistics
2572  *
2573  * Return: None
2574  */
2575 static void
2576 dp_tx_latency_stats_clear_buckets(struct dp_tx_latency *tx_latency,
2577 				  int idx)
2578 {
2579 	int type;
2580 
2581 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
2582 		dp_tx_latency_stats_clear_bucket(tx_latency, idx, type);
2583 }
2584 
2585 /**
2586  * dp_tx_latency_stats_update_cca() - update transmit latency statistics for
2587  * CCA
2588  * @soc: dp soc handle
2589  * @peer_id: peer id
2590  * @granularity: granularity of distribution
2591  * @distribution: distribution of transmit latency statistics
2592  * @avg: average of CCA latency (in microseconds) within a cycle
2593  *
2594  * Return: None
2595  */
2596 void
2597 dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id,
2598 			       uint32_t granularity, uint32_t *distribution,
2599 			       uint32_t avg)
2600 {
2601 	int lvl, idx;
2602 	uint8_t link_id;
2603 	struct dp_tx_latency *tx_latency;
2604 	struct dp_tx_latency_stats *stats;
2605 	int32_t cur_granularity;
2606 	struct dp_vdev *vdev;
2607 	struct dp_tx_latency_config *cfg;
2608 	struct dp_txrx_peer *txrx_peer;
2609 	struct dp_peer *peer;
2610 
2611 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2612 	if (!peer) {
2613 		dp_err_rl("Peer not found peer id %d", peer_id);
2614 		return;
2615 	}
2616 
2617 	if (IS_MLO_DP_MLD_PEER(peer))
2618 		goto out;
2619 
2620 	vdev = peer->vdev;
2621 	if (!dp_tx_latency_stats_enabled(vdev))
2622 		goto out;
2623 
2624 	cfg = &vdev->tx_latency_cfg;
2625 	cur_granularity =
2626 		qdf_atomic_read(&cfg->granularity[CDP_TX_LATENCY_TYPE_CCA]);
2627 
2628 	/* in units of ms */
2629 	cur_granularity /= 1000;
2630 	if (cur_granularity != granularity) {
2631 		dp_info_rl("invalid granularity, cur %d report %d",
2632 			   cur_granularity, granularity);
2633 		goto out;
2634 	}
2635 
2636 	txrx_peer = dp_get_txrx_peer(peer);
2637 	if (qdf_unlikely(!txrx_peer)) {
2638 		dp_err_rl("txrx_peer NULL for MAC: " QDF_MAC_ADDR_FMT,
2639 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2640 		goto out;
2641 	}
2642 
2643 	link_id = dp_get_peer_link_id(peer);
2644 	if (link_id >= txrx_peer->stats_arr_size)
2645 		goto out;
2646 
2647 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2648 	idx = tx_latency->cur_idx;
2649 	stats = &tx_latency->stats[idx][CDP_TX_LATENCY_TYPE_CCA];
2650 	qdf_atomic_set(&stats->latency_accum, avg);
2651 	qdf_atomic_set(&stats->msdus_accum, (avg ? 1 : 0));
2652 	for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
2653 		qdf_atomic_set(&stats->distribution[lvl],
2654 			       distribution[lvl]);
2655 
2656 	/* prepare for the next cycle */
2657 	tx_latency->cur_idx = 1 - idx;
2658 	dp_tx_latency_stats_clear_buckets(tx_latency, tx_latency->cur_idx);
2659 
2660 out:
2661 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2662 }
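
/*
 * Note on the index flip above (added, illustrative): the per-peer latency
 * statistics are double-buffered. Writers fill tx_latency->stats[cur_idx][..]
 * during a cycle; when the CCA report for that cycle arrives, cur_idx is
 * flipped (cur_idx = 1 - idx) and the new current buckets are cleared, so
 * readers such as dp_tx_latency_stats_get_per_peer() consume the completed
 * buffer at index 1 - cur_idx.
 */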
2663 
2664 /**
2665  * dp_tx_latency_stats_get_per_peer() - get transmit latency statistics for a
2666  * peer
2667  * @soc: dp soc handle
2668  * @peer: dp peer Handle
2669  * @latency: buffer to hold transmit latency statistics
2670  *
2671  * Return: QDF_STATUS
2672  */
2673 static QDF_STATUS
2674 dp_tx_latency_stats_get_per_peer(struct dp_soc *soc, struct dp_peer *peer,
2675 				 struct cdp_tx_latency *latency)
2676 {
2677 	int lvl, type, link_id;
2678 	int32_t latency_accum, msdus_accum;
2679 	struct dp_vdev *vdev;
2680 	struct dp_txrx_peer *txrx_peer;
2681 	struct dp_tx_latency *tx_latency;
2682 	struct dp_tx_latency_config *cfg;
2683 	struct dp_tx_latency_stats *stats;
2684 	uint8_t last_idx;
2685 
2686 	if (unlikely(!latency))
2687 		return QDF_STATUS_E_INVAL;
2688 
2689 	/* Authenticated link/legacy peer only */
2690 	if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
2691 		return QDF_STATUS_E_INVAL;
2692 
2693 	vdev = peer->vdev;
2694 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
2695 		return QDF_STATUS_E_INVAL;
2696 
2697 	txrx_peer = dp_get_txrx_peer(peer);
2698 	if (!txrx_peer)
2699 		return QDF_STATUS_E_INVAL;
2700 
2701 	link_id = dp_get_peer_link_id(peer);
2702 	if (link_id >= txrx_peer->stats_arr_size)
2703 		return QDF_STATUS_E_INVAL;
2704 
2705 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2706 	qdf_mem_zero(latency, sizeof(*latency));
2707 	qdf_mem_copy(latency->mac_remote.bytes,
2708 		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2709 	last_idx = 1 - tx_latency->cur_idx;
2710 	cfg = &vdev->tx_latency_cfg;
2711 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++) {
2712 		latency->stats[type].granularity =
2713 			qdf_atomic_read(&cfg->granularity[type]);
2714 		stats = &tx_latency->stats[last_idx][type];
2715 		msdus_accum = qdf_atomic_read(&stats->msdus_accum);
2716 		if (!msdus_accum)
2717 			continue;
2718 
2719 		latency_accum = qdf_atomic_read(&stats->latency_accum);
2720 		latency->stats[type].average = latency_accum / msdus_accum;
2721 		for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++) {
2722 			latency->stats[type].distribution[lvl] =
2723 				qdf_atomic_read(&stats->distribution[lvl]);
2724 		}
2725 	}
2726 
2727 	return QDF_STATUS_SUCCESS;
2728 }
2729 
2730 /**
2731  * dp_tx_latency_stats_get_peer_iter() - iterator to get transmit latency
2732  * statistics for specified peer
2733  * @soc: dp soc handle
2734  * @peer: dp peer Handle
2735  * @arg: list to hold transmit latency statistics for peers
2736  *
2737  * Return: None
2738  */
2739 static void
2740 dp_tx_latency_stats_get_peer_iter(struct dp_soc *soc,
2741 				  struct dp_peer *peer,
2742 				  void *arg)
2743 {
2744 	struct dp_vdev *vdev;
2745 	struct dp_txrx_peer *txrx_peer;
2746 	struct cdp_tx_latency *latency;
2747 	QDF_STATUS status;
2748 	qdf_list_t *stats_list = (qdf_list_t *)arg;
2749 
2750 	/* Authenticated link/legacy peer only */
2751 	if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
2752 		return;
2753 
2754 	txrx_peer = dp_get_txrx_peer(peer);
2755 	if (!txrx_peer)
2756 		return;
2757 
2758 	vdev = peer->vdev;
2759 	latency = qdf_mem_malloc(sizeof(*latency));
2760 	if (!latency)
2761 		return;
2762 
2763 	status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
2764 	if (QDF_IS_STATUS_ERROR(status))
2765 		goto out;
2766 
2767 	status = qdf_list_insert_back(stats_list, &latency->node);
2768 	if (QDF_IS_STATUS_ERROR(status))
2769 		goto out;
2770 
2771 	return;
2772 
2773 out:
2774 	qdf_mem_free(latency);
2775 }
2776 
2777 /**
2778  * dp_tx_latency_stats_rpt_per_vdev() - report transmit latency statistics for
2779  * specified vdev
2780  * @soc: dp soc handle
2781  * @vdev: dp vdev Handle
2782  *
2783  * Return: None
2784  */
2785 static void
2786 dp_tx_latency_stats_rpt_per_vdev(struct dp_soc *soc, struct dp_vdev *vdev)
2787 {
2788 	qdf_list_t stats_list;
2789 	struct cdp_tx_latency *entry, *next;
2790 
2791 	if (!soc->tx_latency_cb || !dp_tx_latency_stats_report_enabled(vdev))
2792 		return;
2793 
2794 	qdf_list_create(&stats_list, 0);
2795 	dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_get_peer_iter,
2796 			     &stats_list, DP_MOD_ID_CDP);
2797 	if (qdf_list_empty(&stats_list))
2798 		goto out;
2799 
2800 	soc->tx_latency_cb(vdev->vdev_id, &stats_list);
2801 
2802 	qdf_list_for_each_del(&stats_list, entry, next, node) {
2803 		qdf_list_remove_node(&stats_list, &entry->node);
2804 		qdf_mem_free(entry);
2805 	}
2806 
2807 out:
2808 	qdf_list_destroy(&stats_list);
2809 }
2810 
2811 /**
2812  * dp_tx_latency_stats_report() - report transmit latency statistics for each
2813  * vdev of specified pdev
2814  * @soc: dp soc handle
2815  * @pdev: dp pdev Handle
2816  *
2817  * Return: None
2818  */
2819 void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev)
2820 {
2821 	struct dp_vdev *vdev;
2822 
2823 	if (!soc->tx_latency_cb)
2824 		return;
2825 
2826 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2827 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2828 		dp_tx_latency_stats_rpt_per_vdev(soc, vdev);
2829 	}
2830 
2831 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2832 }
2833 
2834 /**
2835  * dp_tx_latency_stats_clear_per_peer() - iterator to clear transmit latency
2836  * statistics for specified peer
2837  * @soc: dp soc handle
2838  * @peer: dp peer Handle
2839  * @arg: argument from iterator
2840  *
2841  * Return: None
2842  */
2843 static void
2844 dp_tx_latency_stats_clear_per_peer(struct dp_soc *soc, struct dp_peer *peer,
2845 				   void *arg)
2846 {
2847 	int link_id;
2848 	struct dp_tx_latency *tx_latency;
2849 	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
2850 
2851 	if (!txrx_peer) {
2852 		dp_err("no txrx peer, skip");
2853 		return;
2854 	}
2855 
2856 	for (link_id = 0; link_id < txrx_peer->stats_arr_size; link_id++) {
2857 		tx_latency = &txrx_peer->stats[link_id].tx_latency;
2858 		dp_tx_latency_stats_clear_buckets(tx_latency, 0);
2859 		dp_tx_latency_stats_clear_buckets(tx_latency, 1);
2860 	}
2861 }
2862 
2863 /**
2864  * dp_tx_latency_stats_clear_per_vdev() - clear transmit latency statistics
2865  * for specified vdev
2866  * @vdev: dp vdev handle
2867  *
2868  * Return: None
2869  */
2870 static inline void dp_tx_latency_stats_clear_per_vdev(struct dp_vdev *vdev)
2871 {
2872 	dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_clear_per_peer,
2873 			     NULL, DP_MOD_ID_CDP);
2874 }
2875 
2876 /**
2877  * dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
2878  * specified link mac address
2879  * @soc_hdl: Handle to struct dp_soc
2880  * @vdev_id: vdev id
2881  * @mac: link mac address of remote peer
2882  * @latency: buffer to hold per-link transmit latency statistics
2883  *
2884  * Return: QDF_STATUS
2885  */
2886 QDF_STATUS
2887 dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl,
2888 			  uint8_t vdev_id, uint8_t *mac,
2889 			  struct cdp_tx_latency *latency)
2890 {
2891 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2892 	struct cdp_peer_info peer_info = {0};
2893 	struct dp_peer *peer;
2894 	QDF_STATUS status;
2895 
2896 	/* MAC addr of link peer may be the same as MLD peer,
2897 	 * so specify the type as CDP_LINK_PEER_TYPE here to
2898 	 * get link peer explicitly.
2899 	 */
2900 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac, false,
2901 				 CDP_LINK_PEER_TYPE);
2902 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
2903 	if (!peer) {
2904 		dp_err_rl("peer(vdev id %d mac " QDF_MAC_ADDR_FMT ") not found",
2905 			  vdev_id, QDF_MAC_ADDR_REF(mac));
2906 		return QDF_STATUS_E_INVAL;
2907 	}
2908 
2909 	status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
2910 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2911 	return status;
2912 }
2913 
2914 /**
2915  * dp_tx_latency_stats_config() - config transmit latency statistics for
2916  * specified vdev
2917  * @soc_hdl: Handle to struct dp_soc
2918  * @vdev_id: vdev id
2919  * @cfg: configuration for transmit latency statistics
2920  *
2921  * Return: QDF_STATUS
2922  */
2923 QDF_STATUS
2924 dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2925 			   struct cdp_tx_latency_config *cfg)
2926 {
2927 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2928 	struct dp_vdev *vdev;
2929 	QDF_STATUS status = QDF_STATUS_E_INVAL;
2930 	uint32_t cca_granularity;
2931 	int type;
2932 
2933 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
2934 	if (!vdev) {
2935 		dp_err_rl("vdev %d does not exist", vdev_id);
2936 		return QDF_STATUS_E_FAILURE;
2937 	}
2938 
2939 	/* disable to ignore upcoming updates */
2940 	qdf_atomic_set(&vdev->tx_latency_cfg.enabled, 0);
2941 	dp_tx_latency_stats_clear_per_vdev(vdev);
2942 
2943 	if (!cfg->enable)
2944 		goto send_htt;
2945 
2946 	qdf_atomic_set(&vdev->tx_latency_cfg.report, (cfg->report ? 1 : 0));
2947 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
2948 		qdf_atomic_set(&vdev->tx_latency_cfg.granularity[type],
2949 			       cfg->granularity[type]);
2950 
2951 send_htt:
2952 	/* in units of ms */
2953 	cca_granularity = cfg->granularity[CDP_TX_LATENCY_TYPE_CCA] / 1000;
2954 	status = dp_h2t_tx_latency_stats_cfg_msg_send(soc, vdev_id,
2955 						      cfg->enable, cfg->period,
2956 						      cca_granularity);
2957 	if (QDF_IS_STATUS_ERROR(status)) {
2958 		dp_err_rl("failed to send htt msg: %d", status);
2959 		goto out;
2960 	}
2961 
2962 	qdf_atomic_set(&vdev->tx_latency_cfg.enabled, (cfg->enable ? 1 : 0));
2963 
2964 out:
2965 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2966 	return status;
2967 }
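
/*
 * Minimal caller sketch (added, illustrative only, not compiled); the field
 * names follow the cdp_tx_latency_config usage above, while the chosen
 * values and the units of "period" are assumptions:
 *
 *	struct cdp_tx_latency_config cfg = {0};
 *
 *	cfg.enable = true;
 *	cfg.report = true;
 *	cfg.period = 2000;
 *	cfg.granularity[CDP_TX_LATENCY_TYPE_DRIVER] = 500;	in microseconds
 *	cfg.granularity[CDP_TX_LATENCY_TYPE_CCA] = 1000;	in microseconds
 *	dp_tx_latency_stats_config(soc_hdl, vdev_id, &cfg);
 */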
2968 
2969 /**
2970  * dp_tx_latency_stats_register_cb() - register transmit latency statistics
2971  * callback
2972  * @handle: Handle to struct dp_soc
2973  * @cb: callback function for transmit latency statistics
2974  *
2975  * Return: QDF_STATUS
2976  */
2977 QDF_STATUS
2978 dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle, cdp_tx_latency_cb cb)
2979 {
2980 	struct dp_soc *soc = (struct dp_soc *)handle;
2981 
2982 	if (!soc || !cb) {
2983 		dp_err("soc or cb is NULL");
2984 		return QDF_STATUS_E_INVAL;
2985 	}
2986 
2987 	soc->tx_latency_cb = cb;
2988 	return QDF_STATUS_SUCCESS;
2989 }
2990 
2991 #else
2992 static inline void
2993 dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
2994 			    struct dp_tx_msdu_info_s *msdu_info,
2995 			    qdf_nbuf_t nbuf)
2996 {
2997 }
2998 
2999 static inline void
3000 dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
3001 			    struct dp_tx_msdu_info_s *msdu_info,
3002 			    struct dp_tx_desc_s *tx_desc)
3003 {
3004 }
3005 
3006 static inline void
3007 dp_tx_latency_stats_update(struct dp_soc *soc,
3008 			   struct dp_txrx_peer *txrx_peer,
3009 			   struct dp_tx_desc_s *tx_desc,
3010 			   struct hal_tx_completion_status *ts,
3011 			   uint8_t link_id)
3012 {
3013 }
3014 #endif
3015 
3016 qdf_nbuf_t
3017 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3018 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
3019 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
3020 {
3021 	struct dp_pdev *pdev = vdev->pdev;
3022 	struct dp_soc *soc = pdev->soc;
3023 	struct dp_tx_desc_s *tx_desc;
3024 	QDF_STATUS status;
3025 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
3026 	uint16_t htt_tcl_metadata = 0;
3027 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
3028 	uint8_t tid = msdu_info->tid;
3029 	struct cdp_tid_tx_stats *tid_stats = NULL;
3030 	qdf_dma_addr_t paddr;
3031 
3032 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
3033 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
3034 			msdu_info, tx_exc_metadata);
3035 	if (!tx_desc) {
3036 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
3037 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
3038 		drop_code = TX_DESC_ERR;
3039 		goto fail_return;
3040 	}
3041 
3042 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
3043 
3044 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
3045 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3046 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
3047 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
3048 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
3049 					    DP_TCL_METADATA_TYPE_PEER_BASED);
3050 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
3051 					       peer_id);
3052 		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
3053 	} else
3054 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3055 
3056 	if (msdu_info->exception_fw)
3057 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
3058 
3059 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
3060 					 !pdev->enhanced_stats_en);
3061 
3062 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
3063 
3064 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
3065 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
3066 	else
3067 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
3068 
3069 	if (!paddr) {
3070 		/* Handle failure */
3071 		dp_err("qdf_nbuf_map failed");
3072 		DP_STATS_INC(vdev,
3073 			     tx_i[msdu_info->xmit_type].dropped.dma_error, 1);
3074 		drop_code = TX_DMA_MAP_ERR;
3075 		goto release_desc;
3076 	}
3077 
3078 	tx_desc->dma_addr = paddr;
3079 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
3080 			       tx_desc->id, DP_TX_DESC_MAP);
3081 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
3082 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
3083 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
3084 					     htt_tcl_metadata,
3085 					     tx_exc_metadata, msdu_info);
3086 
3087 	if (status != QDF_STATUS_SUCCESS) {
3088 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
3089 			     tx_desc, tx_q->ring_id);
3090 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
3091 				       tx_desc->id, DP_TX_DESC_UNMAP);
3092 		dp_tx_nbuf_unmap(soc, tx_desc);
3093 		drop_code = TX_HW_ENQUEUE;
3094 		goto release_desc;
3095 	}
3096 
3097 	dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
3098 
3099 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
3100 	return NULL;
3101 
3102 release_desc:
3103 	dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
3104 
3105 fail_return:
3106 	dp_tx_get_tid(vdev, nbuf, msdu_info);
3107 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
3108 	tid_stats = &pdev->stats.tid_stats.
3109 		    tid_tx_stats[tx_q->ring_id][tid];
3110 	tid_stats->swdrop_cnt[drop_code]++;
3111 	return nbuf;
3112 }
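
/*
 * Usage note for dp_tx_send_msdu_single() above (added, illustrative): on
 * success the nbuf is owned by hardware and NULL is returned; on failure the
 * original nbuf is handed back so the caller can free it, with the reason
 * recorded in the per-TID swdrop_cnt[] and, for EAPOL, in the pdev drop
 * counters. A typical caller therefore does something like:
 *
 *	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */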
3113 
3114 /**
3115  * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
3116  * @soc: Soc handle
3117  * @desc: software Tx descriptor to be processed
3118  *
3119  * Return: 0 if Success
3120  */
3121 #ifdef FEATURE_WLAN_TDLS
3122 static inline int
3123 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
3124 {
3125 	/* If it is TDLS mgmt, don't unmap or free the frame */
3126 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
3127 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
3128 		return 0;
3129 	}
3130 	return 1;
3131 }
3132 #else
3133 static inline int
3134 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
3135 {
3136 	return 1;
3137 }
3138 #endif
3139 
3140 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
3141 			       bool delayed_free)
3142 {
3143 	qdf_nbuf_t nbuf = desc->nbuf;
3144 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
3145 
3146 	/* nbuf already freed in vdev detach path */
3147 	if (!nbuf)
3148 		return NULL;
3149 
3150 	if (!dp_tdls_tx_comp_free_buff(soc, desc))
3151 		return NULL;
3152 
3153 	/* 0 : MSDU buffer, 1 : MLE */
3154 	if (desc->msdu_ext_desc) {
3155 		/* TSO free */
3156 		if (hal_tx_ext_desc_get_tso_enable(
3157 					desc->msdu_ext_desc->vaddr)) {
3158 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
3159 					       desc->id, DP_TX_COMP_MSDU_EXT);
3160 			dp_tx_tso_seg_history_add(soc,
3161 						  desc->msdu_ext_desc->tso_desc,
3162 						  desc->nbuf, desc->id, type);
3163 			/* unmap each TSO seg before freeing the nbuf */
3164 			dp_tx_tso_unmap_segment(soc,
3165 						desc->msdu_ext_desc->tso_desc,
3166 						desc->msdu_ext_desc->
3167 						tso_num_desc);
3168 			goto nbuf_free;
3169 		}
3170 
3171 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
3172 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
3173 			qdf_dma_addr_t iova;
3174 			uint32_t frag_len;
3175 			uint32_t i;
3176 
3177 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
3178 						     QDF_DMA_TO_DEVICE,
3179 						     qdf_nbuf_headlen(nbuf));
3180 
3181 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
3182 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
3183 							      &iova,
3184 							      &frag_len);
3185 				if (!iova || !frag_len)
3186 					break;
3187 
3188 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
3189 						   QDF_DMA_TO_DEVICE);
3190 			}
3191 
3192 			goto nbuf_free;
3193 		}
3194 	}
3195 	/* If it's an ME frame, don't unmap the cloned nbufs */
3196 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
3197 		goto nbuf_free;
3198 
3199 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
3200 	dp_tx_unmap(soc, desc);
3201 
3202 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
3203 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
3204 
3205 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
3206 		return NULL;
3207 
3208 nbuf_free:
3209 	if (delayed_free)
3210 		return nbuf;
3211 
3212 	qdf_nbuf_free(nbuf);
3213 
3214 	return NULL;
3215 }
3216 
3217 /**
3218  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
3219  * @soc: DP soc handle
3220  * @nbuf: skb
3221  * @msdu_info: MSDU info
3222  *
3223  * Return: None
3224  */
3225 static inline void
3226 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
3227 		   struct dp_tx_msdu_info_s *msdu_info)
3228 {
3229 	uint32_t cur_idx;
3230 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
3231 
3232 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
3233 				     qdf_nbuf_headlen(nbuf));
3234 
3235 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
3236 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
3237 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
3238 				    seg->frags[cur_idx].paddr_hi) << 32),
3239 				   seg->frags[cur_idx].len,
3240 				   QDF_DMA_TO_DEVICE);
3241 }
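
/*
 * Illustrative note (a sketch of the convention used above, not new driver
 * logic): each SG fragment address is stored as a paddr_lo/paddr_hi pair
 * and reassembled into one DMA address before unmapping, e.g.
 *
 *	qdf_dma_addr_t iova = (qdf_dma_addr_t)
 *		(seg->frags[i].paddr_lo |
 *		 ((uint64_t)seg->frags[i].paddr_hi) << 32);
 *	qdf_mem_unmap_page(soc->osdev, iova, seg->frags[i].len,
 *			   QDF_DMA_TO_DEVICE);
 *
 * The same split/reassembly convention appears in dp_tx_prepare_sg() and
 * in its map_err unwind path further below.
 */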
3242 
3243 #if QDF_LOCK_STATS
3244 noinline
3245 #else
3246 #endif
3247 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3248 				    struct dp_tx_msdu_info_s *msdu_info)
3249 {
3250 	uint32_t i;
3251 	struct dp_pdev *pdev = vdev->pdev;
3252 	struct dp_soc *soc = pdev->soc;
3253 	struct dp_tx_desc_s *tx_desc;
3254 	bool is_cce_classified = false;
3255 	QDF_STATUS status;
3256 	uint16_t htt_tcl_metadata = 0;
3257 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
3258 	struct cdp_tid_tx_stats *tid_stats = NULL;
3259 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
3260 
3261 	if (msdu_info->frm_type == dp_tx_frm_me)
3262 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
3263 
3264 	i = 0;
3265 	/* Print statement to track i and num_seg */
3266 	/*
3267 	 * For each segment (maps to 1 MSDU), prepare software and hardware
3268 	 * descriptors using information in msdu_info
3269 	 */
3270 	while (i < msdu_info->num_seg) {
3271 		/*
3272 		 * Setup Tx descriptor for an MSDU, and MSDU extension
3273 		 * descriptor
3274 		 */
3275 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
3276 				tx_q->desc_pool_id);
3277 
3278 		if (!tx_desc) {
3279 			if (msdu_info->frm_type == dp_tx_frm_me) {
3280 				prep_desc_fail++;
3281 				dp_tx_me_free_buf(pdev,
3282 					(void *)(msdu_info->u.sg_info
3283 						.curr_seg->frags[0].vaddr));
3284 				if (prep_desc_fail == msdu_info->num_seg) {
3285 					/*
3286 					 * Unmap is needed only if descriptor
3287 					 * preparation failed for all segments.
3288 					 */
3289 					qdf_nbuf_unmap(soc->osdev,
3290 						       msdu_info->u.sg_info.
3291 						       curr_seg->nbuf,
3292 						       QDF_DMA_TO_DEVICE);
3293 				}
3294 				/*
3295 				 * Free the nbuf for the current segment
3296 				 * and make it point to the next in the list.
3297 				 * For ME, there are as many segments as there
3298 				 * are clients.
3299 				 */
3300 				qdf_nbuf_free(msdu_info->u.sg_info
3301 					      .curr_seg->nbuf);
3302 				if (msdu_info->u.sg_info.curr_seg->next) {
3303 					msdu_info->u.sg_info.curr_seg =
3304 						msdu_info->u.sg_info
3305 						.curr_seg->next;
3306 					nbuf = msdu_info->u.sg_info
3307 					       .curr_seg->nbuf;
3308 				}
3309 				i++;
3310 				continue;
3311 			}
3312 
3313 			if (msdu_info->frm_type == dp_tx_frm_tso) {
3314 				dp_tx_tso_seg_history_add(
3315 						soc,
3316 						msdu_info->u.tso_info.curr_seg,
3317 						nbuf, 0, DP_TX_DESC_UNMAP);
3318 				dp_tx_tso_unmap_segment(soc,
3319 							msdu_info->u.tso_info.
3320 							curr_seg,
3321 							msdu_info->u.tso_info.
3322 							tso_num_seg_list);
3323 
3324 				if (msdu_info->u.tso_info.curr_seg->next) {
3325 					msdu_info->u.tso_info.curr_seg =
3326 					msdu_info->u.tso_info.curr_seg->next;
3327 					i++;
3328 					continue;
3329 				}
3330 			}
3331 
3332 			if (msdu_info->frm_type == dp_tx_frm_sg)
3333 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
3334 
3335 			goto done;
3336 		}
3337 
3338 		if (msdu_info->frm_type == dp_tx_frm_me) {
3339 			tx_desc->msdu_ext_desc->me_buffer =
3340 				(struct dp_tx_me_buf_t *)msdu_info->
3341 				u.sg_info.curr_seg->frags[0].vaddr;
3342 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
3343 		}
3344 
3345 		if (is_cce_classified)
3346 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
3347 
3348 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3349 		if (msdu_info->exception_fw) {
3350 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
3351 		}
3352 
3353 		dp_tx_is_hp_update_required(i, msdu_info);
3354 
3355 		/*
3356 		 * For frames with multiple segments (TSO, ME), jump to next
3357 		 * segment.
3358 		 */
3359 		if (msdu_info->frm_type == dp_tx_frm_tso) {
3360 			if (msdu_info->u.tso_info.curr_seg->next) {
3361 				msdu_info->u.tso_info.curr_seg =
3362 					msdu_info->u.tso_info.curr_seg->next;
3363 
3364 				/*
3365 				 * If this is a jumbo nbuf, then increment the
3366 				 * number of nbuf users for each additional
3367 				 * segment of the msdu. This will ensure that
3368 				 * the skb is freed only after receiving tx
3369 				 * completion for all segments of an nbuf
3370 				 */
3371 				qdf_nbuf_inc_users(nbuf);
3372 
3373 				/* Check with MCL if this is needed */
3374 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
3375 				 */
3376 			}
3377 		}
3378 
3379 		dp_tx_update_mcast_param(DP_INVALID_PEER,
3380 					 &htt_tcl_metadata,
3381 					 vdev,
3382 					 msdu_info);
3383 		/*
3384 		 * Enqueue the Tx MSDU descriptor to HW for transmit
3385 		 */
3386 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
3387 						     htt_tcl_metadata,
3388 						     NULL, msdu_info);
3389 
3390 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
3391 
3392 		if (status != QDF_STATUS_SUCCESS) {
3393 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
3394 				   tx_desc, tx_q->ring_id);
3395 
3396 			dp_tx_get_tid(vdev, nbuf, msdu_info);
3397 			tid_stats = &pdev->stats.tid_stats.
3398 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
3399 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
3400 
3401 			if (msdu_info->frm_type == dp_tx_frm_me) {
3402 				hw_enq_fail++;
3403 				if (hw_enq_fail == msdu_info->num_seg) {
3404 					/*
3405 					 * Unmap is needed only if enqueue
3406 					 * failed for all segments.
3407 					 */
3408 					qdf_nbuf_unmap(soc->osdev,
3409 						       msdu_info->u.sg_info.
3410 						       curr_seg->nbuf,
3411 						       QDF_DMA_TO_DEVICE);
3412 				}
3413 				/*
3414 				 * Free the nbuf for the current segment
3415 				 * and make it point to the next in the list.
3416 				 * For ME, there are as many segments as there
3417 				 * are clients.
3418 				 */
3419 				qdf_nbuf_free(msdu_info->u.sg_info
3420 					      .curr_seg->nbuf);
3421 				dp_tx_desc_release(soc, tx_desc,
3422 						   tx_q->desc_pool_id);
3423 				if (msdu_info->u.sg_info.curr_seg->next) {
3424 					msdu_info->u.sg_info.curr_seg =
3425 						msdu_info->u.sg_info
3426 						.curr_seg->next;
3427 					nbuf = msdu_info->u.sg_info
3428 					       .curr_seg->nbuf;
3429 				} else
3430 					break;
3431 				i++;
3432 				continue;
3433 			}
3434 
3435 			/*
3436 			 * For TSO frames, the nbuf users increment done for
3437 			 * the current segment has to be reverted, since the
3438 			 * hw enqueue for this segment failed
3439 			 */
3440 			if (msdu_info->frm_type == dp_tx_frm_tso &&
3441 			    msdu_info->u.tso_info.curr_seg) {
3442 				/*
3443 				 * unmap and free current,
3444 				 * retransmit remaining segments
3445 				 */
3446 				dp_tx_comp_free_buf(soc, tx_desc, false);
3447 				i++;
3448 				dp_tx_desc_release(soc, tx_desc,
3449 						   tx_q->desc_pool_id);
3450 				continue;
3451 			}
3452 
3453 			if (msdu_info->frm_type == dp_tx_frm_sg)
3454 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
3455 
3456 			dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
3457 			goto done;
3458 		}
3459 
3460 		dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
3461 
3462 		/*
3463 		 * TODO
3464 		 * if tso_info structure can be modified to have curr_seg
3465 		 * as first element, following 2 blocks of code (for TSO and SG)
3466 		 * can be combined into 1
3467 		 */
3468 
3469 		/*
3470 		 * For Multicast-Unicast converted packets,
3471 		 * each converted frame (for a client) is represented as
3472 		 * 1 segment
3473 		 */
3474 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
3475 				(msdu_info->frm_type == dp_tx_frm_me)) {
3476 			if (msdu_info->u.sg_info.curr_seg->next) {
3477 				msdu_info->u.sg_info.curr_seg =
3478 					msdu_info->u.sg_info.curr_seg->next;
3479 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
3480 			} else
3481 				break;
3482 		}
3483 		i++;
3484 	}
3485 
3486 	nbuf = NULL;
3487 
3488 done:
3489 	return nbuf;
3490 }
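
/*
 * Usage sketch (assumption, mirroring the callers later in this file):
 * a "prepare" step fills msdu_info before the multi-segment send, and a
 * non-NULL return means the frame (or its remainder) was not consumed:
 *
 *	if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info))
 *		return nbuf;
 *	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
 *	if (nbuf)
 *		... caller retains ownership and may free the nbuf ...
 */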
3491 
3492 /**
3493  * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
3494  *                     for SG frames
3495  * @vdev: DP vdev handle
3496  * @nbuf: skb
3497  * @seg_info: Pointer to Segment info Descriptor to be prepared
3498  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3499  *
3500  * Return: nbuf on success,
3501  *         NULL when it fails to send
3502  */
3503 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3504 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
3505 {
3506 	uint32_t cur_frag, nr_frags, i;
3507 	qdf_dma_addr_t paddr;
3508 	struct dp_tx_sg_info_s *sg_info;
3509 	uint8_t xmit_type = msdu_info->xmit_type;
3510 
3511 	sg_info = &msdu_info->u.sg_info;
3512 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3513 
3514 	if (QDF_STATUS_SUCCESS !=
3515 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
3516 					   QDF_DMA_TO_DEVICE,
3517 					   qdf_nbuf_headlen(nbuf))) {
3518 		dp_tx_err("dma map error");
3519 		DP_STATS_INC(vdev, tx_i[xmit_type].sg.dma_map_error,
3520 			     1);
3521 		qdf_nbuf_free(nbuf);
3522 		return NULL;
3523 	}
3524 
3525 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
3526 	seg_info->frags[0].paddr_lo = paddr;
3527 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
3528 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
3529 	seg_info->frags[0].vaddr = (void *) nbuf;
3530 
3531 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
3532 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
3533 							    nbuf, 0,
3534 							    QDF_DMA_TO_DEVICE,
3535 							    cur_frag)) {
3536 			dp_tx_err("frag dma map error");
3537 			DP_STATS_INC(vdev,
3538 				     tx_i[xmit_type].sg.dma_map_error,
3539 				     1);
3540 			goto map_err;
3541 		}
3542 
3543 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
3544 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
3545 		seg_info->frags[cur_frag + 1].paddr_hi =
3546 			((uint64_t) paddr) >> 32;
3547 		seg_info->frags[cur_frag + 1].len =
3548 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
3549 	}
3550 
3551 	seg_info->frag_cnt = (cur_frag + 1);
3552 	seg_info->total_len = qdf_nbuf_len(nbuf);
3553 	seg_info->next = NULL;
3554 
3555 	sg_info->curr_seg = seg_info;
3556 
3557 	msdu_info->frm_type = dp_tx_frm_sg;
3558 	msdu_info->num_seg = 1;
3559 
3560 	return nbuf;
3561 map_err:
3562 	/* restore paddr into nbuf before calling unmap */
3563 	qdf_nbuf_mapped_paddr_set(nbuf,
3564 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
3565 				  ((uint64_t)
3566 				  seg_info->frags[0].paddr_hi) << 32));
3567 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
3568 				     QDF_DMA_TO_DEVICE,
3569 				     seg_info->frags[0].len);
3570 	for (i = 1; i <= cur_frag; i++) {
3571 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
3572 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
3573 				   seg_info->frags[i].paddr_hi) << 32),
3574 				   seg_info->frags[i].len,
3575 				   QDF_DMA_TO_DEVICE);
3576 	}
3577 	qdf_nbuf_free(nbuf);
3578 	return NULL;
3579 }
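
/*
 * Layout note (illustrative): after dp_tx_prepare_sg() the segment
 * descriptor holds the linear part of the skb in frags[0] and the page
 * fragments in frags[1..nr_frags], e.g. for an skb with two fragments:
 *
 *	frags[0] -> headlen bytes of qdf_nbuf_data(nbuf)
 *	frags[1] -> skb fragment 0
 *	frags[2] -> skb fragment 1
 *	frag_cnt  = 3, total_len = qdf_nbuf_len(nbuf)
 *
 * dp_tx_sg_unmap_buf() above walks the same layout when tearing the
 * mappings down.
 */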
3580 
3581 /**
3582  * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
3583  * @vdev: DP vdev handle
3584  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3585  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
3586  *
3587  * Return: None
3589  */
3590 static
3591 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
3592 				    struct dp_tx_msdu_info_s *msdu_info,
3593 				    uint16_t ppdu_cookie)
3594 {
3595 	struct htt_tx_msdu_desc_ext2_t *meta_data =
3596 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
3597 
3598 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
3599 
3600 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
3601 				(msdu_info->meta_data[5], 1);
3602 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
3603 				(msdu_info->meta_data[5], 1);
3604 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
3605 				(msdu_info->meta_data[6], ppdu_cookie);
3606 
3607 	msdu_info->exception_fw = 1;
3608 	msdu_info->is_tx_sniffer = 1;
3609 }
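
/*
 * Usage sketch (assumption, matching the exception path below): the
 * sniffer metadata is attached when the caller supplies a PPDU cookie,
 * e.g.
 *
 *	if (tx_exc_metadata->is_tx_sniffer)
 *		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
 *					       tx_exc_metadata->ppdu_cookie);
 *
 * which marks the frame for the FW exception path so the cookie is
 * replayed in the PPDU completion.
 */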
3610 
3611 #ifdef MESH_MODE_SUPPORT
3612 
3613 /**
3614  * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
3615  *				and prepare msdu_info for mesh frames.
3616  * @vdev: DP vdev handle
3617  * @nbuf: skb
3618  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3619  *
3620  * Return: NULL on failure,
3621  *         nbuf when extracted successfully
3622  */
3623 static
3624 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3625 				struct dp_tx_msdu_info_s *msdu_info)
3626 {
3627 	struct meta_hdr_s *mhdr;
3628 	struct htt_tx_msdu_desc_ext2_t *meta_data =
3629 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
3630 
3631 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3632 
3633 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
3634 		msdu_info->exception_fw = 0;
3635 		goto remove_meta_hdr;
3636 	}
3637 
3638 	msdu_info->exception_fw = 1;
3639 
3640 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
3641 
3642 	meta_data->host_tx_desc_pool = 1;
3643 	meta_data->update_peer_cache = 1;
3644 	meta_data->learning_frame = 1;
3645 
3646 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
3647 		meta_data->power = mhdr->power;
3648 
3649 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
3650 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
3651 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
3652 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
3653 
3654 		meta_data->dyn_bw = 1;
3655 
3656 		meta_data->valid_pwr = 1;
3657 		meta_data->valid_mcs_mask = 1;
3658 		meta_data->valid_nss_mask = 1;
3659 		meta_data->valid_preamble_type  = 1;
3660 		meta_data->valid_retries = 1;
3661 		meta_data->valid_bw_info = 1;
3662 	}
3663 
3664 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
3665 		meta_data->encrypt_type = 0;
3666 		meta_data->valid_encrypt_type = 1;
3667 		meta_data->learning_frame = 0;
3668 	}
3669 
3670 	meta_data->valid_key_flags = 1;
3671 	meta_data->key_flags = (mhdr->keyix & 0x3);
3672 
3673 remove_meta_hdr:
3674 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
3675 		dp_tx_err("qdf_nbuf_pull_head failed");
3676 		qdf_nbuf_free(nbuf);
3677 		return NULL;
3678 	}
3679 
3680 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
3681 
3682 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
3683 		   " tid %d to_fw %d",
3684 		   msdu_info->meta_data[0],
3685 		   msdu_info->meta_data[1],
3686 		   msdu_info->meta_data[2],
3687 		   msdu_info->meta_data[3],
3688 		   msdu_info->meta_data[4],
3689 		   msdu_info->meta_data[5],
3690 		   msdu_info->tid, msdu_info->exception_fw);
3691 
3692 	return nbuf;
3693 }
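
/*
 * Illustrative note: mesh frames handed down by the caller carry a
 * struct meta_hdr_s prepended to the payload,
 *
 *	[ struct meta_hdr_s | payload frame ... ]
 *
 * The rate/power/key hints are copied into the HTT ext descriptor and the
 * header is then stripped with
 * qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) before the frame
 * continues down the regular transmit path.
 */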
3694 #else
3695 static
3696 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3697 				struct dp_tx_msdu_info_s *msdu_info)
3698 {
3699 	return nbuf;
3700 }
3701 
3702 #endif
3703 
3704 /**
3705  * dp_check_exc_metadata() - Checks if parameters are valid
3706  * @tx_exc: holds all exception path parameters
3707  *
3708  * Return: true when all the parameters are valid else false
3709  *
3710  */
3711 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3712 {
3713 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3714 			    HTT_INVALID_TID);
3715 	bool invalid_encap_type =
3716 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3717 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3718 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3719 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3720 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3721 			       tx_exc->ppdu_cookie == 0);
3722 
3723 	if (tx_exc->is_intrabss_fwd)
3724 		return true;
3725 
3726 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3727 	    invalid_cookie) {
3728 		return false;
3729 	}
3730 
3731 	return true;
3732 }
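
/*
 * Illustrative sketch (assumption, values taken only from the checks
 * above): a minimal exception metadata that passes validation can leave
 * the optional fields at their "invalid/unused" markers, e.g.
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = CDP_INVALID_PEER;
 *	meta.tid = HTT_INVALID_TID;
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;
 *
 * A tid >= DP_MAX_TIDS (other than HTT_INVALID_TID) or a tx sniffer frame
 * with ppdu_cookie == 0 is rejected, while is_intrabss_fwd bypasses the
 * checks entirely.
 */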
3733 
3734 #ifdef ATH_SUPPORT_IQUE
3735 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3736 {
3737 	qdf_ether_header_t *eh;
3738 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3739 	/* Mcast to Ucast Conversion */
3740 	if (qdf_likely(!vdev->mcast_enhancement_en))
3741 		return true;
3742 
3743 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3744 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3745 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3746 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3747 		qdf_nbuf_set_next(nbuf, NULL);
3748 
3749 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].mcast_en.mcast_pkt, 1,
3750 				 qdf_nbuf_len(nbuf));
3751 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3752 				QDF_STATUS_SUCCESS) {
3753 			return false;
3754 		}
3755 
3756 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3757 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3758 					QDF_STATUS_SUCCESS) {
3759 				return false;
3760 			}
3761 		}
3762 	}
3763 
3764 	return true;
3765 }
3766 #else
3767 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3768 {
3769 	return true;
3770 }
3771 #endif
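
/*
 * Usage note (sketch): a false return from dp_tx_mcast_enhance() means the
 * frame was consumed by the mcast-to-ucast conversion
 * (dp_tx_prepare_send_me()/dp_tx_prepare_send_igmp_me()), so the caller in
 * dp_tx_send() stops processing that nbuf, e.g.
 *
 *	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
 *		return NULL;
 */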
3772 
3773 #ifdef QCA_SUPPORT_WDS_EXTENDED
3774 /**
3775  * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
3776  * @vdev: vdev handle
3777  * @nbuf: skb
3778  *
3779  * Return: true if frame is dropped, false otherwise
3780  */
3781 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3782 {
3783 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3784 
3785 	/* Drop Tx mcast if drop_tx_mcast is set and WDS Extended is enabled */
3786 	if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
3787 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3788 						qdf_nbuf_data(nbuf);
3789 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3790 			DP_STATS_INC(vdev,
3791 				     tx_i[xmit_type].dropped.tx_mcast_drop, 1);
3792 			return true;
3793 		}
3794 	}
3795 
3796 	return false;
3797 }
3798 #else
3799 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3800 {
3801 	return false;
3802 }
3803 #endif
3804 /**
3805  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3806  * @nbuf: qdf_nbuf_t
3807  * @vdev: struct dp_vdev *
3808  *
3809  * Allow packet for processing only if it is for peer client which is
3810  * connected with same vap. Drop packet if client is connected to
3811  * different vap.
3812  *
3813  * Return: QDF_STATUS
3814  */
3815 static inline QDF_STATUS
3816 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3817 {
3818 	struct dp_ast_entry *dst_ast_entry = NULL;
3819 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3820 
3821 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3822 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3823 		return QDF_STATUS_SUCCESS;
3824 
3825 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3826 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3827 							eh->ether_dhost,
3828 							vdev->vdev_id);
3829 
3830 	/* If there is no ast entry, return failure */
3831 	if (qdf_unlikely(!dst_ast_entry)) {
3832 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3833 		return QDF_STATUS_E_FAILURE;
3834 	}
3835 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3836 
3837 	return QDF_STATUS_SUCCESS;
3838 }
3839 
3840 /**
3841  * dp_tx_nawds_handler() - NAWDS handler
3842  *
3843  * @soc: DP soc handle
3844  * @vdev: DP vdev handle
3845  * @msdu_info: msdu_info required to create HTT metadata
3846  * @nbuf: skb
3847  * @sa_peer_id: source peer id
3848  *
3849  * This function transmits the multicast frame to each NAWDS-enabled
3850  * peer on the vdev, using that peer's peer id.
3851  *
3852  * Return: none
3853  */
3854 
3855 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3856 			 struct dp_tx_msdu_info_s *msdu_info,
3857 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3858 {
3859 	struct dp_peer *peer = NULL;
3860 	qdf_nbuf_t nbuf_clone = NULL;
3861 	uint16_t peer_id = DP_INVALID_PEER;
3862 	struct dp_txrx_peer *txrx_peer;
3863 	uint8_t link_id = 0;
3864 
3865 	/* This check avoids forwarding packets for entries that are
3866 	 * present in the AST table but do not yet have a valid peer id.
3867 	 */
3868 	if (sa_peer_id == HTT_INVALID_PEER)
3869 		return;
3870 
3871 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3872 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3873 		txrx_peer = dp_get_txrx_peer(peer);
3874 		if (!txrx_peer)
3875 			continue;
3876 
3877 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3878 			peer_id = peer->peer_id;
3879 
3880 			if (!dp_peer_is_primary_link_peer(peer))
3881 				continue;
3882 
3883 			/* In the case of a WDS ext peer, mcast traffic will
3884 			 * be sent as part of the VLAN interface
3885 			 */
3886 			if (dp_peer_is_wds_ext_peer(txrx_peer))
3887 				continue;
3888 
3889 			/* Multicast packets need to be
3890 			 * dropped in case of intra-BSS forwarding
3891 			 */
3892 			if (sa_peer_id == txrx_peer->peer_id) {
3893 				dp_tx_debug("multicast packet");
3894 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3895 							  tx.nawds_mcast_drop,
3896 							  1, link_id);
3897 				continue;
3898 			}
3899 
3900 			nbuf_clone = qdf_nbuf_clone(nbuf);
3901 
3902 			if (!nbuf_clone) {
3903 				QDF_TRACE(QDF_MODULE_ID_DP,
3904 					  QDF_TRACE_LEVEL_ERROR,
3905 					  FL("nbuf clone failed"));
3906 				break;
3907 			}
3908 
3909 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3910 							    msdu_info, peer_id,
3911 							    NULL);
3912 
3913 			if (nbuf_clone) {
3914 				dp_tx_debug("pkt send failed");
3915 				qdf_nbuf_free(nbuf_clone);
3916 			} else {
3917 				if (peer_id != DP_INVALID_PEER)
3918 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3919 								      tx.nawds_mcast,
3920 								      1, qdf_nbuf_len(nbuf), link_id);
3921 			}
3922 		}
3923 	}
3924 
3925 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3926 }
3927 
3928 #ifdef WLAN_MCAST_MLO
3929 static inline bool
3930 dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3931 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3932 {
3933 	if (!tx_exc_metadata->is_mlo_mcast && qdf_unlikely(vdev->mesh_vdev))
3934 		return true;
3935 
3936 	return false;
3937 }
3938 #else
3939 static inline bool
3940 dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3941 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3942 {
3943 	if (qdf_unlikely(vdev->mesh_vdev))
3944 		return true;
3945 
3946 	return false;
3947 }
3948 #endif
3949 
3950 qdf_nbuf_t
3951 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3952 		     qdf_nbuf_t nbuf,
3953 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3954 {
3955 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3956 	struct dp_tx_msdu_info_s msdu_info;
3957 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3958 						     DP_MOD_ID_TX_EXCEPTION);
3959 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3960 
3961 	if (qdf_unlikely(!vdev))
3962 		goto fail;
3963 
3964 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3965 
3966 	if (!tx_exc_metadata)
3967 		goto fail;
3968 
3969 	msdu_info.tid = tx_exc_metadata->tid;
3970 	msdu_info.xmit_type = xmit_type;
3971 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3972 			 QDF_MAC_ADDR_REF(nbuf->data));
3973 
3974 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
3975 
3976 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3977 		dp_tx_err("Invalid parameters in exception path");
3978 		goto fail;
3979 	}
3980 
3981 	/* for peer based metadata check if peer is valid */
3982 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3983 		struct dp_peer *peer = NULL;
3984 
3985 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3986 					      tx_exc_metadata->peer_id,
3987 					      DP_MOD_ID_TX_EXCEPTION);
3988 		if (qdf_unlikely(!peer)) {
3989 			DP_STATS_INC(vdev,
3990 			     tx_i[xmit_type].dropped.invalid_peer_id_in_exc_path,
3991 			     1);
3992 			goto fail;
3993 		}
3994 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3995 	}
3996 	/* Basic sanity checks for unsupported packets */
3997 
3998 	/* MESH mode */
3999 	if (dp_tx_check_mesh_vdev(vdev, tx_exc_metadata)) {
4000 		dp_tx_err("Mesh mode is not supported in exception path");
4001 		goto fail;
4002 	}
4003 
4004 	/*
4005 	 * Classify the frame and call corresponding
4006 	 * "prepare" function which extracts the segment (TSO)
4007 	 * and fragmentation information (for TSO , SG, ME, or Raw)
4008 	 * into MSDU_INFO structure which is later used to fill
4009 	 * SW and HW descriptors.
4010 	 */
4011 	if (qdf_nbuf_is_tso(nbuf)) {
4012 		dp_verbose_debug("TSO frame %pK", vdev);
4013 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
4014 				 qdf_nbuf_len(nbuf));
4015 
4016 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
4017 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
4018 					 qdf_nbuf_len(nbuf));
4019 			goto fail;
4020 		}
4021 
4022 		DP_STATS_INC(vdev,
4023 			     tx_i[xmit_type].rcvd.num, msdu_info.num_seg - 1);
4024 
4025 		goto send_multiple;
4026 	}
4027 
4028 	/* SG */
4029 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
4030 		struct dp_tx_seg_info_s seg_info = {0};
4031 
4032 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
4033 		if (!nbuf)
4034 			goto fail;
4035 
4036 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
4037 
4038 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
4039 				 qdf_nbuf_len(nbuf));
4040 
4041 		goto send_multiple;
4042 	}
4043 
4044 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
4045 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sniffer_rcvd, 1,
4046 				 qdf_nbuf_len(nbuf));
4047 
4048 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
4049 					       tx_exc_metadata->ppdu_cookie);
4050 	}
4051 
4052 	/*
4053 	 * Get HW Queue to use for this frame.
4054 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
4055 	 * dedicated for data and 1 for command.
4056 	 * "queue_id" maps to one hardware ring.
4057 	 * With each ring, we also associate a unique Tx descriptor pool
4058 	 * to minimize lock contention for these resources.
4059 	 */
4060 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
4061 	DP_STATS_INC(vdev,
4062 		     tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
4063 		     1);
4064 
4065 	/*
4066 	 * If the packet is a mcast packet, send it through the mlo_mcast
4067 	 * handler so it is replicated for all partner vdevs.
4068 	 */
4069 
4070 	if (soc->arch_ops.dp_tx_mlo_mcast_send) {
4071 		nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
4072 							  nbuf,
4073 							  tx_exc_metadata);
4074 		if (!nbuf)
4075 			goto fail;
4076 	}
4077 
4078 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
4079 		if (qdf_unlikely(vdev->nawds_enabled)) {
4080 			/*
4081 			 * This is a multicast packet
4082 			 */
4083 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
4084 					    tx_exc_metadata->peer_id);
4085 			DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
4086 					 1, qdf_nbuf_len(nbuf));
4087 		}
4088 
4089 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
4090 					      DP_INVALID_PEER, NULL);
4091 	} else {
4092 		/*
4093 		 * Check exception descriptors
4094 		 */
4095 		if (dp_tx_exception_limit_check(vdev, xmit_type))
4096 			goto fail;
4097 
4098 		/*  Single linear frame */
4099 		/*
4100 		 * If nbuf is a simple linear frame, use send_single function to
4101 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
4102 		 * SRNG. There is no need to set up an MSDU extension descriptor.
4103 		 */
4104 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
4105 					      tx_exc_metadata->peer_id,
4106 					      tx_exc_metadata);
4107 	}
4108 
4109 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4110 	return nbuf;
4111 
4112 send_multiple:
4113 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4114 
4115 fail:
4116 	if (vdev)
4117 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4118 	dp_verbose_debug("pkt send failed");
4119 	return nbuf;
4120 }
4121 
4122 qdf_nbuf_t
4123 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
4124 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
4125 				   struct cdp_tx_exception_metadata *tx_exc_metadata)
4126 {
4127 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4128 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4129 						     DP_MOD_ID_TX_EXCEPTION);
4130 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4131 
4132 	if (qdf_unlikely(!vdev))
4133 		goto fail;
4134 
4135 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
4136 			== QDF_STATUS_E_FAILURE)) {
4137 		DP_STATS_INC(vdev,
4138 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
4139 			     1);
4140 		goto fail;
4141 	}
4142 
4143 	/* Drop the reference here; it is taken again inside dp_tx_send_exception() */
4144 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4145 
4146 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
4147 
4148 fail:
4149 	if (vdev)
4150 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4151 	dp_verbose_debug("pkt send failed");
4152 	return nbuf;
4153 }
4154 
4155 #ifdef MESH_MODE_SUPPORT
4156 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4157 			   qdf_nbuf_t nbuf)
4158 {
4159 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4160 	struct meta_hdr_s *mhdr;
4161 	qdf_nbuf_t nbuf_mesh = NULL;
4162 	qdf_nbuf_t nbuf_clone = NULL;
4163 	struct dp_vdev *vdev;
4164 	uint8_t no_enc_frame = 0;
4165 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4166 
4167 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
4168 	if (!nbuf_mesh) {
4169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4170 				"qdf_nbuf_unshare failed");
4171 		return nbuf;
4172 	}
4173 
4174 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
4175 	if (!vdev) {
4176 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4177 				"vdev is NULL for vdev_id %d", vdev_id);
4178 		return nbuf;
4179 	}
4180 
4181 	nbuf = nbuf_mesh;
4182 
4183 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
4184 
4185 	if ((vdev->sec_type != cdp_sec_type_none) &&
4186 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
4187 		no_enc_frame = 1;
4188 
4189 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
4190 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
4191 
4192 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
4193 		       !no_enc_frame) {
4194 		nbuf_clone = qdf_nbuf_clone(nbuf);
4195 		if (!nbuf_clone) {
4196 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4197 				"qdf_nbuf_clone failed");
4198 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
4199 			return nbuf;
4200 		}
4201 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
4202 	}
4203 
4204 	if (nbuf_clone) {
4205 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
4206 			DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw,
4207 				     1);
4208 		} else {
4209 			qdf_nbuf_free(nbuf_clone);
4210 		}
4211 	}
4212 
4213 	if (no_enc_frame)
4214 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
4215 	else
4216 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
4217 
4218 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
4219 	if ((!nbuf) && no_enc_frame) {
4220 		DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw, 1);
4221 	}
4222 
4223 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
4224 	return nbuf;
4225 }
4226 
4227 #else
4228 
4229 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4230 			   qdf_nbuf_t nbuf)
4231 {
4232 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
4233 }
4234 
4235 #endif
4236 
4237 #ifdef DP_UMAC_HW_RESET_SUPPORT
4238 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4239 		      qdf_nbuf_t nbuf)
4240 {
4241 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4242 	struct dp_vdev *vdev = NULL;
4243 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4244 
4245 	vdev = soc->vdev_id_map[vdev_id];
4246 	if (qdf_unlikely(!vdev))
4247 		return nbuf;
4248 
4249 	DP_STATS_INC(vdev, tx_i[xmit_type].dropped.drop_ingress, 1);
4250 	return nbuf;
4251 }
4252 
4253 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4254 			  qdf_nbuf_t nbuf,
4255 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
4256 {
4257 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
4258 }
4259 #endif
4260 
4261 #ifdef FEATURE_DIRECT_LINK
4262 /**
4263  * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
4264  * @nbuf: skb
4265  * @vdev: DP vdev handle
4266  *
4267  * Return: None
4268  */
4269 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
4270 {
4271 	if (qdf_unlikely(vdev->to_fw))
4272 		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
4273 }
4274 #else
4275 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
4276 {
4277 }
4278 #endif
4279 
4280 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4281 		      qdf_nbuf_t nbuf)
4282 {
4283 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4284 	uint16_t peer_id = HTT_INVALID_PEER;
4285 	/*
4286 	 * Doing a memzero causes additional function call overhead,
4287 	 * so use static stack initialization instead.
4288 	 */
4289 	struct dp_tx_msdu_info_s msdu_info = {0};
4290 	struct dp_vdev *vdev = NULL;
4291 	qdf_nbuf_t end_nbuf = NULL;
4292 	uint8_t xmit_type;
4293 
4294 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4295 		return nbuf;
4296 
4297 	/*
4298 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
4299 	 * it in the per-packet path.
4300 	 *
4301 	 * In this path, the vdev memory is already protected by the
4302 	 * netdev tx lock.
4303 	 */
4304 	vdev = soc->vdev_id_map[vdev_id];
4305 	if (qdf_unlikely(!vdev))
4306 		return nbuf;
4307 
4308 	dp_tx_get_driver_ingress_ts(vdev, &msdu_info, nbuf);
4309 
4310 	dp_vdev_tx_mark_to_fw(nbuf, vdev);
4311 
4312 	/*
4313 	 * Set Default Host TID value to invalid TID
4314 	 * (TID override disabled)
4315 	 */
4316 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
4317 	xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4318 	msdu_info.xmit_type = xmit_type;
4319 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
4320 
4321 	if (qdf_unlikely(vdev->mesh_vdev)) {
4322 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
4323 								&msdu_info);
4324 		if (!nbuf_mesh) {
4325 			dp_verbose_debug("Extracting mesh metadata failed");
4326 			return nbuf;
4327 		}
4328 		nbuf = nbuf_mesh;
4329 	}
4330 
4331 	/*
4332 	 * Get HW Queue to use for this frame.
4333 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
4334 	 * dedicated for data and 1 for command.
4335 	 * "queue_id" maps to one hardware ring.
4336 	 * With each ring, we also associate a unique Tx descriptor pool
4337 	 * to minimize lock contention for these resources.
4338 	 */
4339 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
4340 	DP_STATS_INC(vdev,
4341 		     tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
4342 		     1);
4343 
4344 	/*
4345 	 * TCL H/W supports 2 DSCP-TID mapping tables.
4346 	 *  Table 1 - Default DSCP-TID mapping table
4347 	 *  Table 2 - 1 DSCP-TID override table
4348 	 *
4349 	 * If we need a different DSCP-TID mapping for this vap,
4350 	 * call tid_classify to extract DSCP/ToS from frame and
4351 	 * map to a TID and store in msdu_info. This is later used
4352 	 * to fill in TCL Input descriptor (per-packet TID override).
4353 	 */
4354 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
4355 
4356 	/*
4357 	 * Classify the frame and call corresponding
4358 	 * "prepare" function which extracts the segment (TSO)
4359 	 * and fragmentation information (for TSO , SG, ME, or Raw)
4360 	 * into MSDU_INFO structure which is later used to fill
4361 	 * SW and HW descriptors.
4362 	 */
4363 	if (qdf_nbuf_is_tso(nbuf)) {
4364 		dp_verbose_debug("TSO frame %pK", vdev);
4365 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
4366 				 qdf_nbuf_len(nbuf));
4367 
4368 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
4369 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
4370 					 qdf_nbuf_len(nbuf));
4371 			return nbuf;
4372 		}
4373 
4374 		DP_STATS_INC(vdev, tx_i[xmit_type].rcvd.num,
4375 			     msdu_info.num_seg - 1);
4376 
4377 		goto send_multiple;
4378 	}
4379 
4380 	/* SG */
4381 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
4382 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
4383 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
4384 				return nbuf;
4385 		} else {
4386 			struct dp_tx_seg_info_s seg_info = {0};
4387 
4388 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
4389 				goto send_single;
4390 
4391 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
4392 						&msdu_info);
4393 			if (!nbuf)
4394 				return NULL;
4395 
4396 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
4397 
4398 			DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
4399 					 qdf_nbuf_len(nbuf));
4400 
4401 			goto send_multiple;
4402 		}
4403 	}
4404 
4405 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
4406 		return NULL;
4407 
4408 	if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
4409 		return nbuf;
4410 
4411 	/* RAW */
4412 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
4413 		struct dp_tx_seg_info_s seg_info = {0};
4414 
4415 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
4416 		if (!nbuf)
4417 			return NULL;
4418 
4419 		dp_verbose_debug("Raw frame %pK", vdev);
4420 
4421 		goto send_multiple;
4422 
4423 	}
4424 
4425 	if (qdf_unlikely(vdev->nawds_enabled)) {
4426 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
4427 					  qdf_nbuf_data(nbuf);
4428 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
4429 			uint16_t sa_peer_id = DP_INVALID_PEER;
4430 
4431 			if (!soc->ast_offload_support) {
4432 				struct dp_ast_entry *ast_entry = NULL;
4433 
4434 				qdf_spin_lock_bh(&soc->ast_lock);
4435 				ast_entry = dp_peer_ast_hash_find_by_pdevid
4436 					(soc,
4437 					 (uint8_t *)(eh->ether_shost),
4438 					 vdev->pdev->pdev_id);
4439 				if (ast_entry)
4440 					sa_peer_id = ast_entry->peer_id;
4441 				qdf_spin_unlock_bh(&soc->ast_lock);
4442 			}
4443 
4444 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
4445 					    sa_peer_id);
4446 		}
4447 		peer_id = DP_INVALID_PEER;
4448 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
4449 				 1, qdf_nbuf_len(nbuf));
4450 	}
4451 
4452 send_single:
4453 	/*  Single linear frame */
4454 	/*
4455 	 * If nbuf is a simple linear frame, use send_single function to
4456 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
4457 	 * SRNG. There is no need to set up an MSDU extension descriptor.
4458 	 */
4459 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
4460 					      peer_id, end_nbuf);
4461 	return nbuf;
4462 
4463 send_multiple:
4464 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4465 
4466 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
4467 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
4468 
4469 	return nbuf;
4470 }
4471 
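/*
 * Usage sketch (assumption about the OS shim caller): the transmit hook
 * typically treats a non-NULL return as "not consumed" and frees the
 * buffer itself, mirroring how dp_tx_send_mesh() handles its clone:
 *
 *	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */
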
4472 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
4473 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
4474 {
4475 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4476 	struct dp_vdev *vdev = NULL;
4477 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4478 
4479 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4480 		return nbuf;
4481 
4482 	/*
4483 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
4484 	 * it in the per-packet path.
4485 	 *
4486 	 * In this path, the vdev memory is already protected by the
4487 	 * netdev tx lock.
4488 	 */
4489 	vdev = soc->vdev_id_map[vdev_id];
4490 	if (qdf_unlikely(!vdev))
4491 		return nbuf;
4492 
4493 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
4494 			== QDF_STATUS_E_FAILURE)) {
4495 		DP_STATS_INC(vdev,
4496 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
4497 			     1);
4498 		return nbuf;
4499 	}
4500 
4501 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
4502 }
4503 
4504 #ifdef UMAC_SUPPORT_PROXY_ARP
4505 /**
4506  * dp_tx_proxy_arp() - Tx proxy arp handler
4507  * @vdev: datapath vdev handle
4508  * @nbuf: sk buffer
4509  *
4510  * Return: status
4511  */
4512 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
4513 {
4514 	if (vdev->osif_proxy_arp)
4515 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
4516 
4517 	/*
4518 	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
4519 	 * osif_proxy_arp has a valid function pointer assigned
4520 	 * to it
4521 	 */
4522 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
4523 
4524 	return QDF_STATUS_NOT_INITIALIZED;
4525 }
4526 #else
4527 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
4528 {
4529 	return QDF_STATUS_SUCCESS;
4530 }
4531 #endif
4532 
4533 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
4534 	!defined(CONFIG_MLO_SINGLE_DEV)
4535 #ifdef WLAN_MCAST_MLO
4536 static bool
4537 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4538 		       struct dp_tx_desc_s *tx_desc,
4539 		       qdf_nbuf_t nbuf,
4540 		       uint8_t reinject_reason)
4541 {
4542 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
4543 		if (soc->arch_ops.dp_tx_mcast_handler)
4544 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
4545 
4546 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4547 		return true;
4548 	}
4549 
4550 	return false;
4551 }
4552 #else /* WLAN_MCAST_MLO */
4553 static inline bool
4554 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4555 		       struct dp_tx_desc_s *tx_desc,
4556 		       qdf_nbuf_t nbuf,
4557 		       uint8_t reinject_reason)
4558 {
4559 	return false;
4560 }
4561 #endif /* WLAN_MCAST_MLO */
4562 #else
4563 static inline bool
4564 dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4565 		       struct dp_tx_desc_s *tx_desc,
4566 		       qdf_nbuf_t nbuf,
4567 		       uint8_t reinject_reason)
4568 {
4569 	return false;
4570 }
4571 #endif
4572 
4573 void dp_tx_reinject_handler(struct dp_soc *soc,
4574 			    struct dp_vdev *vdev,
4575 			    struct dp_tx_desc_s *tx_desc,
4576 			    uint8_t *status,
4577 			    uint8_t reinject_reason)
4578 {
4579 	struct dp_peer *peer = NULL;
4580 	uint32_t peer_id = HTT_INVALID_PEER;
4581 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4582 	qdf_nbuf_t nbuf_copy = NULL;
4583 	struct dp_tx_msdu_info_s msdu_info;
4584 #ifdef WDS_VENDOR_EXTENSION
4585 	int is_mcast = 0, is_ucast = 0;
4586 	int num_peers_3addr = 0;
4587 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
4588 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
4589 #endif
4590 	struct dp_txrx_peer *txrx_peer;
4591 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4592 
4593 	qdf_assert(vdev);
4594 
4595 	dp_tx_debug("Tx reinject path");
4596 
4597 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
4598 			 qdf_nbuf_len(tx_desc->nbuf));
4599 
4600 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
4601 		return;
4602 
4603 #ifdef WDS_VENDOR_EXTENSION
4604 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
4605 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
4606 	} else {
4607 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
4608 	}
4609 	is_ucast = !is_mcast;
4610 
4611 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4612 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4613 		txrx_peer = dp_get_txrx_peer(peer);
4614 
4615 		if (!txrx_peer || txrx_peer->bss_peer)
4616 			continue;
4617 
4618 		/* Detect wds peers that use 3-addr framing for mcast.
4619 		 * If there are any, the bss_peer is used to send the
4620 		 * mcast frame using 3-addr format. All wds enabled
4621 		 * peers that use 4-addr framing for mcast frames will
4622 		 * be duplicated and sent as 4-addr frames below.
4623 		 */
4624 		if (!txrx_peer->wds_enabled ||
4625 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
4626 			num_peers_3addr = 1;
4627 			break;
4628 		}
4629 	}
4630 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4631 #endif
4632 
4633 	if (qdf_unlikely(vdev->mesh_vdev)) {
4634 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
4635 	} else {
4636 		qdf_spin_lock_bh(&vdev->peer_list_lock);
4637 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4638 			txrx_peer = dp_get_txrx_peer(peer);
4639 			if (!txrx_peer)
4640 				continue;
4641 
4642 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
4643 #ifdef WDS_VENDOR_EXTENSION
4644 			/*
4645 			 * . if 3-addr STA, then send on BSS Peer
4646 			 * . if Peer WDS enabled and accept 4-addr mcast,
4647 			 * send mcast on that peer only
4648 			 * . if Peer WDS enabled and accept 4-addr ucast,
4649 			 * send ucast on that peer only
4650 			 */
4651 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
4652 			 (txrx_peer->wds_enabled &&
4653 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
4654 			 (is_ucast &&
4655 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
4656 #else
4657 			(txrx_peer->bss_peer &&
4658 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
4659 #endif
4660 				peer_id = DP_INVALID_PEER;
4661 
4662 				nbuf_copy = qdf_nbuf_copy(nbuf);
4663 
4664 				if (!nbuf_copy) {
4665 					dp_tx_debug("nbuf copy failed");
4666 					break;
4667 				}
4668 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
4669 				dp_tx_get_queue(vdev, nbuf,
4670 						&msdu_info.tx_queue);
4671 				msdu_info.xmit_type =
4672 					qdf_nbuf_get_vdev_xmit_type(nbuf);
4673 				nbuf_copy = dp_tx_send_msdu_single(vdev,
4674 						nbuf_copy,
4675 						&msdu_info,
4676 						peer_id,
4677 						NULL);
4678 
4679 				if (nbuf_copy) {
4680 					dp_tx_debug("pkt send failed");
4681 					qdf_nbuf_free(nbuf_copy);
4682 				}
4683 			}
4684 		}
4685 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4686 
4687 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
4688 					     QDF_DMA_TO_DEVICE, nbuf->len);
4689 		qdf_nbuf_free(nbuf);
4690 	}
4691 
4692 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4693 }
4694 
4695 void dp_tx_inspect_handler(struct dp_soc *soc,
4696 			   struct dp_vdev *vdev,
4697 			   struct dp_tx_desc_s *tx_desc,
4698 			   uint8_t *status)
4699 {
4700 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
4701 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4702 			"%s Tx inspect path",
4703 			__func__);
4704 
4705 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].inspect_pkts, 1,
4706 			 qdf_nbuf_len(tx_desc->nbuf));
4707 
4708 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4709 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4710 }
4711 
4712 #ifdef MESH_MODE_SUPPORT
4713 /**
4714  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
4715  *                                         in mesh meta header
4716  * @tx_desc: software descriptor head pointer
4717  * @ts: pointer to tx completion stats
4718  * Return: none
4719  */
4720 static
4721 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4722 		struct hal_tx_completion_status *ts)
4723 {
4724 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4725 
4726 	if (!tx_desc->msdu_ext_desc) {
4727 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4728 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4729 				"netbuf %pK offset %d",
4730 				netbuf, tx_desc->pkt_offset);
4731 			return;
4732 		}
4733 	}
4734 }
4735 
4736 #else
4737 static
4738 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4739 		struct hal_tx_completion_status *ts)
4740 {
4741 }
4742 
4743 #endif
4744 
4745 #ifdef CONFIG_SAWF
4746 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4747 					 struct dp_vdev *vdev,
4748 					 struct dp_txrx_peer *txrx_peer,
4749 					 struct dp_tx_desc_s *tx_desc,
4750 					 struct hal_tx_completion_status *ts,
4751 					 uint8_t tid)
4752 {
4753 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4754 					   ts, tid);
4755 }
4756 
4757 static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4758 				    uint32_t nw_delay,
4759 				    uint32_t sw_delay,
4760 				    uint32_t hw_delay)
4761 {
4762 	dp_peer_tid_delay_avg(tx_delay,
4763 			      nw_delay,
4764 			      sw_delay,
4765 			      hw_delay);
4766 }
4767 #else
4768 static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4769 					 struct dp_vdev *vdev,
4770 					 struct dp_txrx_peer *txrx_peer,
4771 					 struct dp_tx_desc_s *tx_desc,
4772 					 struct hal_tx_completion_status *ts,
4773 					 uint8_t tid)
4774 {
4775 }
4776 
4777 static inline void
4778 dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4779 			uint32_t nw_delay, uint32_t sw_delay,
4780 			uint32_t hw_delay)
4781 {
4782 }
4783 #endif
4784 
4785 #ifdef QCA_PEER_EXT_STATS
4786 #ifdef WLAN_CONFIG_TX_DELAY
4787 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4788 				    struct dp_tx_desc_s *tx_desc,
4789 				    struct hal_tx_completion_status *ts,
4790 				    struct dp_vdev *vdev)
4791 {
4792 	struct dp_soc *soc = vdev->pdev->soc;
4793 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4794 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4795 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4796 
4797 	if (!ts->valid)
4798 		return;
4799 
4800 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4801 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4802 
4803 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4804 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4805 
4806 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4807 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4808 							  &fwhw_transmit_delay))
4809 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4810 					     fwhw_transmit_delay);
4811 
4812 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4813 				fwhw_transmit_delay);
4814 }
4815 #else
4816 /**
4817  * dp_tx_compute_tid_delay() - Compute per TID delay
4818  * @stats: Per TID delay stats
4819  * @tx_desc: Software Tx descriptor
4820  * @ts: Tx completion status
4821  * @vdev: vdev
4822  *
4823  * Compute the software enqueue and hw enqueue delays and
4824  * update the respective histograms
4825  *
4826  * Return: void
4827  */
4828 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4829 				    struct dp_tx_desc_s *tx_desc,
4830 				    struct hal_tx_completion_status *ts,
4831 				    struct dp_vdev *vdev)
4832 {
4833 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4834 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4835 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4836 
4837 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4838 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4839 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4840 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4841 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4842 					 timestamp_hw_enqueue);
4843 
4844 	/*
4845 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
4846 	 */
4847 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4848 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4849 }
4850 #endif
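
/*
 * Worked example (illustrative, for the non-WLAN_CONFIG_TX_DELAY variant
 * above): with ingress at t0, HW enqueue at t0 + 2 ms and the completion
 * processed at t0 + 7 ms,
 *
 *	sw_enqueue_delay    = timestamp_hw_enqueue - timestamp_ingress = 2 ms
 *	fwhw_transmit_delay = current_timestamp - timestamp_hw_enqueue = 5 ms
 *
 * and each value is folded into its histogram via dp_hist_update_stats().
 */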
4851 
4852 /**
4853  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4854  * @txrx_peer: DP peer context
4855  * @tx_desc: Tx software descriptor
4856  * @ts: Tx completion status
4857  * @ring_id: Tx completion ring ID / CPU context ID
4858  *
4859  * Update the peer extended stats. These are enhanced
4860  * per-MSDU delay stats.
4861  *
4862  * Return: void
4863  */
4864 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4865 					  struct dp_tx_desc_s *tx_desc,
4866 					  struct hal_tx_completion_status *ts,
4867 					  uint8_t ring_id)
4868 {
4869 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4870 	struct dp_soc *soc = NULL;
4871 	struct dp_peer_delay_stats *delay_stats = NULL;
4872 	uint8_t tid;
4873 
4874 	soc = pdev->soc;
4875 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4876 		return;
4877 
4878 	if (!txrx_peer->delay_stats)
4879 		return;
4880 
4881 	tid = ts->tid;
4882 	delay_stats = txrx_peer->delay_stats;
4883 
4884 	/*
4885 	 * For packets with an out-of-range TID, use the last data TID
4886 	 */
4887 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4888 		tid = CDP_MAX_DATA_TIDS - 1;
4889 
4890 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4891 				tx_desc, ts, txrx_peer->vdev);
4892 }
4893 #else
4894 static inline
4895 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4896 				   struct dp_tx_desc_s *tx_desc,
4897 				   struct hal_tx_completion_status *ts,
4898 				   uint8_t ring_id)
4899 {
4900 }
4901 #endif
4902 
4903 #ifdef WLAN_PEER_JITTER
4904 /**
4905  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4906  * @curr_delay: Current delay
4907  * @prev_delay: Previous delay
4908  * @avg_jitter: Average Jitter
4909  * Return: Newly Computed Average Jitter
4910  */
4911 static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4912 					    uint32_t prev_delay,
4913 					    uint32_t avg_jitter)
4914 {
4915 	uint32_t curr_jitter;
4916 	int32_t jitter_diff;
4917 
4918 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4919 	if (!avg_jitter)
4920 		return curr_jitter;
4921 
4922 	jitter_diff = curr_jitter - avg_jitter;
4923 	if (jitter_diff < 0)
4924 		avg_jitter = avg_jitter -
4925 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4926 	else
4927 		avg_jitter = avg_jitter +
4928 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4929 
4930 	return avg_jitter;
4931 }
4932 
4933 /**
4934  * dp_tx_jitter_get_avg_delay() - compute the average delay
4935  * @curr_delay: Current delay
4936  * @avg_delay: Average delay
4937  * Return: Newly Computed Average Delay
4938  */
4939 static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4940 					   uint32_t avg_delay)
4941 {
4942 	int32_t delay_diff;
4943 
4944 	if (!avg_delay)
4945 		return curr_delay;
4946 
4947 	delay_diff = curr_delay - avg_delay;
4948 	if (delay_diff < 0)
4949 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4950 					DP_AVG_DELAY_WEIGHT_DENOM);
4951 	else
4952 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4953 					DP_AVG_DELAY_WEIGHT_DENOM);
4954 
4955 	return avg_delay;
4956 }
4957 
4958 #ifdef WLAN_CONFIG_TX_DELAY
4959 /**
4960  * dp_tx_compute_cur_delay() - get the current delay
4961  * @soc: soc handle
4962  * @vdev: vdev structure for data path state
4963  * @ts: Tx completion status
 * @curr_delay: buffer to return the computed current delay
 * @tx_desc: tx descriptor
 * Return: QDF_STATUS
4967  */
4968 static
4969 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4970 				   struct dp_vdev *vdev,
4971 				   struct hal_tx_completion_status *ts,
4972 				   uint32_t *curr_delay,
4973 				   struct dp_tx_desc_s *tx_desc)
4974 {
4975 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4976 
4977 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4978 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4979 							      curr_delay);
4980 	return status;
4981 }
4982 #else
4983 static
4984 QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4985 				   struct dp_vdev *vdev,
4986 				   struct hal_tx_completion_status *ts,
4987 				   uint32_t *curr_delay,
4988 				   struct dp_tx_desc_s *tx_desc)
4989 {
4990 	int64_t current_timestamp, timestamp_hw_enqueue;
4991 
4992 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4993 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4994 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4995 
4996 	return QDF_STATUS_SUCCESS;
4997 }
4998 #endif
4999 
5000 /**
5001  * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
5002  * @jitter: per tid per ring jitter stats
5003  * @ts: Tx completion status
5004  * @vdev: vdev structure for data path state
5005  * @tx_desc: tx descriptor
5006  * Return: void
5007  */
5008 static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
5009 				     struct hal_tx_completion_status *ts,
5010 				     struct dp_vdev *vdev,
5011 				     struct dp_tx_desc_s *tx_desc)
5012 {
5013 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
5014 	struct dp_soc *soc = vdev->pdev->soc;
5015 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
5016 
	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
5018 		jitter->tx_drop += 1;
5019 		return;
5020 	}
5021 
5022 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
5023 					 tx_desc);
5024 
5025 	if (QDF_IS_STATUS_SUCCESS(status)) {
5026 		avg_delay = jitter->tx_avg_delay;
5027 		avg_jitter = jitter->tx_avg_jitter;
5028 		prev_delay = jitter->tx_prev_delay;
5029 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
5030 							 prev_delay,
5031 							 avg_jitter);
5032 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
5033 		jitter->tx_avg_delay = avg_delay;
5034 		jitter->tx_avg_jitter = avg_jitter;
5035 		jitter->tx_prev_delay = curr_delay;
5036 		jitter->tx_total_success += 1;
5037 	} else if (status == QDF_STATUS_E_FAILURE) {
5038 		jitter->tx_avg_err += 1;
5039 	}
5040 }
5041 
/**
 * dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
 * @txrx_peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @ts: Tx completion status
 * @ring_id: Tx completion ring ID/CPU context ID
5047  * Return: void
5048  */
5049 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
5050 					   struct dp_tx_desc_s *tx_desc,
5051 					   struct hal_tx_completion_status *ts,
5052 					   uint8_t ring_id)
5053 {
5054 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5055 	struct dp_soc *soc = pdev->soc;
5056 	struct cdp_peer_tid_stats *jitter_stats = NULL;
5057 	uint8_t tid;
	struct cdp_peer_tid_stats *tid_jitter = NULL;
5059 
5060 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
5061 		return;
5062 
5063 	if (!txrx_peer->jitter_stats)
5064 		return;
5065 
5066 	tid = ts->tid;
5067 	jitter_stats = txrx_peer->jitter_stats;
5068 
5069 	/*
	 * For packets without a valid TID, use the last data TID bucket
5071 	 */
5072 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
5073 		tid = CDP_MAX_DATA_TIDS - 1;
5074 
	tid_jitter = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
	dp_tx_compute_tid_jitter(tid_jitter,
				 ts, txrx_peer->vdev, tx_desc);
5078 }
5079 #else
5080 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
5081 					   struct dp_tx_desc_s *tx_desc,
5082 					   struct hal_tx_completion_status *ts,
5083 					   uint8_t ring_id)
5084 {
5085 }
5086 #endif
5087 
5088 #ifdef HW_TX_DELAY_STATS_ENABLE
5089 /**
5090  * dp_update_tx_delay_stats() - update the delay stats
5091  * @vdev: vdev handle
5092  * @delay: delay in ms or us based on the flag delay_in_us
5093  * @tid: tid value
5094  * @mode: type of tx delay mode
5095  * @ring_id: ring number
5096  * @delay_in_us: flag to indicate whether the delay is in ms or us
5097  *
5098  * Return: none
5099  */
5100 static inline
5101 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
5102 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
5103 {
5104 	struct cdp_tid_tx_stats *tstats =
5105 		&vdev->stats.tid_tx_stats[ring_id][tid];
5106 
5107 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
5108 			      delay_in_us);
5109 }
5110 #else
5111 static inline
5112 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
5113 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
5114 {
5115 	struct cdp_tid_tx_stats *tstats =
5116 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
5117 
5118 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
5119 			      delay_in_us);
5120 }
5121 #endif
5122 
5123 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
5124 			 uint8_t tid, uint8_t ring_id)
5125 {
5126 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
5127 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
5128 	uint32_t fwhw_transmit_delay_us;
5129 
5130 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
5131 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
5132 		return;
5133 
5134 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
5135 		fwhw_transmit_delay_us =
5136 			qdf_ktime_to_us(qdf_ktime_real_get()) -
5137 			qdf_ktime_to_us(tx_desc->timestamp);
5138 
5139 		/*
5140 		 * Delay between packet enqueued to HW and Tx completion in us
5141 		 */
5142 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
5143 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
5144 					 ring_id, true);
5145 		/*
		 * For MCL, only the enqueue-to-completion delay is required,
		 * so return here when the per-vdev flag is enabled.
5148 		 */
5149 		return;
5150 	}
5151 
5152 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
5153 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
5154 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
5155 					 timestamp_hw_enqueue);
5156 
5157 	if (!timestamp_hw_enqueue)
5158 		return;
5159 	/*
5160 	 * Delay between packet enqueued to HW and Tx completion in ms
5161 	 */
5162 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
5163 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
5164 				 false);
5165 
5166 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
5167 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
5168 	interframe_delay = (uint32_t)(timestamp_ingress -
5169 				      vdev->prev_tx_enq_tstamp);
5170 
5171 	/*
5172 	 * Delay in software enqueue
5173 	 */
5174 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
5175 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
5176 				 false);
5177 
5178 	/*
	 * Update interframe delay stats calculated at the hardstart receive
	 * point. vdev->prev_tx_enq_tstamp is 0 for the first frame, so the
	 * interframe delay is not calculated correctly for that frame; on
	 * the other hand, this avoids an extra per-packet check of
	 * !vdev->prev_tx_enq_tstamp.
5184 	 */
5185 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
5186 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
5187 				 false);
5188 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
5189 }
5190 
5191 #ifdef DISABLE_DP_STATS
5192 static
5193 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
5194 				   struct dp_txrx_peer *txrx_peer,
5195 				   uint8_t link_id)
5196 {
5197 }
5198 #else
5199 static inline void
5200 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
5201 		       uint8_t link_id)
5202 {
5203 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
5204 
5205 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
5206 	if (subtype != QDF_PROTO_INVALID)
5207 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
5208 					  1, link_id);
5209 }
5210 #endif
5211 
5212 #ifndef QCA_ENHANCED_STATS_SUPPORT
5213 #ifdef DP_PEER_EXTENDED_API
5214 static inline uint8_t
5215 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
5216 {
5217 	return txrx_peer->mpdu_retry_threshold;
5218 }
5219 #else
5220 static inline uint8_t
5221 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
5222 {
5223 	return 0;
5224 }
5225 #endif
5226 
5227 /**
 * dp_tx_update_peer_extd_stats() - Update Tx extended path stats for peer
 *
 * @ts: Tx completion status
5231  * @txrx_peer: datapath txrx_peer handle
5232  * @link_id: Link id
5233  *
5234  * Return: void
5235  */
5236 static inline void
5237 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
5238 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
5239 {
5240 	uint8_t mcs, pkt_type, dst_mcs_idx;
5241 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
5242 
5243 	mcs = ts->mcs;
5244 	pkt_type = ts->pkt_type;
5245 	/* do HW to SW pkt type conversion */
5246 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
5247 		    hal_2_dp_pkt_type_map[pkt_type]);
5248 
5249 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
5250 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
5251 		DP_PEER_EXTD_STATS_INC(txrx_peer,
5252 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
5253 				       1, link_id);
5254 
5255 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
5256 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
5257 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
5258 			       link_id);
5259 	DP_PEER_EXTD_STATS_INC(txrx_peer,
5260 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
5261 			       link_id);
5262 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
5263 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
5264 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
5265 				link_id);
5266 	if (ts->first_msdu) {
5267 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
5268 					ts->transmit_cnt > 1, link_id);
5269 
5270 		if (!retry_threshold)
5271 			return;
5272 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
5273 					qdf_do_div(ts->transmit_cnt,
5274 						   retry_threshold),
5275 					ts->transmit_cnt > retry_threshold,
5276 					link_id);
5277 	}
5278 }
5279 #else
5280 static inline void
5281 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
5282 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
5283 {
5284 }
5285 #endif
5286 
5287 #if defined(WLAN_FEATURE_11BE_MLO) && \
5288 	(defined(QCA_ENHANCED_STATS_SUPPORT) || \
5289 		defined(DP_MLO_LINK_STATS_SUPPORT))
5290 static inline uint8_t
5291 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
5292 			       struct hal_tx_completion_status *ts,
5293 			       struct dp_txrx_peer *txrx_peer,
5294 			       struct dp_vdev *vdev)
5295 {
5296 	uint8_t hw_link_id = 0;
5297 	uint32_t ppdu_id;
5298 	uint8_t link_id_offset, link_id_bits;
5299 
5300 	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
5301 		return 0;
5302 
5303 	link_id_offset = soc->link_id_offset;
5304 	link_id_bits = soc->link_id_bits;
5305 	ppdu_id = ts->ppdu_id;
5306 	hw_link_id = ((DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
5307 						   link_id_bits)) + 1);
5308 	if (hw_link_id > DP_MAX_MLO_LINKS) {
5309 		hw_link_id = 0;
5310 		DP_PEER_PER_PKT_STATS_INC(
5311 				txrx_peer,
5312 				tx.inval_link_id_pkt_cnt, 1, hw_link_id);
5313 	}
5314 
5315 	return hw_link_id;
5316 }
5317 #else
5318 static inline uint8_t
5319 dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
5320 			       struct hal_tx_completion_status *ts,
5321 			       struct dp_txrx_peer *txrx_peer,
5322 			       struct dp_vdev *vdev)
5323 {
5324 	return 0;
5325 }
5326 #endif
5327 
5328 /**
5329  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
5330  *				per wbm ring
5331  *
5332  * @tx_desc: software descriptor head pointer
5333  * @ts: Tx completion status
5334  * @txrx_peer: peer handle
5335  * @ring_id: ring number
5336  * @link_id: Link id
5337  *
5338  * Return: None
5339  */
5340 static inline void
5341 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
5342 			struct hal_tx_completion_status *ts,
5343 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
5344 			uint8_t link_id)
5345 {
5346 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5347 	uint8_t tid = ts->tid;
5348 	uint32_t length;
5349 	struct cdp_tid_tx_stats *tid_stats;
5350 
5351 	if (!pdev)
5352 		return;
5353 
5354 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
5355 		tid = CDP_MAX_DATA_TIDS - 1;
5356 
5357 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
5358 
5359 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
5360 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
5361 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
5362 					  link_id);
5363 		return;
5364 	}
5365 
5366 	length = qdf_nbuf_len(tx_desc->nbuf);
5367 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5368 
5369 	if (qdf_unlikely(pdev->delay_stats_flag) ||
5370 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
5371 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
5372 
5373 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
5374 		tid_stats->tqm_status_cnt[ts->status]++;
5375 	}
5376 
5377 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
5378 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
5379 					   ts->transmit_cnt > 1, link_id);
5380 
5381 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
5382 					   1, ts->transmit_cnt > 2, link_id);
5383 
5384 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
5385 					   link_id);
5386 
5387 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
5388 					   ts->msdu_part_of_amsdu, link_id);
5389 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
5390 					   !ts->msdu_part_of_amsdu, link_id);
5391 
5392 		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
5393 							qdf_system_ticks();
5394 
5395 		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
5396 
5397 		return;
5398 	}
5399 
5400 	/*
	 * tx_failed is ideally supposed to be updated from the HTT PPDU
	 * completion stats. However, on IPQ807X/IPQ6018 chipsets, owing to
	 * a HW limitation, there are no completions for failed cases, so
	 * tx_failed is updated from the data path here. Note that if
	 * tx_failed is ever fixed to come from the PPDU stats, this update
	 * has to be removed.
5407 	 */
5408 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5409 
5410 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
5411 				   ts->transmit_cnt > DP_RETRY_COUNT,
5412 				   link_id);
5413 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
5414 
5415 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
5416 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
5417 					  link_id);
5418 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
5419 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
5420 					      length, link_id);
5421 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
5422 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
5423 					  link_id);
5424 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
5425 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
5426 					  link_id);
5427 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
5428 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
5429 					  link_id);
5430 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
5431 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
5432 					  link_id);
5433 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
5434 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
5435 					  link_id);
5436 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
5437 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5438 					  tx.dropped.fw_rem_queue_disable, 1,
5439 					  link_id);
5440 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
5441 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5442 					  tx.dropped.fw_rem_no_match, 1,
5443 					  link_id);
5444 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
5445 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5446 					  tx.dropped.drop_threshold, 1,
5447 					  link_id);
5448 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
5449 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5450 					  tx.dropped.drop_link_desc_na, 1,
5451 					  link_id);
5452 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
5453 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5454 					  tx.dropped.invalid_drop, 1,
5455 					  link_id);
5456 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5457 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5458 					  tx.dropped.mcast_vdev_drop, 1,
5459 					  link_id);
5460 	} else {
5461 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
5462 					  link_id);
5463 	}
5464 }
5465 
5466 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5467 /**
5468  * dp_tx_flow_pool_lock() - take flow pool lock
5469  * @soc: core txrx main context
5470  * @tx_desc: tx desc
5471  *
5472  * Return: None
5473  */
5474 static inline
5475 void dp_tx_flow_pool_lock(struct dp_soc *soc,
5476 			  struct dp_tx_desc_s *tx_desc)
5477 {
5478 	struct dp_tx_desc_pool_s *pool;
5479 	uint8_t desc_pool_id;
5480 
5481 	desc_pool_id = tx_desc->pool_id;
5482 	pool = &soc->tx_desc[desc_pool_id];
5483 
5484 	qdf_spin_lock_bh(&pool->flow_pool_lock);
5485 }
5486 
5487 /**
5488  * dp_tx_flow_pool_unlock() - release flow pool lock
5489  * @soc: core txrx main context
5490  * @tx_desc: tx desc
5491  *
5492  * Return: None
5493  */
5494 static inline
5495 void dp_tx_flow_pool_unlock(struct dp_soc *soc,
5496 			    struct dp_tx_desc_s *tx_desc)
5497 {
5498 	struct dp_tx_desc_pool_s *pool;
5499 	uint8_t desc_pool_id;
5500 
5501 	desc_pool_id = tx_desc->pool_id;
5502 	pool = &soc->tx_desc[desc_pool_id];
5503 
5504 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
5505 }
5506 #else
5507 static inline
5508 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
5509 {
5510 }
5511 
5512 static inline
5513 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
5514 {
5515 }
5516 #endif
5517 
5518 /**
5519  * dp_tx_notify_completion() - Notify tx completion for this desc
5520  * @soc: core txrx main context
5521  * @vdev: datapath vdev handle
5522  * @tx_desc: tx desc
5523  * @netbuf:  buffer
5524  * @status: tx status
5525  *
5526  * Return: none
5527  */
5528 static inline void dp_tx_notify_completion(struct dp_soc *soc,
5529 					   struct dp_vdev *vdev,
5530 					   struct dp_tx_desc_s *tx_desc,
5531 					   qdf_nbuf_t netbuf,
5532 					   uint8_t status)
5533 {
5534 	void *osif_dev;
5535 	ol_txrx_completion_fp tx_compl_cbk = NULL;
5536 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
5537 
5538 	qdf_assert(tx_desc);
5539 
5540 	if (!vdev ||
5541 	    !vdev->osif_vdev) {
5542 		return;
5543 	}
5544 
5545 	osif_dev = vdev->osif_vdev;
5546 	tx_compl_cbk = vdev->tx_comp;
5547 
5548 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5549 		flag |= BIT(QDF_TX_RX_STATUS_OK);
5550 
5551 	if (tx_compl_cbk)
5552 		tx_compl_cbk(netbuf, osif_dev, flag);
5553 }
5554 
5555 /**
5556  * dp_tx_sojourn_stats_process() - Collect sojourn stats
5557  * @pdev: pdev handle
5558  * @txrx_peer: DP peer context
5559  * @tid: tid value
5560  * @txdesc_ts: timestamp from txdesc
5561  * @ppdu_id: ppdu id
5562  * @link_id: link id
5563  *
5564  * Return: none
5565  */
5566 #ifdef FEATURE_PERPKT_INFO
5567 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
5568 					       struct dp_txrx_peer *txrx_peer,
5569 					       uint8_t tid,
5570 					       uint64_t txdesc_ts,
5571 					       uint32_t ppdu_id,
5572 					       uint8_t link_id)
5573 {
5574 	uint64_t delta_ms;
5575 	struct cdp_tx_sojourn_stats *sojourn_stats;
5576 	struct dp_peer *primary_link_peer = NULL;
5577 	struct dp_soc *link_peer_soc = NULL;
5578 
5579 	if (qdf_unlikely(!pdev->enhanced_stats_en))
5580 		return;
5581 
5582 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
5583 			 tid >= CDP_DATA_TID_MAX))
5584 		return;
5585 
5586 	if (qdf_unlikely(!pdev->sojourn_buf))
5587 		return;
5588 
5589 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
5590 							   txrx_peer->peer_id,
5591 							   DP_MOD_ID_TX_COMP);
5592 
5593 	if (qdf_unlikely(!primary_link_peer))
5594 		return;
5595 
5596 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
5597 		qdf_nbuf_data(pdev->sojourn_buf);
5598 
5599 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
5600 	sojourn_stats->cookie = (void *)
5601 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
5602 							  primary_link_peer);
5603 
5604 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
5605 				txdesc_ts;
5606 	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
5607 			    delta_ms);
5608 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
5609 	sojourn_stats->num_msdus[tid] = 1;
5610 	sojourn_stats->avg_sojourn_msdu[tid].internal =
5611 		txrx_peer->stats[link_id].
5612 			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
5613 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
5614 			     pdev->sojourn_buf, HTT_INVALID_PEER,
5615 			     WDI_NO_VAL, pdev->pdev_id);
5616 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
5617 	sojourn_stats->num_msdus[tid] = 0;
5618 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
5619 
5620 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
5621 }
5622 #else
5623 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
5624 					       struct dp_txrx_peer *txrx_peer,
5625 					       uint8_t tid,
5626 					       uint64_t txdesc_ts,
					       uint32_t ppdu_id,
					       uint8_t link_id)
5628 {
5629 }
5630 #endif
5631 
5632 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
5633 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
5634 				       struct dp_tx_desc_s *desc,
5635 				       struct hal_tx_completion_status *ts)
5636 {
5637 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
5638 			     desc, ts->peer_id,
5639 			     WDI_NO_VAL, desc->pdev->pdev_id);
5640 }
5641 #endif
5642 
5643 void
5644 dp_tx_comp_process_desc(struct dp_soc *soc,
5645 			struct dp_tx_desc_s *desc,
5646 			struct hal_tx_completion_status *ts,
5647 			struct dp_txrx_peer *txrx_peer)
5648 {
5649 	uint64_t time_latency = 0;
5650 	uint16_t peer_id = DP_INVALID_PEER_ID;
5651 
5652 	/*
5653 	 * m_copy/tx_capture modes are not supported for
5654 	 * scatter gather packets
5655 	 */
5656 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
5657 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
5658 				qdf_ktime_to_ms(desc->timestamp));
5659 	}
5660 
5661 	dp_send_completion_to_pkt_capture(soc, desc, ts);
5662 
5663 	if (dp_tx_pkt_tracepoints_enabled())
5664 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
5665 				    desc->msdu_ext_desc ?
5666 				    desc->msdu_ext_desc->tso_desc : NULL,
5667 				    qdf_ktime_to_us(desc->timestamp));
5668 
5669 	if (!(desc->msdu_ext_desc)) {
5670 		dp_tx_enh_unmap(soc, desc);
5671 		if (txrx_peer)
5672 			peer_id = txrx_peer->peer_id;
5673 
5674 		if (QDF_STATUS_SUCCESS ==
5675 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
5676 			return;
5677 		}
5678 
5679 		if (QDF_STATUS_SUCCESS ==
5680 		    dp_get_completion_indication_for_stack(soc,
5681 							   desc->pdev,
5682 							   txrx_peer, ts,
5683 							   desc->nbuf,
5684 							   time_latency)) {
5685 			dp_send_completion_to_stack(soc,
5686 						    desc->pdev,
5687 						    ts->peer_id,
5688 						    ts->ppdu_id,
5689 						    desc->nbuf);
5690 			return;
5691 		}
5692 	}
5693 
5694 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
5695 	dp_tx_comp_free_buf(soc, desc, false);
5696 }
5697 
5698 #ifdef DISABLE_DP_STATS
5699 /**
5700  * dp_tx_update_connectivity_stats() - update tx connectivity stats
5701  * @soc: core txrx main context
5702  * @vdev: virtual device instance
5703  * @tx_desc: tx desc
5704  * @status: tx status
5705  *
5706  * Return: none
5707  */
5708 static inline
5709 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5710 				     struct dp_vdev *vdev,
5711 				     struct dp_tx_desc_s *tx_desc,
5712 				     uint8_t status)
5713 {
5714 }
5715 #else
5716 static inline
5717 void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5718 				     struct dp_vdev *vdev,
5719 				     struct dp_tx_desc_s *tx_desc,
5720 				     uint8_t status)
5721 {
5722 	void *osif_dev;
5723 	ol_txrx_stats_rx_fp stats_cbk;
5724 	uint8_t pkt_type;
5725 
5726 	qdf_assert(tx_desc);
5727 
5728 	if (!vdev || vdev->delete.pending || !vdev->osif_vdev ||
5729 	    !vdev->stats_cb)
5730 		return;
5731 
5732 	osif_dev = vdev->osif_vdev;
5733 	stats_cbk = vdev->stats_cb;
5734 
5735 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5736 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5737 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5738 			  &pkt_type);
5739 }
5740 #endif
5741 
5742 #if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
5743 /* Mask for bit29 ~ bit31 */
5744 #define DP_TX_TS_BIT29_31_MASK 0xE0000000
5745 /* Timestamp value (unit us) if bit29 is set */
5746 #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
5747 /**
5748  * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
5749  * @ack_ts: OTA ack timestamp, unit us.
5750  * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
5751  * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
5752  *
 * This function restores the bit29 ~ bit31 value of buffer_timestamp in
 * the wbm2sw ring entry. Currently buffer_timestamp can only represent up
 * to 0x7FFFF * 1024 us (29 bits); if the timestamp exceeds that,
 * bit29 ~ bit31 are lost.
5757  *
5758  * Return: the adjusted buffer_timestamp value
5759  */
5760 static inline
5761 uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
5762 					uint32_t enqueue_ts,
5763 					uint32_t base_delta_ts)
5764 {
5765 	uint32_t ack_buffer_ts;
5766 	uint32_t ack_buffer_ts_bit29_31;
5767 	uint32_t adjusted_enqueue_ts;
5768 
5769 	/* corresponding buffer_timestamp value when receive OTA Ack */
5770 	ack_buffer_ts = ack_ts - base_delta_ts;
5771 	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
5772 
5773 	/* restore the bit29 ~ bit31 value */
5774 	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
5775 
5776 	/*
	 * If the actual enqueue_ts occupies only 29 bits but enqueue_ts plus
	 * the real UL delay overflows 29 bits, the 30th bit (bit-29) must
	 * not be carried over from the ack timestamp; otherwise an extra
	 * 0x20000000 us would be added to enqueue_ts.
5781 	 */
5782 	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
5783 		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
5784 
5785 	return adjusted_enqueue_ts;
5786 }
5787 
5788 QDF_STATUS
5789 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5790 			  uint32_t delta_tsf,
5791 			  uint32_t *delay_us)
5792 {
5793 	uint32_t buffer_ts;
5794 	uint32_t delay;
5795 
5796 	if (!delay_us)
5797 		return QDF_STATUS_E_INVAL;
5798 
	/* If tx_rate_stats_info_valid is 0, then the tsf is invalid */
5800 	if (!ts->valid)
5801 		return QDF_STATUS_E_INVAL;
5802 
5803 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5804 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5805 	 * valid up to 29 bits.
5806 	 */
5807 	buffer_ts = ts->buffer_timestamp << 10;
5808 	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
5809 						   buffer_ts, delta_tsf);
5810 
5811 	delay = ts->tsf - buffer_ts - delta_tsf;
5812 
5813 	if (qdf_unlikely(delay & 0x80000000)) {
5814 		dp_err_rl("delay = 0x%x (-ve)\n"
5815 			  "release_src = %d\n"
5816 			  "ppdu_id = 0x%x\n"
5817 			  "peer_id = 0x%x\n"
5818 			  "tid = 0x%x\n"
5819 			  "release_reason = %d\n"
5820 			  "tsf = %u (0x%x)\n"
5821 			  "buffer_timestamp = %u (0x%x)\n"
5822 			  "delta_tsf = %u (0x%x)\n",
5823 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5824 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5825 			  ts->buffer_timestamp, ts->buffer_timestamp,
5826 			  delta_tsf, delta_tsf);
5827 
5828 		delay = 0;
5829 		goto end;
5830 	}
5831 
5832 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
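	/*
	 * A delay above 0x1000000 us (~16.8 seconds) is implausible here;
	 * log the completion status and reject it.
	 */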
5833 	if (delay > 0x1000000) {
5834 		dp_info_rl("----------------------\n"
5835 			   "Tx completion status:\n"
5836 			   "----------------------\n"
5837 			   "release_src = %d\n"
5838 			   "ppdu_id = 0x%x\n"
5839 			   "release_reason = %d\n"
5840 			   "tsf = %u (0x%x)\n"
5841 			   "buffer_timestamp = %u (0x%x)\n"
5842 			   "delta_tsf = %u (0x%x)\n",
5843 			   ts->release_src, ts->ppdu_id, ts->status,
5844 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5845 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5846 		return QDF_STATUS_E_FAILURE;
5847 	}
5848 
5849 
5850 end:
5851 	*delay_us = delay;
5852 
5853 	return QDF_STATUS_SUCCESS;
5854 }
5855 
5856 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5857 		      uint32_t delta_tsf)
5858 {
5859 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5860 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5861 						     DP_MOD_ID_CDP);
5862 
5863 	if (!vdev) {
5864 		dp_err_rl("vdev %d does not exist", vdev_id);
5865 		return;
5866 	}
5867 
5868 	vdev->delta_tsf = delta_tsf;
5869 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5870 
5871 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5872 }
5873 #endif
5874 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5875 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5876 				      uint8_t vdev_id, bool enable)
5877 {
5878 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5879 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5880 						     DP_MOD_ID_CDP);
5881 
5882 	if (!vdev) {
5883 		dp_err_rl("vdev %d does not exist", vdev_id);
5884 		return QDF_STATUS_E_FAILURE;
5885 	}
5886 
5887 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5888 
5889 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5890 
5891 	return QDF_STATUS_SUCCESS;
5892 }
5893 
5894 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5895 			       uint32_t *val)
5896 {
5897 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5898 	struct dp_vdev *vdev;
5899 	uint32_t delay_accum;
5900 	uint32_t pkts_accum;
5901 
5902 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5903 	if (!vdev) {
5904 		dp_err_rl("vdev %d does not exist", vdev_id);
5905 		return QDF_STATUS_E_FAILURE;
5906 	}
5907 
5908 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5909 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5910 		return QDF_STATUS_E_FAILURE;
5911 	}
5912 
5913 	/* Average uplink delay based on current accumulated values */
5914 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5915 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5916 
	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
5918 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5919 		 delay_accum, pkts_accum);
5920 
5921 	/* Reset accumulated values to 0 */
5922 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5923 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5924 
5925 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5926 
5927 	return QDF_STATUS_SUCCESS;
5928 }
5929 
5930 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5931 				      struct hal_tx_completion_status *ts)
5932 {
5933 	uint32_t ul_delay;
5934 
5935 	if (qdf_unlikely(!vdev)) {
5936 		dp_info_rl("vdev is null or delete in progress");
5937 		return;
5938 	}
5939 
5940 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5941 		return;
5942 
5943 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5944 							  vdev->delta_tsf,
5945 							  &ul_delay)))
5946 		return;
5947 
	ul_delay /= 1000; /* convert us to ms */
5949 
5950 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5951 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5952 }
5953 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5954 static inline
5955 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5956 			       struct hal_tx_completion_status *ts)
5957 {
5958 }
5959 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5960 
5961 #ifndef CONFIG_AP_PLATFORM
5962 /**
5963  * dp_update_mcast_stats() - Update Tx Mcast stats
5964  * @txrx_peer: txrx_peer pointer
5965  * @link_id: Link ID
5966  * @length: packet length
5967  * @nbuf: nbuf handle
5968  *
5969  * Return: None
5970  */
5971 static inline void
5972 dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id,
5973 		      uint32_t length, qdf_nbuf_t nbuf)
5974 {
5975 	if (QDF_NBUF_CB_GET_IS_MCAST(nbuf))
5976 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5977 					      length, link_id);
5978 }
5979 #else
5980 static inline void
5981 dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id,
5982 		      uint32_t length, qdf_nbuf_t nbuf)
5983 {
5984 }
5985 #endif
5986 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
5987 /**
5988  * dp_tx_comp_set_nbuf_band() - set nbuf band.
5989  * @soc: dp soc handle
5990  * @nbuf: nbuf handle
5991  * @ts: tx completion status
5992  *
5993  * Return: None
5994  */
5995 static inline void
5996 dp_tx_comp_set_nbuf_band(struct dp_soc *soc, qdf_nbuf_t nbuf,
5997 			 struct hal_tx_completion_status *ts)
5998 {
5999 	struct qdf_mac_addr *mac_addr;
6000 	struct dp_peer *peer;
6001 	struct dp_txrx_peer *txrx_peer;
6002 	uint8_t link_id;
6003 
6004 	if ((QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6005 		QDF_NBUF_CB_PACKET_TYPE_EAPOL &&
6006 	     QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6007 		QDF_NBUF_CB_PACKET_TYPE_DHCP &&
6008 	     QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6009 		QDF_NBUF_CB_PACKET_TYPE_DHCPV6) ||
6010 	    QDF_NBUF_CB_GET_IS_BCAST(nbuf))
6011 		return;
6012 
6013 	mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
6014 					   QDF_NBUF_DEST_MAC_OFFSET);
6015 
6016 	peer = dp_mld_peer_find_hash_find(soc, mac_addr->bytes, 0,
6017 					  DP_VDEV_ALL, DP_MOD_ID_TX_COMP);
6018 	if (qdf_likely(peer)) {
6019 		txrx_peer = dp_get_txrx_peer(peer);
6020 		if (qdf_likely(txrx_peer)) {
6021 			link_id =
6022 				dp_tx_get_link_id_from_ppdu_id(soc, ts,
6023 						  txrx_peer,
6024 						  txrx_peer->vdev);
6025 			qdf_nbuf_tx_set_band(nbuf, txrx_peer->ll_band[link_id]);
6026 		}
6027 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
6028 	}
6029 }
6030 #else
6031 static inline void
6032 dp_tx_comp_set_nbuf_band(struct dp_soc *soc, qdf_nbuf_t nbuf,
6033 			 struct hal_tx_completion_status *ts)
6034 {
6035 }
6036 #endif
6037 
6038 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
6039 				  struct dp_tx_desc_s *tx_desc,
6040 				  struct hal_tx_completion_status *ts,
6041 				  struct dp_txrx_peer *txrx_peer,
6042 				  uint8_t ring_id)
6043 {
6044 	uint32_t length;
6045 	qdf_ether_header_t *eh;
6046 	struct dp_vdev *vdev = NULL;
6047 	qdf_nbuf_t nbuf = tx_desc->nbuf;
6048 	enum qdf_dp_tx_rx_status dp_status;
6049 	uint8_t link_id = 0;
6050 	enum QDF_OPMODE op_mode = QDF_MAX_NO_OF_MODE;
6051 
6052 	if (!nbuf) {
6053 		dp_info_rl("invalid tx descriptor. nbuf NULL");
6054 		goto out;
6055 	}
6056 
6057 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
6058 	length = dp_tx_get_pkt_len(tx_desc);
6059 
6060 	dp_status = dp_tx_hw_to_qdf(ts->status);
6061 	if (soc->dp_debug_log_en) {
6062 		dp_tx_comp_debug("--------------------\n"
6063 				 "Tx Completion Stats:\n"
6064 				 "--------------------\n"
6065 				 "ack_frame_rssi = %d\n"
6066 				 "first_msdu = %d\n"
6067 				 "last_msdu = %d\n"
6068 				 "msdu_part_of_amsdu = %d\n"
6069 				 "rate_stats valid = %d\n"
6070 				 "bw = %d\n"
6071 				 "pkt_type = %d\n"
6072 				 "stbc = %d\n"
6073 				 "ldpc = %d\n"
6074 				 "sgi = %d\n"
6075 				 "mcs = %d\n"
6076 				 "ofdma = %d\n"
6077 				 "tones_in_ru = %d\n"
6078 				 "tsf = %d\n"
6079 				 "ppdu_id = %d\n"
6080 				 "transmit_cnt = %d\n"
6081 				 "tid = %d\n"
6082 				 "peer_id = %d\n"
6083 				 "tx_status = %d\n"
6084 				 "tx_release_source = %d\n",
6085 				 ts->ack_frame_rssi, ts->first_msdu,
6086 				 ts->last_msdu, ts->msdu_part_of_amsdu,
6087 				 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
6088 				 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
6089 				 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
6090 				 ts->transmit_cnt, ts->tid, ts->peer_id,
6091 				 ts->status, ts->release_src);
6092 	}
6093 
6094 	/* Update SoC level stats */
6095 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
6096 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
6097 
6098 	if (!txrx_peer) {
6099 		dp_tx_comp_set_nbuf_band(soc, nbuf, ts);
6100 		dp_info_rl("peer is null or deletion in progress");
6101 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
6102 
6103 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
6104 					     DP_MOD_ID_CDP);
6105 		if (qdf_likely(vdev)) {
6106 			op_mode = vdev->qdf_opmode;
6107 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6108 		}
6109 
6110 		goto out_log;
6111 	}
6112 	vdev = txrx_peer->vdev;
6113 
6114 	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
6115 
6116 	dp_tx_set_nbuf_band(nbuf, txrx_peer, link_id);
6117 
6118 	op_mode = vdev->qdf_opmode;
6119 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
6120 	dp_tx_update_uplink_delay(soc, vdev, ts);
6121 
6122 	/* check tx complete notification */
6123 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
6124 		dp_tx_notify_completion(soc, vdev, tx_desc,
6125 					nbuf, ts->status);
6126 
6127 	/* Update per-packet stats for mesh mode */
6128 	if (qdf_unlikely(vdev->mesh_vdev) &&
6129 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
6130 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
6131 
6132 	/* Update peer level stats */
6133 	if (qdf_unlikely(txrx_peer->bss_peer &&
6134 			 vdev->opmode == wlan_op_mode_ap)) {
6135 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
6136 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
6137 						      length, link_id);
6138 
6139 			if (txrx_peer->vdev->tx_encap_type ==
6140 				htt_cmn_pkt_type_ethernet &&
6141 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
6142 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6143 							      tx.bcast, 1,
6144 							      length, link_id);
6145 			}
6146 		}
6147 	} else {
6148 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
6149 					      link_id);
6150 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
6151 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
6152 						      1, length, link_id);
6153 			if (qdf_unlikely(txrx_peer->in_twt)) {
6154 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6155 							      tx.tx_success_twt,
6156 							      1, length,
6157 							      link_id);
6158 			}
6159 
6160 			dp_update_mcast_stats(txrx_peer, link_id, length, nbuf);
6161 		}
6162 	}
6163 
6164 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
6165 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
6166 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
6167 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
6168 				     ts, ts->tid);
6169 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
6170 	dp_tx_latency_stats_update(soc, txrx_peer, tx_desc, ts, link_id);
6171 
6172 #ifdef QCA_SUPPORT_RDK_STATS
6173 	if (soc->peerstats_enabled)
6174 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
6175 					    qdf_ktime_to_ms(tx_desc->timestamp),
6176 					    ts->ppdu_id, link_id);
6177 #endif
6178 
6179 out_log:
6180 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
6181 			 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
6182 			 QDF_TRACE_DEFAULT_PDEV_ID,
6183 			 qdf_nbuf_data_addr(nbuf),
6184 			 sizeof(qdf_nbuf_data(nbuf)),
6185 			 tx_desc->id, ts->status, dp_status, op_mode));
6186 out:
6187 	return;
6188 }
6189 
6190 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
6191 	defined(QCA_ENHANCED_STATS_SUPPORT)
6192 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6193 				   uint32_t length, uint8_t tx_status,
6194 				   bool update)
6195 {
6196 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
6197 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6198 
6199 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6200 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6201 	}
6202 }
6203 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
6204 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6205 				   uint32_t length, uint8_t tx_status,
6206 				   bool update)
6207 {
6208 	if (!txrx_peer->hw_txrx_stats_en) {
6209 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6210 
6211 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6212 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6213 	}
6214 }
6215 
6216 #else
6217 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6218 				   uint32_t length, uint8_t tx_status,
6219 				   bool update)
6220 {
6221 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6222 
6223 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6224 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6225 }
6226 #endif
6227 
6228 /**
 * dp_tx_prefetch_next_nbuf_data() - Prefetch nbuf and nbuf data
 * @next: descriptor of the next buffer
6231  *
6232  * Return: none
6233  */
6234 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
6235 static inline
6236 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
6237 {
6238 	qdf_nbuf_t nbuf = NULL;
6239 
6240 	if (next)
6241 		nbuf = next->nbuf;
6242 	if (nbuf)
6243 		qdf_prefetch(nbuf);
6244 }
6245 #else
6246 static inline
6247 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
6248 {
6249 }
6250 #endif
6251 
6252 /**
6253  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
6254  * @soc: core txrx main context
6255  * @desc: software descriptor
6256  *
6257  * Return: true when packet is reinjected
6258  */
6259 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
6260 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
6261 static inline bool
6262 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
6263 {
6264 	struct dp_vdev *vdev = NULL;
6265 	uint8_t xmit_type;
6266 
6267 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
6268 		if (!soc->arch_ops.dp_tx_mcast_handler ||
6269 		    !soc->arch_ops.dp_tx_is_mcast_primary)
6270 			return false;
6271 
6272 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
6273 					     DP_MOD_ID_REINJECT);
6274 
6275 		if (qdf_unlikely(!vdev)) {
6276 			dp_tx_comp_info_rl("Unable to get vdev ref  %d",
6277 					   desc->id);
6278 			return false;
6279 		}
6280 
6281 		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
6282 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
6283 			return false;
6284 		}
6285 		xmit_type = qdf_nbuf_get_vdev_xmit_type(desc->nbuf);
6286 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
6287 				 qdf_nbuf_len(desc->nbuf));
6288 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
6289 		dp_tx_desc_release(soc, desc, desc->pool_id);
6290 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
6291 		return true;
6292 	}
6293 
6294 	return false;
6295 }
6296 #else
6297 static inline bool
6298 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
6299 {
6300 	return false;
6301 }
6302 #endif
6303 
6304 #ifdef QCA_DP_TX_NBUF_LIST_FREE
6305 static inline void
6306 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
6307 {
6308 	qdf_nbuf_queue_head_init(nbuf_queue_head);
6309 }
6310 
6311 static inline void
6312 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
6313 			  struct dp_tx_desc_s *desc)
6314 {
6315 	qdf_nbuf_t nbuf = NULL;
6316 
6317 	nbuf = desc->nbuf;
6318 	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
6319 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
6320 	else
6321 		qdf_nbuf_free(nbuf);
6322 }
6323 
6324 static inline void
6325 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
6326 				  qdf_nbuf_t nbuf)
6327 {
6328 	if (!nbuf)
6329 		return;
6330 
6331 	if (nbuf->is_from_recycler)
6332 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
6333 	else
6334 		qdf_nbuf_free(nbuf);
6335 }
6336 
6337 static inline void
6338 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
6339 {
6340 	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
6341 }
6342 #else
6343 static inline void
6344 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
6345 {
6346 }
6347 
6348 static inline void
6349 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
6350 			  struct dp_tx_desc_s *desc)
6351 {
6352 	qdf_nbuf_free(desc->nbuf);
6353 }
6354 
6355 static inline void
6356 dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
6357 				  qdf_nbuf_t nbuf)
6358 {
6359 	qdf_nbuf_free(nbuf);
6360 }
6361 
6362 static inline void
6363 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
6364 {
6365 }
6366 #endif
6367 
6368 #ifdef WLAN_SUPPORT_PPEDS
6369 static inline void
6370 dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
6371 				 struct dp_txrx_peer *txrx_peer,
6372 				 struct hal_tx_completion_status *ts,
6373 				 struct dp_tx_desc_s *desc,
6374 				 uint8_t ring_id)
6375 {
6376 	uint8_t link_id = 0;
6377 	struct dp_vdev *vdev = NULL;
6378 
6379 	if (qdf_likely(txrx_peer)) {
6380 		if (!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
6381 			hal_tx_comp_get_status(&desc->comp,
6382 					       ts,
6383 					       soc->hal_soc);
6384 			vdev = txrx_peer->vdev;
6385 			link_id = dp_tx_get_link_id_from_ppdu_id(soc,
6386 								 ts,
6387 								 txrx_peer,
6388 								 vdev);
6389 			if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
6390 				link_id = 0;
6391 			dp_tx_update_peer_stats(desc, ts,
6392 						txrx_peer,
6393 						ring_id,
6394 						link_id);
6395 		} else {
6396 			dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
6397 						      desc->tx_status, false);
6398 		}
6399 	}
6400 }
6401 #else
6402 static inline void
6403 dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
6404 				 struct dp_txrx_peer *txrx_peer,
6405 				 struct hal_tx_completion_status *ts,
6406 				 struct dp_tx_desc_s *desc,
6407 				 uint8_t ring_id)
6408 {
6409 }
6410 #endif
6411 
6412 void
6413 dp_tx_comp_process_desc_list_fast(struct dp_soc *soc,
6414 				  struct dp_tx_desc_s *head_desc,
6415 				  struct dp_tx_desc_s *tail_desc,
6416 				  uint8_t ring_id,
6417 				  uint32_t fast_desc_count)
6418 {
6419 	struct dp_tx_desc_pool_s *pool = NULL;
6420 
6421 	pool = dp_get_tx_desc_pool(soc, head_desc->pool_id);
6422 	dp_tx_outstanding_sub(head_desc->pdev, fast_desc_count);
6423 	dp_tx_desc_free_list(pool, head_desc, tail_desc, fast_desc_count);
6424 }
6425 
6426 void
6427 dp_tx_comp_process_desc_list(struct dp_soc *soc,
6428 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
6429 {
6430 	struct dp_tx_desc_s *desc;
6431 	struct dp_tx_desc_s *next;
6432 	struct hal_tx_completion_status ts;
6433 	struct dp_txrx_peer *txrx_peer = NULL;
6434 	uint16_t peer_id = DP_INVALID_PEER;
6435 	dp_txrx_ref_handle txrx_ref_handle = NULL;
6436 	qdf_nbuf_queue_head_t h;
6437 
6438 	desc = comp_head;
6439 
6440 	dp_tx_nbuf_queue_head_init(&h);
6441 
6442 	while (desc) {
6443 		next = desc->next;
6444 		dp_tx_prefetch_next_nbuf_data(next);
6445 
6446 		if (peer_id != desc->peer_id) {
6447 			if (txrx_peer)
6448 				dp_txrx_peer_unref_delete(txrx_ref_handle,
6449 							  DP_MOD_ID_TX_COMP);
6450 			peer_id = desc->peer_id;
6451 			txrx_peer =
6452 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
6453 							   &txrx_ref_handle,
6454 							   DP_MOD_ID_TX_COMP);
6455 		}
6456 
6457 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
6458 			desc = next;
6459 			continue;
6460 		}
6461 
6462 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
6463 			qdf_nbuf_t nbuf;
6464 			dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
6465 							 desc, ring_id);
6466 
6467 			if (desc->pool_id != DP_TX_PPEDS_POOL_ID) {
6468 				nbuf = desc->nbuf;
6469 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
6470 				if (desc->flags & DP_TX_DESC_FLAG_SPECIAL)
6471 					dp_tx_spcl_desc_free(soc, desc,
6472 							     desc->pool_id);
6473 				else
6474 					dp_tx_desc_free(soc, desc,
6475 							desc->pool_id);
6476 
6477 				__dp_tx_outstanding_dec(soc);
6478 			} else {
6479 				nbuf = dp_ppeds_tx_desc_free(soc, desc);
6480 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
6481 			}
6482 			desc = next;
6483 			continue;
6484 		}
6485 
6486 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
6487 			struct dp_pdev *pdev = desc->pdev;
6488 
6489 			if (qdf_likely(txrx_peer))
6490 				dp_tx_update_peer_basic_stats(txrx_peer,
6491 							      desc->length,
6492 							      desc->tx_status,
6493 							      false);
6494 			qdf_assert(pdev);
6495 			dp_tx_outstanding_dec(pdev);
6496 			/*
			 * Calling a QDF wrapper here creates a significant
			 * performance impact, so the wrapper call is avoided
			 * here
6499 			 */
6500 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
6501 					       desc->id, DP_TX_COMP_UNMAP);
6502 			dp_tx_nbuf_unmap(soc, desc);
6503 			dp_tx_nbuf_dev_queue_free(&h, desc);
6504 			dp_tx_desc_free(soc, desc, desc->pool_id);
6505 			desc = next;
6506 			continue;
6507 		}
6508 
6509 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
6510 
6511 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
6512 					     ring_id);
6513 
6514 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
6515 
6516 		dp_tx_desc_release(soc, desc, desc->pool_id);
6517 		desc = next;
6518 	}
6519 	dp_tx_nbuf_dev_kfree_list(&h);
6520 	if (txrx_peer)
6521 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
6522 }
6523 
6524 #ifndef WLAN_SOFTUMAC_SUPPORT
6525 /**
6526  * dp_tx_dump_tx_desc() - Dump tx desc for debugging
6527  * @tx_desc: software descriptor head pointer
6528  *
6529  * This function will dump tx desc for further debugging
6530  *
6531  * Return: none
6532  */
6533 static
6534 void dp_tx_dump_tx_desc(struct dp_tx_desc_s *tx_desc)
6535 {
6536 	if (tx_desc) {
6537 		dp_tx_comp_warn("tx_desc->nbuf: %pK", tx_desc->nbuf);
6538 		dp_tx_comp_warn("tx_desc->flags: 0x%x", tx_desc->flags);
6539 		dp_tx_comp_warn("tx_desc->id: %u", tx_desc->id);
6540 		dp_tx_comp_warn("tx_desc->dma_addr: 0x%x",
6541 				(unsigned int)tx_desc->dma_addr);
6542 		dp_tx_comp_warn("tx_desc->vdev_id: %u",
6543 				tx_desc->vdev_id);
6544 		dp_tx_comp_warn("tx_desc->tx_status: %u",
6545 				tx_desc->tx_status);
6546 		dp_tx_comp_warn("tx_desc->pdev: %pK",
6547 				tx_desc->pdev);
6548 		dp_tx_comp_warn("tx_desc->tx_encap_type: %u",
6549 				tx_desc->tx_encap_type);
6550 		dp_tx_comp_warn("tx_desc->buffer_src: %u",
6551 				tx_desc->buffer_src);
6552 		dp_tx_comp_warn("tx_desc->frm_type: %u",
6553 				tx_desc->frm_type);
6554 		dp_tx_comp_warn("tx_desc->pkt_offset: %u",
6555 				tx_desc->pkt_offset);
6556 		dp_tx_comp_warn("tx_desc->pool_id: %u",
6557 				tx_desc->pool_id);
6558 	}
6559 }
6560 #endif
6561 
6562 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
6563 static inline
6564 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
6565 				   int max_reap_limit)
6566 {
6567 	bool limit_hit = false;
6568 
	limit_hit = num_reaped >= max_reap_limit;
6571 
6572 	if (limit_hit)
6573 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
6574 
6575 	return limit_hit;
6576 }
6577 
6578 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
6579 {
6580 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
6581 }
6582 
6583 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
6584 {
6585 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
6586 
6587 	return cfg->tx_comp_loop_pkt_limit;
6588 }
6589 #else
6590 static inline
6591 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
6592 				   int max_reap_limit)
6593 {
6594 	return false;
6595 }
6596 
6597 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
6598 {
6599 	return false;
6600 }
6601 
6602 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
6603 {
6604 	return 0;
6605 }
6606 #endif
6607 
6608 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
6609 static inline int
6610 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
6611 				  int *max_reap_limit)
6612 {
6613 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
6614 							       max_reap_limit);
6615 }
6616 #else
6617 static inline int
6618 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
6619 				  int *max_reap_limit)
6620 {
6621 	return 0;
6622 }
6623 #endif
6624 
6625 #ifdef DP_TX_TRACKING
6626 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
6627 {
6628 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
6629 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
6630 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
6631 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
6632 	}
6633 }
6634 #endif
6635 
6636 #ifndef WLAN_SOFTUMAC_SUPPORT
6637 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
6638 
/* Increasing this value runs the risk of SRNG backpressure */
6640 #define DP_STALE_TX_COMP_WAIT_TIMEOUT_US 1000
6641 
6642 static inline void
6643 dp_tx_comp_reset_stale_entry_detection(struct dp_soc *soc, uint32_t ring_num)
6644 {
6645 	soc->stale_entry[ring_num].detected = 0;
6646 }
6647 
6648 /**
6649  * dp_tx_comp_stale_entry_handle() - Detect stale entry condition in tx
6650  *				     completion srng.
6651  * @soc: DP SoC handle
6652  * @ring_num: tx completion ring number
6653  * @status: QDF_STATUS from tx_comp_get_params_from_hal_desc arch ops
6654  *
6655  * Return: QDF_STATUS_SUCCESS if stale entry is detected and handled
6656  *	   QDF_STATUS error code in other cases.
6657  */
6658 static inline QDF_STATUS
6659 dp_tx_comp_stale_entry_handle(struct dp_soc *soc, uint32_t ring_num,
6660 			      QDF_STATUS status)
6661 {
6662 	uint64_t curr_timestamp = qdf_get_log_timestamp_usecs();
6663 	uint64_t delta_us;
6664 
6665 	if (status != QDF_STATUS_E_PENDING) {
6666 		dp_tx_comp_reset_stale_entry_detection(soc, ring_num);
6667 		return QDF_STATUS_E_INVAL;
6668 	}
6669 
6670 	if (soc->stale_entry[ring_num].detected) {
6671 		/* stale entry process continuation */
6672 		delta_us = curr_timestamp -
6673 				soc->stale_entry[ring_num].start_time;
6674 		if (delta_us > DP_STALE_TX_COMP_WAIT_TIMEOUT_US) {
6675 			dp_err("Stale tx comp desc, waited %d us", delta_us);
6676 			return QDF_STATUS_E_FAILURE;
6677 		}
6678 	} else {
6679 		/* This is the start of stale entry detection */
6680 		soc->stale_entry[ring_num].detected = 1;
6681 		soc->stale_entry[ring_num].start_time = curr_timestamp;
6682 	}
6683 
6684 	return QDF_STATUS_SUCCESS;
6685 }
6686 #else
6687 
6688 static inline void
6689 dp_tx_comp_reset_stale_entry_detection(struct dp_soc *soc, uint32_t ring_num)
6690 {
6691 }
6692 
6693 static inline QDF_STATUS
6694 dp_tx_comp_stale_entry_handle(struct dp_soc *soc, uint32_t ring_num,
6695 			      QDF_STATUS status)
6696 {
6697 	return QDF_STATUS_SUCCESS;
6698 }
6699 #endif
6700 
6701 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
6702 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
6703 			    uint32_t quota)
6704 {
6705 	void *tx_comp_hal_desc;
6706 	void *last_prefetched_hw_desc = NULL;
6707 	void *last_hw_desc = NULL;
6708 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
6709 	hal_soc_handle_t hal_soc;
6710 	uint8_t buffer_src;
6711 	struct dp_tx_desc_s *tx_desc = NULL;
6712 	struct dp_tx_desc_s *head_desc = NULL;
6713 	struct dp_tx_desc_s *tail_desc = NULL;
6714 	struct dp_tx_desc_s *fast_head_desc = NULL;
6715 	struct dp_tx_desc_s *fast_tail_desc = NULL;
6716 	uint32_t num_processed = 0;
6717 	uint32_t fast_desc_count = 0;
6718 	uint32_t count;
6719 	uint32_t num_avail_for_reap = 0;
6720 	bool force_break = false;
6721 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
6722 	int max_reap_limit, ring_near_full;
6723 	uint32_t num_entries;
6724 	qdf_nbuf_queue_head_t h;
6725 	QDF_STATUS status;
6726 
6727 	DP_HIST_INIT();
6728 
6729 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
6730 
6731 more_data:
6732 
6733 	hal_soc = soc->hal_soc;
6734 	/* Re-initialize local variables to be re-used */
6735 	head_desc = NULL;
6736 	tail_desc = NULL;
6737 	count = 0;
6738 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
6739 
6740 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
6741 							   &max_reap_limit);
6742 
6743 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
6744 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
6745 		return 0;
6746 	}
6747 
6748 	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
6749 
6750 	if (!num_avail_for_reap)
6751 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
6752 							    hal_ring_hdl, 0);
6753 
6754 	if (num_avail_for_reap >= quota)
6755 		num_avail_for_reap = quota;
6756 
6757 	last_hw_desc = dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl,
6758 						    num_avail_for_reap);
6759 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
6760 							    hal_ring_hdl,
6761 							    num_avail_for_reap);
6762 
6763 	dp_tx_nbuf_queue_head_init(&h);
6764 
6765 	/* Find head descriptor from completion ring */
6766 	while (qdf_likely(num_avail_for_reap--)) {
6767 
6768 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
6769 		if (qdf_unlikely(!tx_comp_hal_desc))
6770 			break;
6771 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
6772 							   tx_comp_hal_desc);
6773 
		/* If this buffer was not released by TQM or FW, it is not a
		 * Tx completion indication; log the error and skip it.
		 */
6776 		if (qdf_unlikely(buffer_src !=
6777 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
6778 				 (qdf_unlikely(buffer_src !=
6779 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
6780 			uint8_t wbm_internal_error;
6781 
6782 			dp_err_rl(
6783 				"Tx comp release_src != TQM | FW but from %d",
6784 				buffer_src);
6785 			hal_dump_comp_desc(tx_comp_hal_desc);
6786 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
6787 
6788 			/* When WBM sees NULL buffer_addr_info in any of
6789 			 * ingress rings it sends an error indication,
6790 			 * with wbm_internal_error=1, to a specific ring.
6791 			 * The WBM2SW ring used to indicate these errors is
6792 			 * fixed in HW, and that ring is being used as Tx
6793 			 * completion ring. These errors are not related to
6794 			 * Tx completions, and should just be ignored
6795 			 */
6796 			wbm_internal_error = hal_get_wbm_internal_error(
6797 							hal_soc,
6798 							tx_comp_hal_desc);
6799 
6800 			if (wbm_internal_error) {
6801 				dp_err_rl("Tx comp wbm_internal_error!!");
6802 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
6803 
6804 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
6805 								buffer_src)
6806 					dp_handle_wbm_internal_error(
6807 						soc,
6808 						tx_comp_hal_desc,
6809 						hal_tx_comp_get_buffer_type(
6810 							tx_comp_hal_desc));
6811 
6812 			} else {
6813 				dp_err_rl("Tx comp wbm_internal_error false");
6814 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
6815 			}
6816 			continue;
6817 		}
6818 
6819 		status = soc->arch_ops.tx_comp_get_params_from_hal_desc(
6820 							soc, tx_comp_hal_desc,
6821 							&tx_desc);
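		/*
		 * A NULL SW descriptor with a pending status indicates a
		 * stale HW entry; rewind the tail pointer and retry it in
		 * a later pass instead of processing it now.
		 */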
6822 		if (qdf_unlikely(!tx_desc)) {
6823 			if (QDF_IS_STATUS_SUCCESS(
6824 				dp_tx_comp_stale_entry_handle(soc, ring_id,
6825 							      status))) {
6826 				hal_srng_dst_dec_tp(hal_soc, hal_ring_hdl);
6827 				break;
6828 			}
6829 
6830 			dp_err("unable to retrieve tx_desc!");
6831 			hal_dump_comp_desc(tx_comp_hal_desc);
6832 			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
6833 			QDF_BUG(0);
6834 			continue;
6835 		}
6836 
6837 		dp_tx_comp_reset_stale_entry_detection(soc, ring_id);
6838 		tx_desc->buffer_src = buffer_src;
6839 
6840 		/*
6841 		 * If the release source is FW, process the HTT status
6842 		 */
6843 		if (qdf_unlikely(buffer_src ==
6844 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
6845 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
6846 
6847 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
6848 					htt_tx_status);
6849 			/* Collect hw completion contents */
6850 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6851 					      &tx_desc->comp, 1);
6852 			soc->arch_ops.dp_tx_process_htt_completion(
6853 							soc,
6854 							tx_desc,
6855 							htt_tx_status,
6856 							ring_id);
6857 			if (qdf_unlikely(!tx_desc->pdev)) {
6858 				dp_tx_dump_tx_desc(tx_desc);
6859 			}
6860 		} else {
6861 			if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6862 			    tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
6863 				goto add_to_pool2;
6864 
6865 			tx_desc->tx_status =
6866 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
6867 			tx_desc->buffer_src = buffer_src;
			/*
			 * If fast completion mode is enabled, the extended
			 * metadata is not copied from the descriptor
			 */
6872 			if (qdf_likely(tx_desc->flags &
6873 						DP_TX_DESC_FLAG_SIMPLE))
6874 				goto add_to_pool;
6875 
			/*
			 * If the descriptor was already freed in vdev_detach,
			 * continue to the next descriptor
			 */
6880 			if (qdf_unlikely
6881 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
6882 				 !tx_desc->flags)) {
6883 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
6884 						   tx_desc->id);
6885 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
6886 				dp_tx_desc_check_corruption(tx_desc);
6887 				continue;
6888 			}
6889 
6890 			if (qdf_unlikely(!tx_desc->pdev)) {
6891 				dp_tx_comp_warn("The pdev is NULL in TX desc, ignored.");
6892 				dp_tx_dump_tx_desc(tx_desc);
6893 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
6894 				continue;
6895 			}
6896 
6897 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
6898 				dp_tx_comp_info_rl("pdev in down state %d",
6899 						   tx_desc->id);
6900 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
6901 				dp_tx_comp_free_buf(soc, tx_desc, false);
6902 				dp_tx_desc_release(soc, tx_desc,
6903 						   tx_desc->pool_id);
6904 				goto next_desc;
6905 			}
6906 
6907 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
6908 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flgs = %x, id = %d",
6910 						 tx_desc->flags, tx_desc->id);
6911 				qdf_assert_always(0);
6912 			}
6913 
6914 			/* Collect hw completion contents */
6915 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6916 					      &tx_desc->comp, 1);
6917 add_to_pool:
6918 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
6919 
6920 add_to_pool2:
6921 			/* First ring descriptor on the cycle */
6922 
6923 			if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6924 			    tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
6925 				dp_tx_nbuf_dev_queue_free(&h, tx_desc);
6926 				fast_desc_count++;
6927 				if (!fast_head_desc) {
6928 					fast_head_desc = tx_desc;
6929 					fast_tail_desc = tx_desc;
6930 				}
6931 				fast_tail_desc->next = tx_desc;
6932 				fast_tail_desc = tx_desc;
6933 				dp_tx_desc_clear(tx_desc);
6934 			} else {
6935 				if (!head_desc) {
6936 					head_desc = tx_desc;
6937 					tail_desc = tx_desc;
6938 				}
6939 
6940 				tail_desc->next = tx_desc;
6941 				tx_desc->next = NULL;
6942 				tail_desc = tx_desc;
6943 			}
6944 		}
6945 next_desc:
6946 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
6947 
		/*
		 * If the processed packet count is more than the given
		 * quota, stop processing
		 */
6952 
6953 		count++;
6954 
6955 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
6956 					       num_avail_for_reap,
6957 					       hal_ring_hdl,
6958 					       &last_prefetched_hw_desc,
6959 					       &last_prefetched_sw_desc,
6960 					       last_hw_desc);
6961 
6962 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
6963 			break;
6964 	}
6965 
6966 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
6967 
6968 	/* Process the reaped descriptors that were sent via fast path */
6969 	if (fast_head_desc) {
6970 		dp_tx_comp_process_desc_list_fast(soc, fast_head_desc,
6971 						  fast_tail_desc, ring_id,
6972 						  fast_desc_count);
6973 		dp_tx_nbuf_dev_kfree_list(&h);
6974 	}
6975 
6976 	/* Process the reaped descriptors */
6977 	if (head_desc)
6978 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
6979 
6980 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
6981 
	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
6990 	if (ring_near_full)
6991 		goto more_data;
6992 
6993 	if (dp_tx_comp_enable_eol_data_check(soc)) {
6994 
6995 		if (num_processed >= quota)
6996 			force_break = true;
6997 
6998 		if (!force_break &&
6999 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
7000 						  hal_ring_hdl)) {
7001 			DP_STATS_INC(soc, tx.hp_oos2, 1);
7002 			if (!hif_exec_should_yield(soc->hif_handle,
7003 						   int_ctx->dp_intr_id))
7004 				goto more_data;
7005 
7006 			num_avail_for_reap =
7007 				hal_srng_dst_num_valid_locked(soc->hal_soc,
7008 							      hal_ring_hdl,
7009 							      true);
7010 			if (qdf_unlikely(num_entries &&
7011 					 (num_avail_for_reap >=
7012 					  num_entries >> 1))) {
7013 				DP_STATS_INC(soc, tx.near_full, 1);
7014 				goto more_data;
7015 			}
7016 		}
7017 	}
7018 	DP_TX_HIST_STATS_PER_PDEV();
7019 
7020 	return num_processed;
7021 }
7022 #endif
7023 
7024 #ifdef FEATURE_WLAN_TDLS
7025 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7026 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
7027 {
7028 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7029 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7030 						     DP_MOD_ID_TDLS);
7031 
7032 	if (!vdev) {
7033 		dp_err("vdev handle for id %d is NULL", vdev_id);
7034 		return NULL;
7035 	}
7036 
7037 	if (tx_spec & OL_TX_SPEC_NO_FREE)
7038 		vdev->is_tdls_frame = true;
7039 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
7040 
7041 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
7042 }
7043 #endif
7044 
7045 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
7046 {
7047 	int pdev_id;
7048 	/*
7049 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
7050 	 */
7051 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
7052 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
7053 
7054 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
7055 				       vdev->vdev_id);
7056 
7057 	pdev_id =
7058 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
7059 						       vdev->pdev->pdev_id);
7060 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
7061 
7062 	/*
7063 	 * Set HTT Extension Valid bit to 0 by default
7064 	 */
7065 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
7066 
7067 	dp_tx_vdev_update_search_flags(vdev);
7068 
7069 	return QDF_STATUS_SUCCESS;
7070 }
7071 
7072 #ifndef FEATURE_WDS
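/**
 * dp_tx_da_search_override() - check if the DA based search override applies
 * @vdev: DP vdev handle
 *
 * Return: false when FEATURE_WDS is not enabled
 */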
7073 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
7074 {
7075 	return false;
7076 }
7077 #endif
7078 
7079 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
7080 {
7081 	struct dp_soc *soc = vdev->pdev->soc;
7082 
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled
	 */
7093 	if (vdev->opmode == wlan_op_mode_sta &&
7094 	    vdev->tdls_link_connected)
7095 		vdev->hal_desc_addr_search_flags =
7096 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
7097 	else if ((vdev->opmode == wlan_op_mode_sta) &&
7098 		 !dp_tx_da_search_override(vdev))
7099 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
7100 	else
7101 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
7102 
7103 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
7104 		vdev->search_type = soc->sta_mode_search_policy;
7105 	else
7106 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
7107 }
7108 
7109 #ifdef WLAN_SUPPORT_PPEDS
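/**
 * dp_is_tx_desc_flush_match() - check if a tx descriptor should be flushed
 *				 for the given pdev/vdev
 * @pdev: DP pdev handle
 * @vdev: DP vdev handle, NULL for a pdev level flush
 * @tx_desc: tx descriptor to check
 *
 * PPE-DS descriptors always match; otherwise the descriptor matches when it
 * is allocated and belongs to the given vdev (or pdev when vdev is NULL).
 *
 * Return: true if the descriptor matches, false otherwise
 */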
7110 static inline bool
7111 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
7112 			  struct dp_vdev *vdev,
7113 			  struct dp_tx_desc_s *tx_desc)
7114 {
7115 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7116 		return false;
7117 
7118 	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
7119 		return true;
	/*
	 * If vdev is given, only check whether the descriptor's vdev
	 * matches. If vdev is NULL, check whether the descriptor's
	 * pdev matches.
	 */
7125 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7126 		(tx_desc->pdev == pdev);
7127 }
7128 #else
7129 static inline bool
7130 dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
7131 			  struct dp_vdev *vdev,
7132 			  struct dp_tx_desc_s *tx_desc)
7133 {
7134 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7135 		return false;
7136 
	/*
	 * If vdev is given, only check whether the descriptor's vdev
	 * matches. If vdev is NULL, check whether the descriptor's
	 * pdev matches.
	 */
7142 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7143 		(tx_desc->pdev == pdev);
7144 }
7145 #endif
7146 
7147 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7148 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7149 		      bool force_free)
7150 {
7151 	uint8_t i;
7152 	uint32_t j;
7153 	uint32_t num_desc, page_id, offset;
7154 	uint16_t num_desc_per_page;
7155 	struct dp_soc *soc = pdev->soc;
7156 	struct dp_tx_desc_s *tx_desc = NULL;
7157 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
7158 
7159 	if (!vdev && !force_free) {
7160 		dp_err("Reset TX desc vdev, Vdev param is required!");
7161 		return;
7162 	}
7163 
7164 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
7165 		tx_desc_pool = &soc->tx_desc[i];
7166 		if (!(tx_desc_pool->pool_size) ||
7167 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
7168 		    !(tx_desc_pool->desc_pages.cacheable_pages))
7169 			continue;
7170 
		/*
		 * Take the flow pool lock to protect against the pool being
		 * freed when all tx descriptors are recycled during TX
		 * completion handling. The lock is not needed for a force
		 * flush because:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. dp interrupts have been disabled before the force TX
		 *    desc flush in dp_pdev_deinit().
		 */
7180 		if (!force_free)
7181 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
7182 		num_desc = tx_desc_pool->pool_size;
7183 		num_desc_per_page =
7184 			tx_desc_pool->desc_pages.num_element_per_page;
7185 		for (j = 0; j < num_desc; j++) {
7186 			page_id = j / num_desc_per_page;
7187 			offset = j % num_desc_per_page;
7188 
7189 			if (qdf_unlikely(!(tx_desc_pool->
7190 					 desc_pages.cacheable_pages)))
7191 				break;
7192 
7193 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7194 						  false);
7195 
7196 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7197 				/*
7198 				 * Free TX desc if force free is
7199 				 * required, otherwise only reset vdev
7200 				 * in this TX desc.
7201 				 */
7202 				if (force_free) {
7203 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
7204 					dp_tx_comp_free_buf(soc, tx_desc,
7205 							    false);
7206 					dp_tx_desc_release(soc, tx_desc, i);
7207 				} else {
7208 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7209 				}
7210 			}
7211 		}
7212 		if (!force_free)
7213 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
7214 	}
7215 }
7216 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
7217 /**
7218  * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
7219  *
7220  * @soc: Handle to DP soc structure
7221  * @tx_desc: pointer of one TX desc
7222  * @desc_pool_id: TX Desc pool id
7223  * @spcl_pool: Special pool
7224  */
7225 static inline void
7226 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
7227 		      uint8_t desc_pool_id, bool spcl_pool)
7228 {
7229 	struct dp_tx_desc_pool_s *pool = NULL;
7230 
7231 	pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, desc_pool_id) :
7232 				dp_get_tx_desc_pool(soc, desc_pool_id);
7233 	TX_DESC_LOCK_LOCK(&pool->lock);
7234 
7235 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7236 
7237 	TX_DESC_LOCK_UNLOCK(&pool->lock);
7238 }
7239 
7240 void __dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7241 			bool force_free, bool spcl_pool)
7242 {
7243 	uint8_t i, num_pool;
7244 	uint32_t j;
7245 	uint32_t num_desc_t, page_id, offset;
7246 	uint16_t num_desc_per_page;
7247 	struct dp_soc *soc = pdev->soc;
7248 	struct dp_tx_desc_s *tx_desc = NULL;
7249 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
7250 
7251 	if (!vdev && !force_free) {
7252 		dp_err("Reset TX desc vdev, Vdev param is required!");
7253 		return;
7254 	}
7255 
7256 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7257 
7258 	for (i = 0; i < num_pool; i++) {
7259 		tx_desc_pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, i) :
7260 						dp_get_tx_desc_pool(soc, i);
7261 
7262 		num_desc_t = tx_desc_pool->elem_count;
7263 		if (!tx_desc_pool->desc_pages.cacheable_pages)
7264 			continue;
7265 
7266 		num_desc_per_page =
7267 			tx_desc_pool->desc_pages.num_element_per_page;
7268 		for (j = 0; j < num_desc_t; j++) {
7269 			page_id = j / num_desc_per_page;
7270 			offset = j % num_desc_per_page;
7271 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7272 						  spcl_pool);
7273 
7274 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7275 				if (force_free) {
7276 					dp_tx_comp_free_buf(soc, tx_desc,
7277 							    false);
7278 					dp_tx_desc_release(soc, tx_desc, i);
7279 				} else {
7280 					dp_tx_desc_reset_vdev(soc, tx_desc,
7281 							      i, spcl_pool);
7282 				}
7283 			}
7284 		}
7285 	}
7286 }
7287 
7288 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7289 		      bool force_free)
7290 {
7291 	__dp_tx_desc_flush(pdev, vdev, force_free, false);
7292 	__dp_tx_desc_flush(pdev, vdev, force_free, true);
7293 }
7294 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
7295 
7296 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
7297 {
7298 	struct dp_pdev *pdev = vdev->pdev;
7299 
7300 	/* Reset TX desc associated to this Vdev as NULL */
7301 	dp_tx_desc_flush(pdev, vdev, false);
7302 
7303 	return QDF_STATUS_SUCCESS;
7304 }
7305 
7306 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7307 /* Pools will be allocated dynamically */
7308 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
7309 					   int num_desc)
7310 {
7311 	uint8_t i;
7312 
7313 	for (i = 0; i < num_pool; i++) {
7314 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
7315 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
7316 	}
7317 
7318 	return QDF_STATUS_SUCCESS;
7319 }
7320 
7321 static QDF_STATUS dp_tx_spcl_alloc_static_pools(struct dp_soc *soc,
7322 						int num_pool,
7323 						int num_spcl_desc)
7324 {
7325 	return QDF_STATUS_SUCCESS;
7326 }
7327 
7328 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
7329 					  uint32_t num_desc)
7330 {
7331 	return QDF_STATUS_SUCCESS;
7332 }
7333 
7334 static QDF_STATUS dp_tx_spcl_init_static_pools(struct dp_soc *soc, int num_pool,
7335 					       uint32_t num_spcl_desc)
7336 {
7337 	return QDF_STATUS_SUCCESS;
7338 }
7339 
7340 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
7341 {
7342 }
7343 
7344 static void dp_tx_spcl_deinit_static_pools(struct dp_soc *soc, int num_pool)
7345 {
7346 }
7347 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
7348 {
7349 	uint8_t i;
7350 
7351 	for (i = 0; i < num_pool; i++)
7352 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
7353 }
7354 
7355 static void dp_tx_spcl_delete_static_pools(struct dp_soc *soc, int num_pool)
7356 {
7357 }
7358 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
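/**
 * dp_tx_alloc_static_pools() - allocate the static software tx descriptor
 *				pools
 * @soc: DP SoC handle
 * @num_pool: number of descriptor pools
 * @num_desc: number of descriptors per pool
 *
 * The pools are allocated only for the first SoC of a given arch; a global
 * reference count tracks the number of users.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */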
7359 static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
7360 					   uint32_t num_desc)
7361 {
7362 	uint8_t i, count;
7363 	struct dp_global_context *dp_global;
7364 
7365 	dp_global = wlan_objmgr_get_global_ctx();
7366 
7367 	/* Allocate software Tx descriptor pools */
7368 
7369 	if (dp_global->tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7370 		for (i = 0; i < num_pool; i++) {
			if (dp_tx_desc_pool_alloc(soc, i, num_desc, false)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Tx Desc Pool alloc %d failed %pK"),
					  i, soc);
				goto fail;
			}
7377 		}
7378 	}
7379 	dp_global->tx_desc_pool_alloc_cnt[soc->arch_id]++;
7380 	return QDF_STATUS_SUCCESS;
7381 
7382 fail:
7383 	for (count = 0; count < i; count++)
7384 		dp_tx_desc_pool_free(soc, count, false);
7385 	return QDF_STATUS_E_NOMEM;
7386 }
7387 
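/**
 * dp_tx_spcl_alloc_static_pools() - allocate the special software tx
 *				     descriptor pools
 * @soc: DP SoC handle
 * @num_pool: number of descriptor pools
 * @num_spcl_desc: number of special descriptors per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */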
7388 static QDF_STATUS dp_tx_spcl_alloc_static_pools(struct dp_soc *soc,
7389 						int num_pool,
7390 						uint32_t num_spcl_desc)
7391 {
7392 	uint8_t j, count;
7393 	struct dp_global_context *dp_global;
7394 
7395 	dp_global = wlan_objmgr_get_global_ctx();
7396 
7397 	/* Allocate software Tx descriptor pools */
7398 	if (dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7399 		for (j = 0; j < num_pool; j++) {
7400 			if (dp_tx_desc_pool_alloc(soc, j, num_spcl_desc, true)) {
7401 				QDF_TRACE(QDF_MODULE_ID_DP,
7402 					  QDF_TRACE_LEVEL_ERROR,
7403 					  FL("Tx special Desc Pool alloc %d failed %pK"),
7404 					      j, soc);
7405 				goto fail;
7406 			}
7407 		}
7408 	}
7409 	dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id]++;
7410 	return QDF_STATUS_SUCCESS;
7411 
7412 fail:
7413 	for (count = 0; count < j; count++)
7414 		dp_tx_desc_pool_free(soc, count, true);
7415 	return QDF_STATUS_E_NOMEM;
7416 }
7417 
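/**
 * dp_tx_init_static_pools() - initialize the static software tx descriptor
 *			       pools
 * @soc: DP SoC handle
 * @num_pool: number of descriptor pools
 * @num_desc: number of descriptors per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */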
7418 static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
7419 					  uint32_t num_desc)
7420 {
7421 	uint8_t i;
7422 	struct dp_global_context *dp_global;
7423 
7424 	dp_global = wlan_objmgr_get_global_ctx();
7425 
7426 	if (dp_global->tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7427 		for (i = 0; i < num_pool; i++) {
7428 			if (dp_tx_desc_pool_init(soc, i, num_desc, false)) {
7429 				QDF_TRACE(QDF_MODULE_ID_DP,
7430 					  QDF_TRACE_LEVEL_ERROR,
7431 					  FL("Tx Desc Pool init %d failed %pK"),
7432 					  i, soc);
7433 				return QDF_STATUS_E_NOMEM;
7434 			}
7435 		}
7436 	}
7437 	dp_global->tx_desc_pool_init_cnt[soc->arch_id]++;
7438 	return QDF_STATUS_SUCCESS;
7439 }
7440 
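/**
 * dp_tx_spcl_init_static_pools() - initialize the special software tx
 *				    descriptor pools
 * @soc: DP SoC handle
 * @num_pool: number of descriptor pools
 * @num_spcl_desc: number of special descriptors per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */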
7441 static QDF_STATUS dp_tx_spcl_init_static_pools(struct dp_soc *soc, int num_pool,
7442 					       uint32_t num_spcl_desc)
7443 {
7444 	uint8_t i;
7445 	struct dp_global_context *dp_global;
7446 
7447 	dp_global = wlan_objmgr_get_global_ctx();
7448 
7449 	if (dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7450 		for (i = 0; i < num_pool; i++) {
7451 			if (dp_tx_desc_pool_init(soc, i, num_spcl_desc, true)) {
7452 				QDF_TRACE(QDF_MODULE_ID_DP,
7453 					  QDF_TRACE_LEVEL_ERROR,
7454 					  FL("Tx special Desc Pool init %d failed %pK"),
7455 					  i, soc);
7456 				return QDF_STATUS_E_NOMEM;
7457 			}
7458 		}
7459 	}
7460 	dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id]++;
7461 	return QDF_STATUS_SUCCESS;
7462 }
7463 
7464 static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
7465 {
7466 	uint8_t i;
7467 	struct dp_global_context *dp_global;
7468 
7469 	dp_global = wlan_objmgr_get_global_ctx();
7470 
7471 	dp_global->tx_desc_pool_init_cnt[soc->arch_id]--;
7472 	if (dp_global->tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7473 		for (i = 0; i < num_pool; i++)
7474 			dp_tx_desc_pool_deinit(soc, i, false);
7475 	}
7476 }
7477 
7478 static void dp_tx_spcl_deinit_static_pools(struct dp_soc *soc, int num_pool)
7479 {
7480 	uint8_t i;
7481 	struct dp_global_context *dp_global;
7482 
7483 	dp_global = wlan_objmgr_get_global_ctx();
7484 
7485 	dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id]--;
7486 	if (dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7487 		for (i = 0; i < num_pool; i++)
7488 			dp_tx_desc_pool_deinit(soc, i, true);
7489 	}
7490 }
7491 
7492 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
7493 {
7494 	uint8_t i;
7495 	struct dp_global_context *dp_global;
7496 
7497 	dp_global = wlan_objmgr_get_global_ctx();
7498 
7499 	dp_global->tx_desc_pool_alloc_cnt[soc->arch_id]--;
7500 	if (dp_global->tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7501 		for (i = 0; i < num_pool; i++)
7502 			dp_tx_desc_pool_free(soc, i, false);
7503 	}
7504 }
7505 
7506 static void dp_tx_spcl_delete_static_pools(struct dp_soc *soc, int num_pool)
7507 {
7508 	uint8_t i;
7509 	struct dp_global_context *dp_global;
7510 
7511 	dp_global = wlan_objmgr_get_global_ctx();
7512 
7513 	dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id]--;
7514 	if (dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7515 		for (i = 0; i < num_pool; i++)
7516 			dp_tx_desc_pool_free(soc, i, true);
7517 	}
7518 }
7519 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
7520 
7521 /**
7522  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
7523  * @soc: core txrx main context
7524  * @num_pool: number of pools
7525  *
7526  */
7527 static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
7528 {
7529 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
7530 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
7531 }
7532 
7533 /**
7534  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
7535  * @soc: core txrx main context
7536  * @num_pool: number of pools
7537  *
7538  */
7539 static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
7540 {
7541 	dp_tx_tso_desc_pool_free(soc, num_pool);
7542 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
7543 }
7544 
7545 #ifndef WLAN_SOFTUMAC_SUPPORT
7546 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
7547 {
7548 	uint8_t num_pool, num_ext_pool;
7549 
7550 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7551 		return;
7552 
7553 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7554 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7555 
7556 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
7557 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
7558 	dp_tx_delete_static_pools(soc, num_pool);
7559 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7560 }
7561 
7562 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
7563 {
7564 	uint8_t num_pool, num_ext_pool;
7565 
7566 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7567 		return;
7568 
7569 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7570 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7571 
7572 	dp_tx_flow_control_deinit(soc);
7573 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
7574 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
7575 	dp_tx_deinit_static_pools(soc, num_pool);
7576 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7577 }
7578 #else
7579 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
7580 {
7581 	uint8_t num_pool;
7582 
7583 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7584 		return;
7585 
7586 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7587 
7588 	dp_tx_delete_static_pools(soc, num_pool);
7589 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7590 }
7591 
7592 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
7593 {
7594 	uint8_t num_pool;
7595 
7596 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7597 		return;
7598 
7599 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7600 
7601 	dp_tx_flow_control_deinit(soc);
7602 	dp_tx_deinit_static_pools(soc, num_pool);
7603 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7604 }
7605 #endif /*WLAN_SOFTUMAC_SUPPORT*/
7606 
7607 /**
7608  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
7609  * @soc: DP soc handle
7610  * @num_pool: Number of pools
7611  * @num_desc: Number of descriptors
7612  *
7613  * Reserve TSO descriptor buffers
7614  *
7615  * Return: QDF_STATUS_E_FAILURE on failure or
7616  *         QDF_STATUS_SUCCESS on success
7617  */
7618 static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
7619 						uint8_t num_pool,
7620 						uint32_t num_desc)
7621 {
7622 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
7623 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
7624 		return QDF_STATUS_E_FAILURE;
7625 	}
7626 
7627 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
7628 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
7629 		       num_pool, soc);
7630 		return QDF_STATUS_E_FAILURE;
7631 	}
7632 	return QDF_STATUS_SUCCESS;
7633 }
7634 
7635 /**
7636  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
7637  * @soc: DP soc handle
7638  * @num_pool: Number of pools
7639  * @num_desc: Number of descriptors
7640  *
7641  * Initialize TSO descriptor pools
7642  *
7643  * Return: QDF_STATUS_E_FAILURE on failure or
7644  *         QDF_STATUS_SUCCESS on success
7645  */
7646 
7647 static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
7648 					       uint8_t num_pool,
7649 					       uint32_t num_desc)
7650 {
7651 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
7653 		return QDF_STATUS_E_FAILURE;
7654 	}
7655 
7656 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
7658 		       num_pool, soc);
7659 		return QDF_STATUS_E_FAILURE;
7660 	}
7661 	return QDF_STATUS_SUCCESS;
7662 }
7663 
7664 #ifndef WLAN_SOFTUMAC_SUPPORT
7665 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
7666 {
7667 	uint8_t num_pool, num_ext_pool;
7668 	uint32_t num_desc;
7669 	uint32_t num_spcl_desc;
7670 	uint32_t num_ext_desc;
7671 
7672 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7673 		return QDF_STATUS_SUCCESS;
7674 
7675 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7676 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7677 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7678 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7679 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7680 
7681 	dp_info("Tx Desc Alloc num_pool: %d descs: %d", num_pool, num_desc);
7682 
7683 	if ((num_pool > MAX_TXDESC_POOLS) ||
7684 	    (num_ext_pool > MAX_TXDESC_POOLS) ||
7685 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX) ||
7686 	    (num_spcl_desc > WLAN_CFG_NUM_TX_SPL_DESC_MAX))
7687 		goto fail1;
7688 
7689 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
7690 		goto fail1;
7691 
7692 	if (dp_tx_spcl_alloc_static_pools(soc, num_pool, num_spcl_desc))
7693 		goto fail2;
7694 
7695 	if (dp_tx_ext_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
7696 		goto fail3;
7697 
7698 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
7699 		return QDF_STATUS_SUCCESS;
7700 
7701 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
7702 		goto fail4;
7703 
7704 	return QDF_STATUS_SUCCESS;
7705 
7706 fail4:
7707 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
7708 fail3:
7709 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7710 fail2:
7711 	dp_tx_delete_static_pools(soc, num_pool);
7712 fail1:
7713 	return QDF_STATUS_E_RESOURCES;
7714 }
7715 
7716 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
7717 {
7718 	uint8_t num_pool, num_ext_pool;
7719 	uint32_t num_desc;
7720 	uint32_t num_spcl_desc;
7721 	uint32_t num_ext_desc;
7722 
7723 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7724 		return QDF_STATUS_SUCCESS;
7725 
7726 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7727 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7728 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7729 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7730 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7731 
7732 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
7733 		goto fail1;
7734 
7735 	if (dp_tx_spcl_init_static_pools(soc, num_pool, num_spcl_desc))
7736 		goto fail2;
7737 
7738 	if (dp_tx_ext_desc_pool_init(soc, num_ext_pool, num_ext_desc))
7739 		goto fail3;
7740 
7741 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
7742 		return QDF_STATUS_SUCCESS;
7743 
7744 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_pool, num_ext_desc))
7745 		goto fail4;
7746 
7747 	dp_tx_flow_control_init(soc);
7748 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
7749 	return QDF_STATUS_SUCCESS;
7750 
7751 fail4:
7752 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
7753 fail3:
7754 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7755 fail2:
7756 	dp_tx_deinit_static_pools(soc, num_pool);
7757 fail1:
7758 	return QDF_STATUS_E_RESOURCES;
7759 }
7760 
7761 #else
7762 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
7763 {
7764 	uint8_t num_pool;
7765 	uint32_t num_desc;
7766 	uint32_t num_spcl_desc;
7767 
7768 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7769 		return QDF_STATUS_SUCCESS;
7770 
7771 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7772 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7773 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7774 
7775 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7776 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
7777 		  __func__, num_pool, num_desc);
7778 
7779 	if ((num_pool > MAX_TXDESC_POOLS) ||
7780 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX) ||
7781 	    (num_spcl_desc > WLAN_CFG_NUM_TX_SPL_DESC_MAX))
7782 		goto fail1;
7783 
7784 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
7785 		goto fail1;
7786 
7787 	if (dp_tx_spcl_alloc_static_pools(soc, num_pool, num_spcl_desc))
7788 		goto fail2;
7789 	return QDF_STATUS_SUCCESS;
7790 
7791 fail2:
7792 	dp_tx_delete_static_pools(soc, num_pool);
7793 fail1:
7794 	return QDF_STATUS_E_RESOURCES;
7795 }
7796 
7797 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
7798 {
7799 	uint8_t num_pool;
7800 	uint32_t num_desc;
7801 	uint32_t num_spcl_desc;
7802 
7803 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7804 		return QDF_STATUS_SUCCESS;
7805 
7806 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7807 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7808 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7809 
7810 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
7811 		goto fail;
7812 
7813 	if (dp_tx_spcl_init_static_pools(soc, num_pool, num_spcl_desc))
7814 		goto fail1;
7815 
7816 	dp_tx_flow_control_init(soc);
7817 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
7818 	return QDF_STATUS_SUCCESS;
7819 fail1:
7820 	dp_tx_deinit_static_pools(soc, num_pool);
7821 fail:
7822 	return QDF_STATUS_E_RESOURCES;
7823 }
7824 #endif
7825 
7826 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
7827 {
7828 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7829 	uint8_t num_ext_desc_pool;
7830 	uint32_t num_ext_desc;
7831 
7832 	num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
7833 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7834 
7835 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_desc_pool, num_ext_desc))
7836 		return QDF_STATUS_E_FAILURE;
7837 
7838 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_desc_pool, num_ext_desc))
7839 		return QDF_STATUS_E_FAILURE;
7840 
7841 	return QDF_STATUS_SUCCESS;
7842 }
7843 
7844 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
7845 {
7846 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7847 	uint8_t num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
7848 
7849 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_ext_desc_pool);
7850 	dp_tx_tso_cmn_desc_pool_free(soc, num_ext_desc_pool);
7851 
7852 	return QDF_STATUS_SUCCESS;
7853 }
7854 
7855 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
7856 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
7857 			  enum qdf_pkt_timestamp_index index, uint64_t time,
7858 			  qdf_nbuf_t nbuf)
7859 {
7860 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
7861 		uint64_t tsf_time;
7862 
7863 		if (vdev->get_tsf_time) {
7864 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
7865 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
7866 		}
7867 	}
7868 }
7869 
7870 void dp_pkt_get_timestamp(uint64_t *time)
7871 {
7872 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
7873 		*time = qdf_get_log_timestamp();
7874 }
7875 #endif
7876 
7877 #ifdef QCA_MULTIPASS_SUPPORT
7878 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
7879 				 struct dp_tx_msdu_info_s *msdu_info,
7880 				 uint16_t group_key)
7881 {
7882 	struct htt_tx_msdu_desc_ext2_t *meta_data =
7883 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
7884 
7885 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
7886 
	/*
	 * When sending a multicast packet with multiple passphrases, the
	 * host shall add the HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
	 * (see htt.h), carry the group key index in the "key_flags" field and
	 * set "valid_key_flags" to 1, i.e. assign "key_flags = group_key_ix".
	 */
7893 	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0],
7894 						       1);
7895 	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
7896 }
7897 
7898 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
7899 	defined(WLAN_MCAST_MLO)
/**
 * dp_tx_need_mcast_reinject() - check if the frame needs to be processed
 *				 in the reinject path
 * @vdev: DP vdev handle
 *
 * Return: true if reinject handling is required, else false
 */
7906 static inline bool
7907 dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
7908 {
7909 	if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
7910 		return true;
7911 
7912 	return false;
7913 }
7914 #else
7915 static inline bool
7916 dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
7917 {
7918 	return false;
7919 }
7920 #endif
7921 
/**
 * dp_tx_need_multipass_process() - check if the frame needs multi-passphrase
 *				    processing
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @buf: frame
 * @vlan_id: vlan id of frame
 *
 * Return: whether the peer is special or classic
 */
7931 static
7932 uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
7933 				     qdf_nbuf_t buf, uint16_t *vlan_id)
7934 {
7935 	struct dp_txrx_peer *txrx_peer = NULL;
7936 	struct dp_peer *peer = NULL;
7937 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
7938 	struct vlan_ethhdr *veh = NULL;
7939 	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
7940 			(htons(eh->ether_type) != ETH_P_8021Q));
7941 	struct cdp_peer_info peer_info = { 0 };
7942 
7943 	if (qdf_unlikely(not_vlan))
7944 		return DP_VLAN_UNTAGGED;
7945 
7946 	veh = (struct vlan_ethhdr *)eh;
7947 	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
7948 
7949 	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
7950 		/* look for handling of multicast packets in reinject path */
7951 		if (dp_tx_need_mcast_reinject(vdev))
7952 			return DP_VLAN_UNTAGGED;
7953 
7954 		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
7955 		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
7956 			      mpass_peer_list_elem) {
7957 			if (*vlan_id == txrx_peer->vlan_id) {
7958 				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
7959 				return DP_VLAN_TAGGED_MULTICAST;
7960 			}
7961 		}
7962 		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
7963 		return DP_VLAN_UNTAGGED;
7964 	}
7965 
7966 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, eh->ether_dhost,
7967 				 false, CDP_WILD_PEER_TYPE);
7968 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
7969 					 DP_MOD_ID_TX_MULTIPASS);
7970 	if (qdf_unlikely(!peer))
7971 		return DP_VLAN_UNTAGGED;
7972 
7973 	/*
7974 	 * Do not drop the frame when vlan_id doesn't match.
7975 	 * Send the frame as it is.
7976 	 */
7977 	if (*vlan_id == peer->txrx_peer->vlan_id) {
7978 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
7979 		return DP_VLAN_TAGGED_UNICAST;
7980 	}
7981 
7982 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
7983 	return DP_VLAN_UNTAGGED;
7984 }
7985 
7986 #ifndef WLAN_REPEATER_NOT_SUPPORTED
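/**
 * dp_tx_multipass_send_pkt_to_repeater() - send a copy of the multicast
 *					    frame to classic repeaters with
 *					    the vlan tag intact
 * @soc: DP SoC handle
 * @vdev: DP vdev handle
 * @nbuf: multicast frame
 * @msdu_info: msdu info of the frame
 *
 * Return: none
 */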
7987 static inline void
7988 dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
7989 				     qdf_nbuf_t nbuf,
7990 				     struct dp_tx_msdu_info_s *msdu_info)
7991 {
7992 	qdf_nbuf_t nbuf_copy = NULL;
7993 
7994 	/* AP can have classic clients, special clients &
7995 	 * classic repeaters.
7996 	 * 1. Classic clients & special client:
7997 	 *	Remove vlan header, find corresponding group key
7998 	 *	index, fill in metaheader and enqueue multicast
7999 	 *	frame to TCL.
8000 	 * 2. Classic repeater:
8001 	 *	Pass through to classic repeater with vlan tag
8002 	 *	intact without any group key index. Hardware
8003 	 *	will know which key to use to send frame to
8004 	 *	repeater.
8005 	 */
8006 	nbuf_copy = qdf_nbuf_copy(nbuf);
8007 
8008 	/*
8009 	 * Send multicast frame to special peers even
8010 	 * if pass through to classic repeater fails.
8011 	 */
8012 	if (nbuf_copy) {
8013 		struct dp_tx_msdu_info_s msdu_info_copy;
8014 
8015 		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
8016 		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
8017 		msdu_info_copy.xmit_type =
8018 			qdf_nbuf_get_vdev_xmit_type(nbuf);
8019 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
8020 		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
8021 						   &msdu_info_copy,
8022 						   HTT_INVALID_PEER, NULL);
8023 		if (nbuf_copy) {
8024 			qdf_nbuf_free(nbuf_copy);
8025 			dp_info_rl("nbuf_copy send failed");
8026 		}
8027 	}
8028 }
8029 #else
8030 static inline void
8031 dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
8032 				     qdf_nbuf_t nbuf,
8033 				     struct dp_tx_msdu_info_s *msdu_info)
8034 {
8035 }
8036 #endif
8037 
8038 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
8039 			     qdf_nbuf_t nbuf,
8040 			     struct dp_tx_msdu_info_s *msdu_info)
8041 {
8042 	uint16_t vlan_id = 0;
8043 	uint16_t group_key = 0;
8044 	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
8045 
8046 	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0]))
8047 		return true;
8048 
8049 	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
8050 
8051 	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
8052 	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
8053 		return true;
8054 
8055 	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
8056 		dp_tx_remove_vlan_tag(vdev, nbuf);
8057 		return true;
8058 	}
8059 
8060 	dp_tx_multipass_send_pkt_to_repeater(soc, vdev, nbuf, msdu_info);
8061 	group_key = vdev->iv_vlan_map[vlan_id];
8062 
8063 	/*
8064 	 * If group key is not installed, drop the frame.
8065 	 */
8066 	if (!group_key)
8067 		return false;
8068 
8069 	dp_tx_remove_vlan_tag(vdev, nbuf);
8070 	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
8071 	msdu_info->exception_fw = 1;
8072 	return true;
8073 }
8074 #endif /* QCA_MULTIPASS_SUPPORT */
8075