xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
63 #define DP_TX_DESC_FLAG_RMNET		0x8000
64 /*
65  * Since the Tx descriptor flag is of only 16-bit and no more bit is free for
66  * any new flag, therefore for time being overloading PPEDS flag with that of
67  * FLUSH flag and FLAG_FAST with TDLS which is not enabled for WIN.
68  */
69 #define DP_TX_DESC_FLAG_PPEDS		0x2000
70 #define DP_TX_DESC_FLAG_FAST		0x100
71 
72 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
73 
74 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
75 do {                                                           \
76 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
77 	qdf_nbuf_free(buf);                                    \
78 } while (0)
79 
80 #define OCB_HEADER_VERSION	 1
81 
82 #ifdef TX_PER_PDEV_DESC_POOL
83 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
84 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
85 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
86 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
87 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
88 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
89 #else
90 	#ifdef TX_PER_VDEV_DESC_POOL
91 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
92 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
93 	#endif /* TX_PER_VDEV_DESC_POOL */
94 #endif /* TX_PER_PDEV_DESC_POOL */
95 #define DP_TX_QUEUE_MASK 0x3
96 
97 #define MAX_CDP_SEC_TYPE 12
98 
99 /* number of dwords for htt_tx_msdu_desc_ext2_t */
100 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
101 
102 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
103 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
104 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
105 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
106 #define dp_tx_info(params...) \
107 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
108 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
109 
110 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
111 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
112 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
113 #define dp_tx_comp_info(params...) \
114 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
115 #define dp_tx_comp_info_rl(params...) \
116 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
117 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
118 
119 #ifndef QCA_HOST_MODE_WIFI_DISABLED
120 
/**
 * struct dp_tx_frag_info_s - Per-fragment buffer info within a Tx segment
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits (only 16 bits are stored here)
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
134 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (at most DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment (singly linked list)
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
150 
151 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
152 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 * 	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
171 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;	/* SW Tx descriptor pool index */
	uint8_t ring_id;	/* HW TCL data ring index */
};
185 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 * 	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id: vdev_id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 * @buf_len: buffer length (QCA_DP_TX_RMNET_OPTIMIZATION builds only)
 * @payload_addr: address of the frame payload
 *                (QCA_DP_TX_RMNET_OPTIMIZATION builds only)
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
	uint16_t buf_len;
	uint8_t *payload_addr;
#endif
};
234 
235 #ifndef QCA_HOST_MODE_WIFI_DISABLED
236 /**
237  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
238  * @soc: core txrx context
239  * @index: index of ring to deinit
240  *
241  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
242  * index of the respective TCL/WBM2SW release in soc structure.
243  * For example, if the index is 2 then &soc->tcl_data_ring[2]
244  * and &soc->tx_comp_ring[2] will be deinitialized.
245  *
246  * Return: none
247  */
248 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
249 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
250 
251 void
252 dp_tx_comp_process_desc_list(struct dp_soc *soc,
253 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
254 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
255 			       bool delayed_free);
256 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
257 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
258 			 uint8_t tid, uint8_t ring_id);
259 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
260 				  struct dp_tx_desc_s *tx_desc,
261 				  struct hal_tx_completion_status *ts,
262 				  struct dp_txrx_peer *txrx_peer,
263 				  uint8_t ring_id);
264 void dp_tx_comp_process_desc(struct dp_soc *soc,
265 			     struct dp_tx_desc_s *desc,
266 			     struct hal_tx_completion_status *ts,
267 			     struct dp_txrx_peer *txrx_peer);
268 void dp_tx_reinject_handler(struct dp_soc *soc,
269 			    struct dp_vdev *vdev,
270 			    struct dp_tx_desc_s *tx_desc,
271 			    uint8_t *status,
272 			    uint8_t reinject_reason);
273 void dp_tx_inspect_handler(struct dp_soc *soc,
274 			   struct dp_vdev *vdev,
275 			   struct dp_tx_desc_s *tx_desc,
276 			   uint8_t *status);
277 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
278 				   uint32_t length, uint8_t tx_status,
279 				   bool update);
280 
281 #ifdef DP_UMAC_HW_RESET_SUPPORT
282 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
283 
284 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
285 			  qdf_nbuf_t nbuf,
286 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
287 #endif
288 #ifdef WLAN_SUPPORT_PPEDS
289 void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
290 #else
291 static inline
292 void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
293 {
294 }
295 #endif
296 #ifndef QCA_HOST_MODE_WIFI_DISABLED
297 /**
 * dp_tso_soc_attach() - TSO Attach handler
299  * @txrx_soc: Opaque Dp handle
300  *
301  * Reserve TSO descriptor buffers
302  *
303  * Return: QDF_STATUS_E_FAILURE on failure or
304  * QDF_STATUS_SUCCESS on success
305  */
306 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
307 
308 /**
 * dp_tso_soc_detach() - TSO Detach handler
310  * @txrx_soc: Opaque Dp handle
311  *
312  * Deallocate TSO descriptor buffers
313  *
314  * Return: QDF_STATUS_E_FAILURE on failure or
315  * QDF_STATUS_SUCCESS on success
316  */
317 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
318 
319 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
320 
321 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
322 				    qdf_nbuf_t nbuf);
323 
324 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
325 				qdf_nbuf_t nbuf,
326 				struct cdp_tx_exception_metadata *tx_exc);
327 
328 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
329 					      uint8_t vdev_id,
330 					      qdf_nbuf_t nbuf,
331 				struct cdp_tx_exception_metadata *tx_exc);
332 
333 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
334 			   qdf_nbuf_t nbuf);
335 qdf_nbuf_t
336 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
337 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
338 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
339 
340 #if QDF_LOCK_STATS
341 noinline qdf_nbuf_t
342 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
343 			 struct dp_tx_msdu_info_s *msdu_info);
344 #else
345 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
346 				    struct dp_tx_msdu_info_s *msdu_info);
347 #endif
348 #ifdef FEATURE_WLAN_TDLS
349 /**
350  * dp_tx_non_std() - Allow the control-path SW to send data frames
351  * @soc_hdl: Datapath soc handle
352  * @vdev_id: id of vdev
353  * @tx_spec: what non-standard handling to apply to the tx data frames
354  * @msdu_list: NULL-terminated list of tx MSDUs
355  *
356  * Return: NULL on success,
357  *         nbuf when it fails to send
358  */
359 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
360 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
361 #endif
362 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
363 
364 /**
365  * dp_tx_comp_handler() - Tx completion handler
366  * @int_ctx: pointer to DP interrupt context
367  * @soc: core txrx main context
368  * @hal_srng: Opaque HAL SRNG pointer
369  * @ring_id: completion ring id
370  * @quota: No. of packets/descriptors that can be serviced in one loop
371  *
372  * This function will collect hardware release ring element contents and
373  * handle descriptor contents. Based on contents, free packet or handle error
374  * conditions
375  *
376  * Return: Number of TX completions processed
377  */
378 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
379 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
380 			    uint32_t quota);
381 
382 QDF_STATUS
383 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
384 
385 QDF_STATUS
386 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
387 
388 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
389 
390 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* Multicast enhancement is not compiled in; teardown is a no-op. */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
395 #endif
396 
397 /**
398  * dp_tx_pdev_init() - dp tx pdev init
399  * @pdev: physical device instance
400  *
401  * Return: QDF_STATUS_SUCCESS: success
402  *         QDF_STATUS_E_RESOURCES: Error return
403  */
404 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
405 {
406 	struct dp_soc *soc = pdev->soc;
407 
408 	/* Initialize Flow control counters */
409 	qdf_atomic_init(&pdev->num_tx_outstanding);
410 	pdev->tx_descs_max = 0;
411 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
412 		/* Initialize descriptors in TCL Ring */
413 		hal_tx_init_data_ring(soc->hal_soc,
414 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
415 	}
416 
417 	return QDF_STATUS_SUCCESS;
418 }
419 
/**
 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: Handle to DP Soc structure
 * @hal_soc: HAL SOC handle
 * @num_avail_for_reap: descriptors available for reap
 * @hal_ring_hdl: ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: pointer to last prefetch SW desc
 *
 * Return: None
 */
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm up the first two cache lines (0 and +64 bytes) of the nbuf
	 * attached to the previously resolved SW descriptor.
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		/* Resolve the SW descriptor matching the prefetched HW desc;
		 * it will have its nbuf prefetched on the next call.
		 */
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		/* Advance the HW prefetch pointer. NOTE(review): the 0x3f
		 * test presumably distinguishes a descriptor in the middle
		 * of an already-prefetched 64-byte chunk (advance within it)
		 * from one on a 64-byte boundary (prefetch the next chunk) —
		 * confirm against the hal_srng implementation.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
464 #else
/* Prefetch support not compiled in: no-op stub. */
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
475 #endif
476 
477 #ifndef FEATURE_WDS
/* WDS is not compiled in; MEC handling is a no-op. */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
482 #endif
483 
484 #ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	/* Multipass support not compiled in: let every frame proceed. */
	return true;
}
492 
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	/* Multipass support not compiled in: nothing to tear down. */
}
497 
498 #else
499 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
500 			     qdf_nbuf_t nbuf,
501 			     struct dp_tx_msdu_info_s *msdu_info);
502 
503 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
504 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
505 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
506 				 struct dp_tx_msdu_info_s *msdu_info,
507 				 uint16_t group_key);
508 #endif
509 
510 /**
511  * dp_tx_hw_to_qdf()- convert hw status to qdf status
512  * @status: hw status
513  *
514  * Return: qdf tx rx status
515  */
516 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
517 {
518 	switch (status) {
519 	case HAL_TX_TQM_RR_FRAME_ACKED:
520 		return QDF_TX_RX_STATUS_OK;
521 	case HAL_TX_TQM_RR_REM_CMD_TX:
522 		return QDF_TX_RX_STATUS_NO_ACK;
523 	case HAL_TX_TQM_RR_REM_CMD_REM:
524 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
525 	case HAL_TX_TQM_RR_REM_CMD_AGED:
526 		return QDF_TX_RX_STATUS_FW_DISCARD;
527 	default:
528 		return QDF_TX_RX_STATUS_DEFAULT;
529 	}
530 }
531 
532 #ifndef QCA_HOST_MODE_WIFI_DISABLED
533 /**
534  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
535  * @vdev: DP Virtual device handle
536  * @nbuf: Buffer pointer
537  * @queue: queue ids container for nbuf
538  *
539  * TX packet queue has 2 instances, software descriptors id and dma ring id
540  * Based on tx feature and hardware configuration queue id combination could be
541  * different.
542  * For example -
543  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
544  * With no XPS,lock based resource protection, Descriptor pool ids are different
545  * for each vdev, dma ring id will be same as single pdev id
546  *
547  * Return: None
548  */
549 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
550 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
551 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
552 {
553 	queue->ring_id = qdf_get_cpu();
554 	queue->desc_pool_id = queue->ring_id;
555 }
556 
557 /*
558  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
559  * @dp_soc - DP soc structure pointer
560  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
561  *
562  * Return - HAL ring handle
563  */
564 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
565 						       uint8_t ring_id)
566 {
567 	if (ring_id == soc->num_tcl_data_rings)
568 		return soc->tcl_cmd_credit_ring.hal_srng;
569 
570 	return soc->tcl_data_ring[ring_id].hal_srng;
571 }
572 
573 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
574 
575 #ifdef TX_MULTI_TCL
576 #ifdef IPA_OFFLOAD
577 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
578 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
579 {
580 	/* get flow id */
581 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
582 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
583 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
584 	else
585 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
586 					vdev->pdev->soc->num_tcl_data_rings);
587 }
588 #else
589 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
590 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
591 {
592 	/* get flow id */
593 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
594 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
595 				vdev->pdev->soc->num_tcl_data_rings);
596 }
597 #endif
598 #else
599 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
600 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
601 {
602 	/* get flow id */
603 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
604 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
605 }
606 #endif
607 
/**
 * dp_tx_get_hal_ring_hdl() - Get the TCL data ring handle for @ring_id
 * @soc: DP soc structure pointer
 * @ring_id: TCL data ring id
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
613 #endif
614 
615 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less variant: accesses the ring without taking the SRNG lock.
 *
 * Return: status from hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}
628 
/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less variant: ends the ring access without the SRNG lock.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
641 
/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * No-op in the lock-less configuration.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
654 
655 #else
/* Default variants: use the locking hal_srng access APIs. */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
674 #endif
675 
676 #ifdef ATH_TX_PRI_OVERRIDE
677 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
678 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
679 #else
680 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
681 #endif
682 
683 /* TODO TX_FEATURE_NOT_YET */
/* Exception-frame completion handling is not implemented yet; no-op. */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
688 /* TODO TX_FEATURE_NOT_YET */
689 
690 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
691 		      bool force_free);
692 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
693 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
694 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
695 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
696 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
697 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
698 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
699 void
700 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
701 			     uint32_t buf_type);
702 #else /* QCA_HOST_MODE_WIFI_DISABLED */
703 
/* Stubs for QCA_HOST_MODE_WIFI_DISABLED builds: Tx SW descriptor pool
 * management and vdev attach/detach become no-ops that report success.
 */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
743 
744 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
745 
746 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
747 	defined(QCA_TX_CAPTURE_SUPPORT) || \
748 	defined(QCA_MCOPY_SUPPORT)
749 #ifdef FEATURE_PERPKT_INFO
750 QDF_STATUS
751 dp_get_completion_indication_for_stack(struct dp_soc *soc,
752 				       struct dp_pdev *pdev,
753 				       struct dp_txrx_peer *peer,
754 				       struct hal_tx_completion_status *ts,
755 				       qdf_nbuf_t netbuf,
756 				       uint64_t time_latency);
757 
758 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
759 			    uint16_t peer_id, uint32_t ppdu_id,
760 			    qdf_nbuf_t netbuf);
761 #endif
762 #else
/* Per-packet completion indication is not compiled in: report
 * no-support / silently drop the completion.
 */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
780 #endif
781 
782 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
783 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
784 				       struct dp_tx_desc_s *desc,
785 				       struct hal_tx_completion_status *ts);
786 #else
/* Packet-capture v2 not compiled in: completion is not forwarded. */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
793 #endif
794 
795 #ifndef QCA_HOST_MODE_WIFI_DISABLED
796 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
797 /**
798  * dp_tx_update_stats() - Update soc level tx stats
799  * @soc: DP soc handle
800  * @tx_desc: TX descriptor reference
801  * @ring_id: TCL ring id
802  *
803  * Returns: none
804  */
805 void dp_tx_update_stats(struct dp_soc *soc,
806 			struct dp_tx_desc_s *tx_desc,
807 			uint8_t ring_id);
808 
809 /**
810  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
811  * @soc: Datapath soc handle
812  * @tx_desc: tx packet descriptor
813  * @tid: TID for pkt transmission
814  * @msdu_info: MSDU info of tx packet
815  * @ring_id: TCL ring id
816  *
817  * Returns: 1, if coalescing is to be done
818  *	    0, if coalescing is not to be done
819  */
820 int
821 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
822 			 struct dp_tx_desc_s *tx_desc,
823 			 uint8_t tid,
824 			 struct dp_tx_msdu_info_s *msdu_info,
825 			 uint8_t ring_id);
826 
827 /**
828  * dp_tx_ring_access_end() - HAL ring access end for data transmission
829  * @soc: Datapath soc handle
830  * @hal_ring_hdl: HAL ring handle
831  * @coalesce: Coalesce the current write or not
832  *
833  * Returns: none
834  */
835 void
836 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
837 		      int coalesce);
838 #else
839 /**
840  * dp_tx_update_stats() - Update soc level tx stats
841  * @soc: DP soc handle
842  * @tx_desc: TX descriptor reference
843  * @ring_id: TCL ring id
844  *
845  * Returns: none
846  */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	/* No SW latency manager: always complete the ring access now */
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	/* Coalescing unavailable: 0 means "do not coalesce" */
	return 0;
}
867 
868 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
869 
870 #ifdef FEATURE_RUNTIME_PM
871 /**
872  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
873  * @soc_hdl: DP soc handle
874  * @is_high_tput: flag to indicate whether throughput is high
875  *
876  * Returns: none
877  */
878 static inline
879 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
880 					 bool is_high_tput)
881 {
882 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
883 
884 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
885 }
886 
887 void
888 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
889 			      hal_ring_handle_t hal_ring_hdl,
890 			      int coalesce);
891 #else
892 #ifdef DP_POWER_SAVE
893 void
894 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
895 			      hal_ring_handle_t hal_ring_hdl,
896 			      int coalesce);
897 #else
/* Neither runtime PM nor DP power save is enabled: the wrapper
 * degenerates to the plain ring access end.
 */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
905 #endif
906 
/* Runtime PM disabled: throughput policy hints are ignored. */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
911 #endif
912 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
913 
914 #ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_tx_hw_desc_update_evt() - Record a posted TCL HW descriptor in history
 * @hal_tx_desc_cached: CPU-cached copy of the HW descriptor just posted
 * @hal_ring_hdl: HAL handle of the TCL ring the descriptor was posted to
 * @soc: DP soc handle
 * @ring_id: TCL ring id
 *
 * Copies the descriptor bytes together with a timestamp, ring id and the
 * SW head/tail pointers into the circular tx_hw_desc_history for debug.
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	/* Skip silently if the history buffer was never allocated */
	if (!tx_hw_desc_history->allocated)
		return;

	/* Atomically claim the next (slot, idx) entry of the circular,
	 * fragment-based history buffer.
	 */
	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
941 #else
/* HW descriptor history not compiled in: no-op stub. */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
}
948 #endif
949 
950 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
951 /**
952  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
953  * @ts: Tx completion status
954  * @delta_tsf: Difference between TSF clock and qtimer
955  * @delay_us: Delay in microseconds
956  *
957  * Return: QDF_STATUS_SUCCESS   : Success
958  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
959  *                                delay_us is NULL
960  *         QDF_STATUS_E_FAILURE : Error in delay calculation
961  */
962 QDF_STATUS
963 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
964 			  uint32_t delta_tsf,
965 			  uint32_t *delay_us);
966 
967 /**
968  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
969  * @soc_hdl: cdp soc pointer
970  * @vdev_id: vdev id
971  * @delta_tsf: difference between TSF clock and qtimer
972  *
973  * Return: None
974  */
975 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
976 		      uint32_t delta_tsf);
977 #endif
978 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
979 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
981  * @soc_hdl: cdp soc pointer
982  * @vdev_id: vdev id
983  * @enable: true to enable and false to disable
984  *
985  * Return: QDF_STATUS
986  */
987 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
988 				      uint8_t vdev_id, bool enable);
989 
990 /**
991  * dp_get_uplink_delay() - Get uplink delay value
992  * @soc_hdl: cdp soc pointer
993  * @vdev_id: vdev id
994  * @val: pointer to save uplink delay value
995  *
996  * Return: QDF_STATUS
997  */
998 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
999 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
1001 
1002 /**
1003  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
1004  *
1005  * Return: True if any tx pkt tracepoint is enabled else false
1006  */
1007 static inline
1008 bool dp_tx_pkt_tracepoints_enabled(void)
1009 {
1010 	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
1011 		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
1012 		qdf_trace_dp_tx_comp_pkt_enabled());
1013 }
1014 
1015 #ifdef DP_TX_TRACKING
1016 /**
1017  * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
1018  * @tx_desc - tx descriptor
1019  *
1020  * Return: None
1021  */
1022 static inline
1023 void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
1024 {
1025 	tx_desc->timestamp_tick = qdf_system_ticks();
1026 }
1027 
1028 /**
1029  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1030  * @tx_desc: tx descriptor
1031  *
1032  * Check for corruption in tx descriptor, if magic pattern is not matching
1033  * trigger self recovery
1034  *
1035  * Return: none
1036  */
1037 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1038 #else
/* DP_TX_TRACKING disabled: descriptor timestamping is a no-op */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}
1043 
/* DP_TX_TRACKING disabled: descriptor corruption check is a no-op */
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
1048 #endif
1049 
1050 #ifndef CONFIG_SAWF
/*
 * Without CONFIG_SAWF an nbuf never carries a valid SAWF tag; the real
 * implementation presumably comes from dp_sawf.h (included under
 * CONFIG_SAWF at the top of this header) — confirm there.
 */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1055 #endif
1056 
1057 #ifdef HW_TX_DELAY_STATS_ENABLE
1058 /**
1059  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1060  * @vdev: DP vdev handle
1061  * @tx_desc: tx descriptor
1062  *
1063  * Return: true when descriptor is timestamped, false otherwise
1064  */
1065 static inline
1066 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1067 			       struct dp_tx_desc_s *tx_desc)
1068 {
1069 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1070 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1071 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1072 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1073 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
1074 		tx_desc->timestamp = qdf_ktime_real_get();
1075 		return true;
1076 	}
1077 	return false;
1078 }
1079 #else
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Variant for builds without HW_TX_DELAY_STATS_ENABLE: the per-vdev
 * dp_is_vdev_tx_delay_stats_enabled() condition is not compiled in.
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	/* Consumers: pdev delay stats, extended peer stats, tx completion
	 * tracepoints, and peer stats.
	 */
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1093 #endif
1094 
1095 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1096 /**
1097  * dp_pkt_add_timestamp() - add timestamp in data payload
1098  *
1099  * @vdev: dp vdev
1100  * @index: index to decide offset in payload
1101  * @time: timestamp to add in data payload
1102  * @nbuf: network buffer
1103  *
1104  * Return: none
1105  */
1106 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1107 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1108 			  qdf_nbuf_t nbuf);
1109 /**
1110  * dp_pkt_get_timestamp() - get current system time
1111  *
1112  * @time: return current system time
1113  *
1114  * Return: none
1115  */
1116 void dp_pkt_get_timestamp(uint64_t *time);
1117 #else
1118 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1119 
/* CONFIG_DP_PKT_ADD_TIMESTAMP disabled: no timestamp is produced.
 * NOTE(review): *time is left unwritten here; callers appear to only use
 * the value via dp_pkt_add_timestamp(), which is an empty macro in this
 * configuration — confirm no caller reads *time independently.
 */
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
1124 #endif
1125 
1126 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1127 /**
1128  * dp_update_tx_desc_stats - Update the increase or decrease in
1129  * outstanding tx desc count
1130  * values on pdev and soc
1131  * @vdev: DP pdev handle
1132  *
1133  * Return: void
1134  */
1135 static inline void
1136 dp_update_tx_desc_stats(struct dp_pdev *pdev)
1137 {
1138 	int32_t tx_descs_cnt =
1139 		qdf_atomic_read(&pdev->num_tx_outstanding);
1140 	if (pdev->tx_descs_max < tx_descs_cnt)
1141 		pdev->tx_descs_max = tx_descs_cnt;
1142 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
1143 				   pdev->tx_descs_max);
1144 }
1145 
1146 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
1147 
/* CONFIG_WLAN_SYSFS_MEM_STATS disabled: desc stats update is a no-op */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
1152 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1153 
1154 #ifdef QCA_TX_LIMIT_CHECK
1155 static inline bool is_spl_packet(qdf_nbuf_t nbuf)
1156 {
1157 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1158 		return true;
1159 	return false;
1160 }
1161 
1162 /**
1163  * is_dp_spl_tx_limit_reached - Check if the packet is a special packet to allow
1164  * allocation if allocated tx descriptors are within the soc max limit
1165  * and pdev max limit.
1166  * @vdev: DP vdev handle
1167  *
1168  * Return: true if allocated tx descriptors reached max configured value, else
1169  * false
1170  */
1171 static inline bool
1172 is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1173 {
1174 	struct dp_pdev *pdev = vdev->pdev;
1175 	struct dp_soc *soc = pdev->soc;
1176 
1177 	if (is_spl_packet(nbuf)) {
1178 		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1179 				soc->num_tx_allowed)
1180 			return true;
1181 
1182 		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1183 			pdev->num_tx_allowed)
1184 			return true;
1185 
1186 		return false;
1187 	}
1188 
1189 	return true;
1190 }
1191 
1192 /**
1193  * dp_tx_limit_check - Check if allocated tx descriptors reached
1194  * soc max reg limit and pdev max reg limit for regular packets. Also check if
1195  * the limit is reached for special packets.
1196  * @vdev: DP vdev handle
1197  *
1198  * Return: true if allocated tx descriptors reached max limit for regular
1199  * packets and in case of special packets, if the limit is reached max
1200  * configured vale for the soc/pdev, else false
1201  */
1202 static inline bool
1203 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1204 {
1205 	struct dp_pdev *pdev = vdev->pdev;
1206 	struct dp_soc *soc = pdev->soc;
1207 
1208 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1209 			soc->num_reg_tx_allowed) {
1210 		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
1211 			dp_tx_info("queued packets are more than max tx, drop the frame");
1212 			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1213 			return true;
1214 		}
1215 	}
1216 
1217 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1218 			pdev->num_reg_tx_allowed) {
1219 		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
1220 			dp_tx_info("queued packets are more than max tx, drop the frame");
1221 			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1222 			DP_STATS_INC(vdev,
1223 				     tx_i.dropped.desc_na_exc_outstand.num, 1);
1224 			return true;
1225 		}
1226 	}
1227 	return false;
1228 }
1229 
1230 /**
1231  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
1232  * reached soc max limit
1233  * @vdev: DP vdev handle
1234  *
1235  * Return: true if allocated tx descriptors reached max configured value, else
1236  * false
1237  */
1238 static inline bool
1239 dp_tx_exception_limit_check(struct dp_vdev *vdev)
1240 {
1241 	struct dp_pdev *pdev = vdev->pdev;
1242 	struct dp_soc *soc = pdev->soc;
1243 
1244 	if (qdf_atomic_read(&soc->num_tx_exception) >=
1245 			soc->num_msdu_exception_desc) {
1246 		dp_info("exc packets are more than max drop the exc pkt");
1247 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
1248 		return true;
1249 	}
1250 
1251 	return false;
1252 }
1253 
1254 /**
1255  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
1256  * @vdev: DP pdev handle
1257  *
1258  * Return: void
1259  */
1260 static inline void
1261 dp_tx_outstanding_inc(struct dp_pdev *pdev)
1262 {
1263 	struct dp_soc *soc = pdev->soc;
1264 
1265 	qdf_atomic_inc(&pdev->num_tx_outstanding);
1266 	qdf_atomic_inc(&soc->num_tx_outstanding);
1267 	dp_update_tx_desc_stats(pdev);
1268 }
1269 
1270 /**
1271  * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc
1272  * @vdev: DP pdev handle
1273  *
1274  * Return: void
1275  */
1276 static inline void
1277 dp_tx_outstanding_dec(struct dp_pdev *pdev)
1278 {
1279 	struct dp_soc *soc = pdev->soc;
1280 
1281 	qdf_atomic_dec(&pdev->num_tx_outstanding);
1282 	qdf_atomic_dec(&soc->num_tx_outstanding);
1283 	dp_update_tx_desc_stats(pdev);
1284 }
1285 
1286 #else //QCA_TX_LIMIT_CHECK
/* QCA_TX_LIMIT_CHECK disabled: the tx limit is never reported as reached */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}
1292 
/* QCA_TX_LIMIT_CHECK disabled: exception limit is never reported reached */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}
1298 
/* QCA_TX_LIMIT_CHECK disabled: only the pdev counter is maintained
 * (the soc-level counter is not tracked in this configuration).
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1305 
/* QCA_TX_LIMIT_CHECK disabled: only the pdev counter is maintained
 * (the soc-level counter is not tracked in this configuration).
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1312 #endif //QCA_TX_LIMIT_CHECK
1313 /**
1314  * dp_tx_get_pkt_len() - Get the packet length of a msdu
1315  * @tx_desc: tx descriptor
1316  *
1317  * Return: Packet length of a msdu. If the packet is fragmented,
1318  * it will return the single fragment length.
1319  *
1320  * In TSO mode, the msdu from stack will be fragmented into small
1321  * fragments and each of these new fragments will be transmitted
1322  * as an individual msdu.
1323  *
1324  * Please note that the length of a msdu from stack may be smaller
1325  * than the length of the total length of the fragments it has been
1326  * fragmentted because each of the fragments has a nbuf header.
1327  */
1328 static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
1329 {
1330 	return tx_desc->frm_type == dp_tx_frm_tso ?
1331 		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
1332 		qdf_nbuf_len(tx_desc->nbuf);
1333 }
1334 #endif
1335