xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
63 #define DP_TX_DESC_FLAG_RMNET		0x8000
64 /*
65  * Since the Tx descriptor flag is of only 16-bit and no more bit is free for
66  * any new flag, therefore for time being overloading PPEDS flag with that of
67  * FLUSH flag and FLAG_FAST with TDLS which is not enabled for WIN.
68  */
69 #define DP_TX_DESC_FLAG_PPEDS		0x2000
70 #define DP_TX_DESC_FLAG_FAST		0x100
71 
72 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
73 
74 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
75 do {                                                           \
76 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
77 	qdf_nbuf_free(buf);                                    \
78 } while (0)
79 
80 #define OCB_HEADER_VERSION	 1
81 
82 #ifdef TX_PER_PDEV_DESC_POOL
83 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
84 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
85 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
86 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
87 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
88 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
89 #else
90 	#ifdef TX_PER_VDEV_DESC_POOL
91 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
92 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
93 	#endif /* TX_PER_VDEV_DESC_POOL */
94 #endif /* TX_PER_PDEV_DESC_POOL */
95 #define DP_TX_QUEUE_MASK 0x3
96 
97 #define MAX_CDP_SEC_TYPE 12
98 
99 /* number of dwords for htt_tx_msdu_desc_ext2_t */
100 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
101 
102 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
103 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
104 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
105 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
106 #define dp_tx_info(params...) \
107 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
108 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
109 
110 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
111 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
112 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
113 #define dp_tx_comp_info(params...) \
114 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
115 #define dp_tx_comp_info_rl(params...) \
116 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
117 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
118 
119 #ifndef QCA_HOST_MODE_WIFI_DISABLED
120 
/**
 * struct dp_tx_frag_info_s - Per-fragment info for a tx segment
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
134 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (at most DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
150 
151 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
152 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 * 	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
171 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
185 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 * 	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id : vdev_id for reinjected mcast packets
 * @skip_hp_update : Skip HP update for TSO segments and update in last segment
 * @buf_len: buffer length, used only when QCA_DP_TX_RMNET_OPTIMIZATION is set
 * @payload_addr: payload address, used only with the RMNET optimization
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
	uint16_t buf_len;
	uint8_t *payload_addr;
#endif
};
234 
235 #ifndef QCA_HOST_MODE_WIFI_DISABLED
236 /**
237  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
238  * @soc: core txrx context
239  * @index: index of ring to deinit
240  *
241  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
242  * index of the respective TCL/WBM2SW release in soc structure.
243  * For example, if the index is 2 then &soc->tcl_data_ring[2]
244  * and &soc->tx_comp_ring[2] will be deinitialized.
245  *
246  * Return: none
247  */
248 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
249 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
250 
251 void
252 dp_tx_comp_process_desc_list(struct dp_soc *soc,
253 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
254 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
255 			       bool delayed_free);
256 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
257 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
258 			 uint8_t tid, uint8_t ring_id);
259 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
260 				  struct dp_tx_desc_s *tx_desc,
261 				  struct hal_tx_completion_status *ts,
262 				  struct dp_txrx_peer *txrx_peer,
263 				  uint8_t ring_id);
264 void dp_tx_comp_process_desc(struct dp_soc *soc,
265 			     struct dp_tx_desc_s *desc,
266 			     struct hal_tx_completion_status *ts,
267 			     struct dp_txrx_peer *txrx_peer);
268 void dp_tx_reinject_handler(struct dp_soc *soc,
269 			    struct dp_vdev *vdev,
270 			    struct dp_tx_desc_s *tx_desc,
271 			    uint8_t *status,
272 			    uint8_t reinject_reason);
273 void dp_tx_inspect_handler(struct dp_soc *soc,
274 			   struct dp_vdev *vdev,
275 			   struct dp_tx_desc_s *tx_desc,
276 			   uint8_t *status);
277 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
278 				   uint32_t length, uint8_t tx_status,
279 				   bool update);
280 
281 #ifdef DP_UMAC_HW_RESET_SUPPORT
282 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
283 
284 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
285 			  qdf_nbuf_t nbuf,
286 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
287 #endif
288 #ifdef WLAN_SUPPORT_PPEDS
289 void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
290 #else
/* WLAN_SUPPORT_PPEDS disabled: no PPE-DS tx descriptors exist, so no-op */
static inline
void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
295 #endif
296 #ifndef QCA_HOST_MODE_WIFI_DISABLED
297 /**
 * dp_tso_soc_attach() - TSO Attach handler
299  * @txrx_soc: Opaque Dp handle
300  *
301  * Reserve TSO descriptor buffers
302  *
303  * Return: QDF_STATUS_E_FAILURE on failure or
304  * QDF_STATUS_SUCCESS on success
305  */
306 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
307 
308 /**
 * dp_tso_soc_detach() - TSO Detach handler
310  * @txrx_soc: Opaque Dp handle
311  *
312  * Deallocate TSO descriptor buffers
313  *
314  * Return: QDF_STATUS_E_FAILURE on failure or
315  * QDF_STATUS_SUCCESS on success
316  */
317 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
318 
319 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
320 
321 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
322 				    qdf_nbuf_t nbuf);
323 
324 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
325 				qdf_nbuf_t nbuf,
326 				struct cdp_tx_exception_metadata *tx_exc);
327 
328 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
329 					      uint8_t vdev_id,
330 					      qdf_nbuf_t nbuf,
331 				struct cdp_tx_exception_metadata *tx_exc);
332 
333 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
334 			   qdf_nbuf_t nbuf);
335 qdf_nbuf_t
336 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
337 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
338 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
339 
340 /**
 * dp_tx_mcast_enhance() - Multicast enhancement for tx frame
 * @vdev: DP vdev handle
 * @buf: network buffer to be transmitted
344  *
345  * Return: true on success
346  *         false on failure
347  */
348 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t buf);
349 
350 #if QDF_LOCK_STATS
351 noinline qdf_nbuf_t
352 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
353 			 struct dp_tx_msdu_info_s *msdu_info);
354 #else
355 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
356 				    struct dp_tx_msdu_info_s *msdu_info);
357 #endif
358 #ifdef FEATURE_WLAN_TDLS
359 /**
360  * dp_tx_non_std() - Allow the control-path SW to send data frames
361  * @soc_hdl: Datapath soc handle
362  * @vdev_id: id of vdev
363  * @tx_spec: what non-standard handling to apply to the tx data frames
364  * @msdu_list: NULL-terminated list of tx MSDUs
365  *
366  * Return: NULL on success,
367  *         nbuf when it fails to send
368  */
369 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
370 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
371 #endif
372 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
373 
374 /**
375  * dp_tx_comp_handler() - Tx completion handler
376  * @int_ctx: pointer to DP interrupt context
377  * @soc: core txrx main context
378  * @hal_srng: Opaque HAL SRNG pointer
379  * @ring_id: completion ring id
380  * @quota: No. of packets/descriptors that can be serviced in one loop
381  *
382  * This function will collect hardware release ring element contents and
383  * handle descriptor contents. Based on contents, free packet or handle error
384  * conditions
385  *
386  * Return: Number of TX completions processed
387  */
388 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
389 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
390 			    uint32_t quota);
391 
392 QDF_STATUS
393 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
394 
395 QDF_STATUS
396 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
397 
398 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
399 
400 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* ME (multicast enhancement) tx not compiled in: teardown is a no-op */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
405 #endif
406 
/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Resets the pdev's outstanding-tx accounting, and when the config uses a
 * per-pdev TCL data ring, initializes the descriptors of that ring.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);
	pdev->tx_descs_max = 0;
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}
429 
430 /**
431  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
432  * @soc: Handle to HAL Soc structure
433  * @hal_soc: HAL SOC handle
434  * @num_avail_for_reap: descriptors available for reap
435  * @hal_ring_hdl: ring pointer
436  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
437  * @last_prefetched_sw_desc: pointer to last prefetch SW desc
438  *
439  * Return: None
440  */
441 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm the cache with the first two 64-byte lines of the nbuf
	 * attached to the previously resolved SW descriptor.
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		/* Resolve the SW descriptor for the last prefetched HW desc;
		 * it becomes *last_prefetched_sw_desc for the next call.
		 */
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		/* Advance the HW-desc prefetch pointer. The 0x3f test
		 * checks 64-byte-cacheline alignment: mid-cacheline
		 * descriptors prefetch the next cached descriptor, while
		 * cacheline-aligned ones just step to the next 32-byte
		 * descriptor (already resident from the prior prefetch).
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
474 #else
/* Prefetch support disabled: no-op */
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
485 #endif
486 
487 #ifndef FEATURE_WDS
488 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
489 {
490 	return;
491 }
492 #endif
493 
494 #ifndef QCA_MULTIPASS_SUPPORT
/* Multipass not supported: always accept the frame unmodified */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

/* Multipass not supported: no per-vdev multipass state to tear down */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
507 
508 #else
509 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
510 			     qdf_nbuf_t nbuf,
511 			     struct dp_tx_msdu_info_s *msdu_info);
512 
513 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
514 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
515 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
516 				 struct dp_tx_msdu_info_s *msdu_info,
517 				 uint16_t group_key);
518 #endif
519 
520 /**
521  * dp_tx_hw_to_qdf()- convert hw status to qdf status
522  * @status: hw status
523  *
524  * Return: qdf tx rx status
525  */
526 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
527 {
528 	switch (status) {
529 	case HAL_TX_TQM_RR_FRAME_ACKED:
530 		return QDF_TX_RX_STATUS_OK;
531 	case HAL_TX_TQM_RR_REM_CMD_TX:
532 		return QDF_TX_RX_STATUS_NO_ACK;
533 	case HAL_TX_TQM_RR_REM_CMD_REM:
534 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
535 	case HAL_TX_TQM_RR_REM_CMD_AGED:
536 		return QDF_TX_RX_STATUS_FW_DISCARD;
537 	default:
538 		return QDF_TX_RX_STATUS_DEFAULT;
539 	}
540 }
541 
542 #ifndef QCA_HOST_MODE_WIFI_DISABLED
543 /**
544  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
545  * @vdev: DP Virtual device handle
546  * @nbuf: Buffer pointer
547  * @queue: queue ids container for nbuf
548  *
549  * TX packet queue has 2 instances, software descriptors id and dma ring id
550  * Based on tx feature and hardware configuration queue id combination could be
551  * different.
552  * For example -
553  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
554  * With no XPS,lock based resource protection, Descriptor pool ids are different
555  * for each vdev, dma ring id will be same as single pdev id
556  *
557  * Return: None
558  */
559 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
560 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
561 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
562 {
563 	queue->ring_id = qdf_get_cpu();
564 	queue->desc_pool_id = queue->ring_id;
565 }
566 
567 /*
568  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
569  * @dp_soc - DP soc structure pointer
570  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
571  *
572  * Return - HAL ring handle
573  */
574 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
575 						       uint8_t ring_id)
576 {
577 	if (ring_id == soc->num_tcl_data_rings)
578 		return soc->tcl_cmd_credit_ring.hal_srng;
579 
580 	return soc->tcl_data_ring[ring_id].hal_srng;
581 }
582 
583 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
584 
585 #ifdef TX_MULTI_TCL
586 #ifdef IPA_OFFLOAD
587 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
588 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
589 {
590 	/* get flow id */
591 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
592 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
593 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
594 	else
595 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
596 					vdev->pdev->soc->num_tcl_data_rings);
597 }
598 #else
599 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
600 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
601 {
602 	/* get flow id */
603 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
604 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
605 				vdev->pdev->soc->num_tcl_data_rings);
606 }
607 #endif
608 #else
/* Single-TCL default: pool and ring come from the per-pdev/per-vdev macros */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
616 #endif
617 
/* Non-XPS variant: ring_id indexes the TCL data rings directly */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
623 #endif
624 
625 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less configuration: the srng is accessed without taking its lock.
 *
 * Return: result of hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}
638 
/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access end for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less configuration: completes the unlocked srng access.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
651 
/**
 * dp_tx_hal_ring_access_end_reap() - end hal_tx_ring access after reap
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * No-op in the lock-less configuration.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
664 
665 #else
/* Non-lock-less configuration: wrap the locking srng access helpers */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
684 #endif
685 
686 #ifdef ATH_TX_PRI_OVERRIDE
687 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
688 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
689 #else
690 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
691 #endif
692 
/* TODO TX_FEATURE_NOT_YET */
/* Placeholder: exception-frame completion handling not implemented yet */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
698 /* TODO TX_FEATURE_NOT_YET */
699 
700 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
701 		      bool force_free);
702 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
703 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
704 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
705 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
706 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
707 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
708 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
709 void
710 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
711 			     uint32_t buf_type);
712 #else /* QCA_HOST_MODE_WIFI_DISABLED */
713 
/* QCA_HOST_MODE_WIFI_DISABLED stubs: no host SW tx descriptor pools exist,
 * so allocation/init succeed trivially and free/deinit/flush are no-ops.
 */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
753 
754 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
755 
756 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
757 	defined(QCA_TX_CAPTURE_SUPPORT) || \
758 	defined(QCA_MCOPY_SUPPORT)
759 #ifdef FEATURE_PERPKT_INFO
760 QDF_STATUS
761 dp_get_completion_indication_for_stack(struct dp_soc *soc,
762 				       struct dp_pdev *pdev,
763 				       struct dp_txrx_peer *peer,
764 				       struct hal_tx_completion_status *ts,
765 				       qdf_nbuf_t netbuf,
766 				       uint64_t time_latency);
767 
768 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
769 			    uint16_t peer_id, uint32_t ppdu_id,
770 			    qdf_nbuf_t netbuf);
771 #endif
772 #else
/* Stubs when latency capture / tx capture / mcopy are all disabled:
 * completion indications towards the stack are unsupported.
 */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
790 #endif
791 
792 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
793 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
794 				       struct dp_tx_desc_s *desc,
795 				       struct hal_tx_completion_status *ts);
796 #else
/* Packet-capture v2 disabled: tx completions are not forwarded */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
803 #endif
804 
805 #ifndef QCA_HOST_MODE_WIFI_DISABLED
806 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
807 /**
808  * dp_tx_update_stats() - Update soc level tx stats
809  * @soc: DP soc handle
810  * @tx_desc: TX descriptor reference
811  * @ring_id: TCL ring id
812  *
813  * Returns: none
814  */
815 void dp_tx_update_stats(struct dp_soc *soc,
816 			struct dp_tx_desc_s *tx_desc,
817 			uint8_t ring_id);
818 
819 /**
820  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: DP vdev handle
822  * @tx_desc: tx packet descriptor
823  * @tid: TID for pkt transmission
824  * @msdu_info: MSDU info of tx packet
825  * @ring_id: TCL ring id
826  *
827  * Returns: 1, if coalescing is to be done
828  *	    0, if coalescing is not to be done
829  */
830 int
831 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
832 			 struct dp_tx_desc_s *tx_desc,
833 			 uint8_t tid,
834 			 struct dp_tx_msdu_info_s *msdu_info,
835 			 uint8_t ring_id);
836 
837 /**
838  * dp_tx_ring_access_end() - HAL ring access end for data transmission
839  * @soc: Datapath soc handle
840  * @hal_ring_hdl: HAL ring handle
841  * @coalesce: Coalesce the current write or not
842  *
843  * Returns: none
844  */
845 void
846 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
847 		      int coalesce);
848 #else
/* WLAN_DP_FEATURE_SW_LATENCY_MGR disabled: stats update is a no-op,
 * ring access always completes immediately, and coalescing is never done.
 */

/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Returns: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	/* 0: never coalesce the TCL register write */
	return 0;
}
877 
878 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
879 
880 #ifdef FEATURE_RUNTIME_PM
/**
 * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
 * @soc_hdl: DP soc handle
 * @is_high_tput: flag to indicate whether throughput is high
 *
 * Records the high-throughput hint atomically in the soc so the runtime-PM
 * path can consult it.
 *
 * Returns: none
 */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
					 bool is_high_tput)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}
896 
897 void
898 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
899 			      hal_ring_handle_t hal_ring_hdl,
900 			      int coalesce);
901 #else
902 #ifdef DP_POWER_SAVE
903 void
904 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
905 			      hal_ring_handle_t hal_ring_hdl,
906 			      int coalesce);
907 #else
/* No runtime-PM and no power save: just finish the ring access directly */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
915 #endif
916 
/* FEATURE_RUNTIME_PM disabled: throughput policy hint is ignored */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
921 #endif
922 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
923 
924 #ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_tx_hw_desc_update_evt() - record a posted TCL HW descriptor in history
 * @hal_tx_desc_cached: CPU-cached copy of the HW descriptor just programmed
 * @hal_ring_hdl: HAL ring handle the descriptor was posted to
 * @soc: DP soc handle owning the tx_hw_desc_history buffer
 * @ring_id: TCL ring id
 *
 * Copies the descriptor bytes together with a timestamp, the ring id and
 * the ring's hp/tp into the next history slot for post-mortem debugging.
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	/* History buffer may not be allocated on memory-constrained targets */
	if (!tx_hw_desc_history->allocated)
		return;

	/* Atomically claim the next (slot, idx) entry in the ring history */
	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
951 #else
/* DP_TX_HW_DESC_HISTORY disabled: event recording is a no-op */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
}
958 #endif
959 
960 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
961 /**
962  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
963  * @ts: Tx completion status
964  * @delta_tsf: Difference between TSF clock and qtimer
965  * @delay_us: Delay in microseconds
966  *
967  * Return: QDF_STATUS_SUCCESS   : Success
968  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
969  *                                delay_us is NULL
970  *         QDF_STATUS_E_FAILURE : Error in delay calculation
971  */
972 QDF_STATUS
973 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
974 			  uint32_t delta_tsf,
975 			  uint32_t *delay_us);
976 
977 /**
978  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
979  * @soc_hdl: cdp soc pointer
980  * @vdev_id: vdev id
981  * @delta_tsf: difference between TSF clock and qtimer
982  *
983  * Return: None
984  */
985 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
986 		      uint32_t delta_tsf);
987 #endif
988 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
989 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
991  * @soc_hdl: cdp soc pointer
992  * @vdev_id: vdev id
993  * @enable: true to enable and false to disable
994  *
995  * Return: QDF_STATUS
996  */
997 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
998 				      uint8_t vdev_id, bool enable);
999 
1000 /**
1001  * dp_get_uplink_delay() - Get uplink delay value
1002  * @soc_hdl: cdp soc pointer
1003  * @vdev_id: vdev id
1004  * @val: pointer to save uplink delay value
1005  *
1006  * Return: QDF_STATUS
1007  */
1008 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1009 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
1011 
1012 /**
1013  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
1014  *
1015  * Return: True if any tx pkt tracepoint is enabled else false
1016  */
1017 static inline
1018 bool dp_tx_pkt_tracepoints_enabled(void)
1019 {
1020 	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
1021 		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
1022 		qdf_trace_dp_tx_comp_pkt_enabled());
1023 }
1024 
1025 #ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Records the enqueue time in system ticks, used to detect stale
 * descriptors when DP_TX_TRACKING is enabled.
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}
1037 
1038 /**
1039  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1040  * @tx_desc: tx descriptor
1041  *
1042  * Check for corruption in tx descriptor, if magic pattern is not matching
1043  * trigger self recovery
1044  *
1045  * Return: none
1046  */
1047 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1048 #else
/* DP_TX_TRACKING disabled: timestamping is a no-op */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}
1053 
/* DP_TX_TRACKING disabled: corruption check is a no-op */
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
1058 #endif
1059 
1060 #ifndef CONFIG_SAWF
/* CONFIG_SAWF disabled: the SAWF tag is never considered valid */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1065 #endif
1066 
1067 #ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	/*
	 * Take the real-time stamp only when at least one delay/latency
	 * stats consumer is enabled; otherwise skip the ktime read.
	 */
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1089 #else
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Variant without HW_TX_DELAY_STATS_ENABLE: the per-vdev delay stats
 * condition is not compiled in.
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1103 #endif
1104 
1105 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1106 /**
1107  * dp_pkt_add_timestamp() - add timestamp in data payload
1108  *
1109  * @vdev: dp vdev
1110  * @index: index to decide offset in payload
1111  * @time: timestamp to add in data payload
1112  * @nbuf: network buffer
1113  *
1114  * Return: none
1115  */
1116 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1117 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1118 			  qdf_nbuf_t nbuf);
1119 /**
1120  * dp_pkt_get_timestamp() - get current system time
1121  *
1122  * @time: return current system time
1123  *
1124  * Return: none
1125  */
1126 void dp_pkt_get_timestamp(uint64_t *time);
1127 #else
1128 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1129 
/* CONFIG_DP_PKT_ADD_TIMESTAMP disabled: leaves *time untouched */
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
1134 #endif
1135 
1136 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the increase or decrease in
 * outstanding tx desc count
 * values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	/* Track the high-water mark of outstanding tx descriptors */
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}
1155 
1156 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
1157 
/* CONFIG_WLAN_SYSFS_MEM_STATS disabled: stats update is a no-op */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
1162 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1163 
1164 #ifdef QCA_SUPPORT_GLOBAL_DESC
/**
 * dp_tx_get_global_desc_in_use() - read global descriptors in usage
 * @dp_global: Datapath global context
 *
 * Return: global descriptors in use
 */
static inline int32_t
dp_tx_get_global_desc_in_use(struct dp_global_desc_context *dp_global)
{
	/* Lock-free atomic read of the global in-use counter */
	return qdf_atomic_read(&dp_global->global_descriptor_in_use);
}
1176 #endif
1177 
1178 #ifdef QCA_TX_LIMIT_CHECK
1179 static inline bool is_spl_packet(qdf_nbuf_t nbuf)
1180 {
1181 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1182 		return true;
1183 	return false;
1184 }
1185 
1186 #ifdef QCA_SUPPORT_GLOBAL_DESC
/**
 * is_dp_spl_tx_limit_reached - Check if the packet is a special packet to allow
 * allocation if allocated tx descriptors are within the global max limit
 * and pdev max limit.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_global_desc_context *dp_global;
	uint32_t global_tx_desc_allowed;

	dp_global = wlan_objmgr_get_desc_ctx();
	global_tx_desc_allowed =
		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);

	/* Special packets may consume descriptors up to the full limits */
	if (is_spl_packet(nbuf)) {
		if (dp_tx_get_global_desc_in_use(dp_global) >=
				global_tx_desc_allowed)
			return true;

		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed)
			return true;

		return false;
	}

	/* Regular packet: caller already hit the regular limit */
	return true;
}
1222 
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * global max reg limit and pdev max reg limit for regular packets. Also check
 * if the limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max limit for regular
 * packets and in case of special packets, if the limit is reached max
 * configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_global_desc_context *dp_global;
	uint32_t global_tx_desc_allowed;
	uint32_t global_tx_desc_reg_allowed;
	uint32_t global_tx_desc_spcl_allowed;

	dp_global = wlan_objmgr_get_desc_ctx();
	global_tx_desc_allowed =
		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);
	global_tx_desc_spcl_allowed =
		wlan_cfg_get_num_global_spcl_tx_desc(soc->wlan_cfg_ctx);
	/* Regular-packet budget excludes the special-packet reserve */
	global_tx_desc_reg_allowed = global_tx_desc_allowed -
					global_tx_desc_spcl_allowed;

	if (dp_tx_get_global_desc_in_use(dp_global) >= global_tx_desc_reg_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
1271 #else
/**
 * is_dp_spl_tx_limit_reached - Check if the packet is a special packet to allow
 * allocation if allocated tx descriptors are within the soc max limit
 * and pdev max limit.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Special packets may consume descriptors up to the full limits */
	if (is_spl_packet(nbuf)) {
		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
				soc->num_tx_allowed)
			return true;

		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed)
			return true;

		return false;
	}

	/* Regular packet: caller already hit the regular limit */
	return true;
}
1301 
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max reg limit and pdev max reg limit for regular packets. Also check if
 * the limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max limit for regular
 * packets and in case of special packets, if the limit is reached max
 * configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
1339 #endif
1340 
/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Exception-path msdus have their own soc-wide descriptor budget */
	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}

	return false;
}
1364 
1365 #ifdef QCA_SUPPORT_GLOBAL_DESC
/**
 * dp_tx_outstanding_inc - Inc outstanding tx desc values on global and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_global_desc_context *dp_global;

	dp_global = wlan_objmgr_get_desc_ctx();

	qdf_atomic_inc(&dp_global->global_descriptor_in_use);
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1383 
/**
 * dp_tx_outstanding_dec - Dec outstanding tx desc values on global and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_global_desc_context *dp_global;

	dp_global = wlan_objmgr_get_desc_ctx();

	qdf_atomic_dec(&dp_global->global_descriptor_in_use);
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1401 
1402 #else
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1418 
/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1434 #endif /* QCA_SUPPORT_GLOBAL_DESC */
1435 
1436 #else //QCA_TX_LIMIT_CHECK
/* QCA_TX_LIMIT_CHECK disabled: limit checks always pass */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

/* Only per-pdev accounting is maintained when limit checks are disabled */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1462 #endif //QCA_TX_LIMIT_CHECK
1463 
1464 /**
1465  * dp_tx_get_pkt_len() - Get the packet length of a msdu
1466  * @tx_desc: tx descriptor
1467  *
1468  * Return: Packet length of a msdu. If the packet is fragmented,
1469  * it will return the single fragment length.
1470  *
1471  * In TSO mode, the msdu from stack will be fragmented into small
1472  * fragments and each of these new fragments will be transmitted
1473  * as an individual msdu.
1474  *
1475  * Please note that the length of a msdu from stack may be smaller
1476  * than the length of the total length of the fragments it has been
1477  * fragmentted because each of the fragments has a nbuf header.
1478  */
1479 static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
1480 {
1481 	return tx_desc->frm_type == dp_tx_frm_tso ?
1482 		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
1483 		qdf_nbuf_len(tx_desc->nbuf);
1484 }
1485 #endif
1486