xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
63 /*
64  * Since the Tx descriptor flag is of only 16-bit and no more bit is free for
65  * any new flag, therefore for time being overloading PPEDS flag with that of
66  * FLUSH flag.
67  */
68 #define DP_TX_DESC_FLAG_PPEDS		0x2000
69 
70 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
71 
72 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
73 do {                                                           \
74 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
75 	qdf_nbuf_free(buf);                                    \
76 } while (0)
77 
78 #define OCB_HEADER_VERSION	 1
79 
80 #ifdef TX_PER_PDEV_DESC_POOL
81 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
82 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
83 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
84 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
85 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
86 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
87 #else
88 	#ifdef TX_PER_VDEV_DESC_POOL
89 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
90 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
91 	#endif /* TX_PER_VDEV_DESC_POOL */
92 #endif /* TX_PER_PDEV_DESC_POOL */
93 #define DP_TX_QUEUE_MASK 0x3
94 
95 #define MAX_CDP_SEC_TYPE 12
96 
97 /* number of dwords for htt_tx_msdu_desc_ext2_t */
98 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
99 
100 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
101 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
102 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
103 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
104 #define dp_tx_info(params...) \
105 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
106 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
107 
108 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
109 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
110 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
111 #define dp_tx_comp_info(params...) \
112 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
113 #define dp_tx_comp_info_rl(params...) \
114 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
115 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
116 
117 #ifndef QCA_HOST_MODE_WIFI_DISABLED
118 
/**
 * struct dp_tx_frag_info_s - per-fragment buffer info for a Tx segment
 * @vaddr: HLOS virtual address for the fragment buffer
 * @paddr_lo: physical address, lower 32 bits
 * @paddr_hi: physical address, upper bits (16-bit field)
 * @len: length of the fragment buffer in bytes
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
132 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (up to DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 *
 * One node in a singly linked list of MSDU segments built for
 * segmented (TSO/SG/multicast-enhanced) frames.
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
148 
149 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
150 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw frames are handed to the driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu); that chain is
 * converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
169 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
183 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets (MLO multi-chip only)
 * @vdev_id: vdev_id for reinjected mcast packets (MLO multi-chip only)
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 *		    (SW latency manager builds only)
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
};
228 
229 #ifndef QCA_HOST_MODE_WIFI_DISABLED
230 /**
231  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
232  * @soc: core txrx context
233  * @index: index of ring to deinit
234  *
235  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
236  * index of the respective TCL/WBM2SW release in soc structure.
237  * For example, if the index is 2 then &soc->tcl_data_ring[2]
238  * and &soc->tx_comp_ring[2] will be deinitialized.
239  *
240  * Return: none
241  */
242 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
243 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
244 
245 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
246 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
247 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
248 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
249 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
250 					 uint8_t num_pool,
251 					 uint32_t num_desc);
252 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
253 					uint8_t num_pool,
254 					uint32_t num_desc);
255 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
256 			       bool delayed_free);
257 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
258 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
259 			 uint8_t tid, uint8_t ring_id);
260 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
261 				  struct dp_tx_desc_s *tx_desc,
262 				  struct hal_tx_completion_status *ts,
263 				  struct dp_txrx_peer *txrx_peer,
264 				  uint8_t ring_id);
265 void dp_tx_comp_process_desc(struct dp_soc *soc,
266 			     struct dp_tx_desc_s *desc,
267 			     struct hal_tx_completion_status *ts,
268 			     struct dp_txrx_peer *txrx_peer);
269 void dp_tx_reinject_handler(struct dp_soc *soc,
270 			    struct dp_vdev *vdev,
271 			    struct dp_tx_desc_s *tx_desc,
272 			    uint8_t *status,
273 			    uint8_t reinject_reason);
274 void dp_tx_inspect_handler(struct dp_soc *soc,
275 			   struct dp_vdev *vdev,
276 			   struct dp_tx_desc_s *tx_desc,
277 			   uint8_t *status);
278 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
279 				   uint32_t length, uint8_t tx_status,
280 				   bool update);
281 
282 #ifdef DP_UMAC_HW_RESET_SUPPORT
283 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
284 
285 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
286 			  qdf_nbuf_t nbuf,
287 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
288 #endif
289 #ifdef WLAN_SUPPORT_PPEDS
290 void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
291 #else
292 static inline
293 void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
294 {
295 }
296 #endif
297 #ifndef QCA_HOST_MODE_WIFI_DISABLED
298 /**
 * dp_tso_soc_attach() - TSO Attach handler
300  * @txrx_soc: Opaque Dp handle
301  *
302  * Reserve TSO descriptor buffers
303  *
304  * Return: QDF_STATUS_E_FAILURE on failure or
305  * QDF_STATUS_SUCCESS on success
306  */
307 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
308 
309 /**
 * dp_tso_soc_detach() - TSO Detach handler
311  * @txrx_soc: Opaque Dp handle
312  *
313  * Deallocate TSO descriptor buffers
314  *
315  * Return: QDF_STATUS_E_FAILURE on failure or
316  * QDF_STATUS_SUCCESS on success
317  */
318 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
319 
320 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
321 
322 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
323 				    qdf_nbuf_t nbuf);
324 
325 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
326 				qdf_nbuf_t nbuf,
327 				struct cdp_tx_exception_metadata *tx_exc);
328 
329 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
330 					      uint8_t vdev_id,
331 					      qdf_nbuf_t nbuf,
332 				struct cdp_tx_exception_metadata *tx_exc);
333 
334 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
335 			   qdf_nbuf_t nbuf);
336 qdf_nbuf_t
337 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
338 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
339 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
340 
341 #if QDF_LOCK_STATS
342 noinline qdf_nbuf_t
343 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
344 			 struct dp_tx_msdu_info_s *msdu_info);
345 #else
346 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
347 				    struct dp_tx_msdu_info_s *msdu_info);
348 #endif
349 #ifdef FEATURE_WLAN_TDLS
350 /**
351  * dp_tx_non_std() - Allow the control-path SW to send data frames
352  * @soc_hdl: Datapath soc handle
353  * @vdev_id: id of vdev
354  * @tx_spec: what non-standard handling to apply to the tx data frames
355  * @msdu_list: NULL-terminated list of tx MSDUs
356  *
357  * Return: NULL on success,
358  *         nbuf when it fails to send
359  */
360 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
361 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
362 #endif
363 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
364 
365 /**
366  * dp_tx_comp_handler() - Tx completion handler
367  * @int_ctx: pointer to DP interrupt context
368  * @soc: core txrx main context
369  * @hal_srng: Opaque HAL SRNG pointer
370  * @ring_id: completion ring id
371  * @quota: No. of packets/descriptors that can be serviced in one loop
372  *
373  * This function will collect hardware release ring element contents and
374  * handle descriptor contents. Based on contents, free packet or handle error
375  * conditions
376  *
377  * Return: Number of TX completions processed
378  */
379 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
380 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
381 			    uint32_t quota);
382 
383 QDF_STATUS
384 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
385 
386 QDF_STATUS
387 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
388 
389 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
390 
391 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* Multicast-enhancement teardown: no-op when IQUE support is absent */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
396 #endif
397 
/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Resets the pdev's outstanding-Tx counters and, when the config uses a
 * TCL data ring per pdev, initializes the descriptors of that ring.
 *
 * Return: QDF_STATUS_SUCCESS (always; no failure path in this function)
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);
	pdev->tx_descs_max = 0;
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}
420 
421 /**
422  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
423  * @soc: Handle to HAL Soc structure
424  * @hal_soc: HAL SOC handle
425  * @num_avail_for_reap: descriptors available for reap
426  * @hal_ring_hdl: ring pointer
427  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
428  * @last_prefetched_sw_desc: pointer to last prefetch SW desc
429  *
430  * Return: None
431  */
432 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm the first two 64-byte cache lines of the previously
	 * resolved SW descriptor's nbuf before it is processed.
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		/* Resolve the SW descriptor for the prefetched HW desc */
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		/* NOTE(review): 0x3f test assumes 32-byte HW descriptors
		 * packed two per 64-byte cache line — mid-line descs step
		 * within the already-cached line, line-aligned ones trigger
		 * a prefetch of the next line. Confirm against HAL layout.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
465 #else
/* HW/SW descriptor prefetch compiled out: no-op */
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
476 #endif
477 
478 #ifndef FEATURE_WDS
479 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
480 {
481 	return;
482 }
483 #endif
484 
485 #ifndef QCA_MULTIPASS_SUPPORT
/* Multipass support disabled: accept every frame for transmission */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}
493 
/* Multipass support disabled: no per-vdev state to tear down */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
498 
499 #else
500 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
501 			     qdf_nbuf_t nbuf,
502 			     struct dp_tx_msdu_info_s *msdu_info);
503 
504 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
505 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
506 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
507 				 struct dp_tx_msdu_info_s *msdu_info,
508 				 uint16_t group_key);
509 #endif
510 
/**
 * dp_tx_hw_to_qdf()- convert hw (TQM release reason) status to qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
	switch (status) {
	case HAL_TX_TQM_RR_FRAME_ACKED:
		return QDF_TX_RX_STATUS_OK;
	case HAL_TX_TQM_RR_REM_CMD_TX:
		return QDF_TX_RX_STATUS_NO_ACK;
	case HAL_TX_TQM_RR_REM_CMD_REM:
	case HAL_TX_TQM_RR_REM_CMD_NOTX:
	case HAL_TX_TQM_RR_REM_CMD_AGED:
		/* Frame removed by TQM remove commands / aged out */
		return QDF_TX_RX_STATUS_FW_DISCARD;
	default:
		return QDF_TX_RX_STATUS_DEFAULT;
	}
}
532 
533 #ifndef QCA_HOST_MODE_WIFI_DISABLED
534 /**
535  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
536  * @vdev: DP Virtual device handle
537  * @nbuf: Buffer pointer
538  * @queue: queue ids container for nbuf
539  *
540  * TX packet queue has 2 instances, software descriptors id and dma ring id
541  * Based on tx feature and hardware configuration queue id combination could be
542  * different.
543  * For example -
544  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
545  * With no XPS,lock based resource protection, Descriptor pool ids are different
546  * for each vdev, dma ring id will be same as single pdev id
547  *
548  * Return: None
549  */
550 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
551 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
552 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
553 {
554 	queue->ring_id = qdf_get_cpu();
555 	queue->desc_pool_id = queue->ring_id;
556 }
557 
558 /*
559  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
560  * @dp_soc - DP soc structure pointer
561  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
562  *
563  * Return - HAL ring handle
564  */
565 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
566 						       uint8_t ring_id)
567 {
568 	if (ring_id == soc->num_tcl_data_rings)
569 		return soc->tcl_cmd_credit_ring.hal_srng;
570 
571 	return soc->tcl_data_ring[ring_id].hal_srng;
572 }
573 
574 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
575 
576 #ifdef TX_MULTI_TCL
577 #ifdef IPA_OFFLOAD
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	/* NOTE(review): IPA builds pin Tx to the fixed per-pdev ring —
	 * presumably to keep host Tx off rings used by the IPA offload
	 * path; confirm. Otherwise spread by skb queue mapping.
	 */
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		queue->ring_id = DP_TX_GET_RING_ID(vdev);
	else
		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
					vdev->pdev->soc->num_tcl_data_rings);
}
589 #else
590 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
591 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
592 {
593 	/* get flow id */
594 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
595 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
596 				vdev->pdev->soc->num_tcl_data_rings);
597 }
598 #endif
599 #else
600 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
601 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
602 {
603 	/* get flow id */
604 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
605 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
606 }
607 #endif
608 
/* Non-XPS builds: ring_id maps directly onto a TCL data ring */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
614 #endif
615 
616 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
617 /*
618  * dp_tx_hal_ring_access_start()- hal_tx_ring access for data transmission
619  * @dp_soc - DP soc structure pointer
620  * @hal_ring_hdl - HAL ring handle
621  *
 * Return - status from hal_srng_access_start_unlocked
623  */
/* Lock-less variant: caller guarantees serialized ring access */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}
629 
630 /*
631  * dp_tx_hal_ring_access_end()- hal_tx_ring access for data transmission
632  * @dp_soc - DP soc structure pointer
633  * @hal_ring_hdl - HAL ring handle
634  *
635  * Return - None
636  */
/* Lock-less variant: end SRNG access without taking the ring lock */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
642 
643 /*
 * dp_tx_hal_ring_access_end_reap()- hal_tx_ring access for data transmission
645  * @dp_soc - DP soc structure pointer
646  * @hal_ring_hdl - HAL ring handle
647  *
648  * Return - None
649  */
/* Lock-less variant: reap-only end is intentionally a no-op here */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
655 
656 #else
/* Locked variant: start SRNG access under the ring lock */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}
662 
/* Locked variant: end SRNG access under the ring lock */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}
668 
/* Locked variant: end access in reap mode (see hal_srng_access_end_reap) */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
675 #endif
676 
677 #ifdef ATH_TX_PRI_OVERRIDE
678 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
679 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
680 #else
681 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
682 #endif
683 
684 /* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	/* Placeholder: exception-frame completion handling not implemented */
}
689 /* TODO TX_FEATURE_NOT_YET */
690 
691 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
692 		      bool force_free);
693 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
694 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
695 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
696 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
697 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
698 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
699 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
700 void
701 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
702 			     uint32_t buf_type);
703 #else /* QCA_HOST_MODE_WIFI_DISABLED */
704 
/* Host mode disabled: no SW Tx descriptor pools to allocate */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
710 
/* Host mode disabled: pool init is a successful no-op */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
716 
/* Host mode disabled: nothing to free */
static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}
720 
/* Host mode disabled: nothing to deinit */
static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}
724 
/* Host mode disabled: no descriptors to flush */
static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}
730 
/* Host mode disabled: vdev Tx attach trivially succeeds */
static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
735 
/* Host mode disabled: vdev Tx detach trivially succeeds */
static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
740 
/* Host mode disabled: no search flags to update */
static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
744 
745 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
746 
747 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
748 	defined(QCA_TX_CAPTURE_SUPPORT) || \
749 	defined(QCA_MCOPY_SUPPORT)
750 #ifdef FEATURE_PERPKT_INFO
751 QDF_STATUS
752 dp_get_completion_indication_for_stack(struct dp_soc *soc,
753 				       struct dp_pdev *pdev,
754 				       struct dp_txrx_peer *peer,
755 				       struct hal_tx_completion_status *ts,
756 				       qdf_nbuf_t netbuf,
757 				       uint64_t time_latency);
758 
759 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
760 			    uint16_t peer_id, uint32_t ppdu_id,
761 			    qdf_nbuf_t netbuf);
762 #endif
763 #else
/*
 * Per-packet completion indication compiled out: report no-support so
 * callers fall back to the regular completion path.
 */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}
774 
/* Completion-to-stack delivery compiled out: no-op */
static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
781 #endif
782 
783 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
784 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
785 				       struct dp_tx_desc_s *desc,
786 				       struct hal_tx_completion_status *ts);
787 #else
/* Packet capture v2 disabled: completion notification is a no-op */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
794 #endif
795 
796 #ifndef QCA_HOST_MODE_WIFI_DISABLED
797 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
798 /**
799  * dp_tx_update_stats() - Update soc level tx stats
800  * @soc: DP soc handle
801  * @tx_desc: TX descriptor reference
802  * @ring_id: TCL ring id
803  *
804  * Returns: none
805  */
806 void dp_tx_update_stats(struct dp_soc *soc,
807 			struct dp_tx_desc_s *tx_desc,
808 			uint8_t ring_id);
809 
810 /**
811  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
812  * @soc: Datapath soc handle
813  * @tx_desc: tx packet descriptor
814  * @tid: TID for pkt transmission
815  * @msdu_info: MSDU info of tx packet
816  * @ring_id: TCL ring id
817  *
818  * Returns: 1, if coalescing is to be done
819  *	    0, if coalescing is not to be done
820  */
821 int
822 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
823 			 struct dp_tx_desc_s *tx_desc,
824 			 uint8_t tid,
825 			 struct dp_tx_msdu_info_s *msdu_info,
826 			 uint8_t ring_id);
827 
828 /**
829  * dp_tx_ring_access_end() - HAL ring access end for data transmission
830  * @soc: Datapath soc handle
831  * @hal_ring_hdl: HAL ring handle
832  * @coalesce: Coalesce the current write or not
833  *
834  * Returns: none
835  */
836 void
837 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
838 		      int coalesce);
839 #else
840 /**
841  * dp_tx_update_stats() - Update soc level tx stats
842  * @soc: DP soc handle
843  * @tx_desc: TX descriptor reference
844  * @ring_id: TCL ring id
845  *
846  * Returns: none
847  */
848 static inline void dp_tx_update_stats(struct dp_soc *soc,
849 				      struct dp_tx_desc_s *tx_desc,
850 				      uint8_t ring_id){ }
851 
/*
 * SW latency manager disabled: no coalescing decision to honor —
 * @coalesce is ignored and ring access is terminated directly.
 */
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}
858 
/* SW latency manager disabled: never coalesce (always return 0) */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
868 
869 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
870 
871 #ifdef FEATURE_RUNTIME_PM
872 /**
873  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
874  * @soc_hdl: DP soc handle
875  * @is_high_tput: flag to indicate whether throughput is high
876  *
877  * Returns: none
878  */
879 static inline
880 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
881 					 bool is_high_tput)
882 {
883 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
884 
885 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
886 }
887 
888 void
889 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
890 			      hal_ring_handle_t hal_ring_hdl,
891 			      int coalesce);
892 #else
893 #ifdef DP_POWER_SAVE
894 void
895 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
896 			      hal_ring_handle_t hal_ring_hdl,
897 			      int coalesce);
898 #else
/* No runtime PM / power save: forward straight to ring access end */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
906 #endif
907 
/* Runtime PM disabled: throughput policy hint is ignored */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
912 #endif
913 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
914 
915 #ifdef DP_TX_HW_DESC_HISTORY
/*
 * dp_tx_hw_desc_update_evt() - record a posted TCL descriptor in the
 * soc's Tx HW descriptor history ring for debugging.
 * @hal_tx_desc_cached: cached copy of the descriptor just written to HW
 * @hal_ring_hdl: TCL ring the descriptor was posted to
 * @soc: DP soc handle (owns tx_hw_desc_history)
 * @ring_id: TCL ring id
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	/* History buffer may not be allocated on memory-constrained builds */
	if (!tx_hw_desc_history->allocated)
		return;

	/* Atomically claim the next slot/index pair in the fragmented
	 * history buffer (wraps at DP_TX_HW_DESC_HIST_MAX).
	 */
	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	/* Snapshot SW head/tail pointers at the time of posting */
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
942 #else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	/* DP_TX_HW_DESC_HISTORY disabled: nothing to record */
}
949 #endif
950 
951 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
952 /**
953  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
954  * @ts: Tx completion status
955  * @delta_tsf: Difference between TSF clock and qtimer
956  * @delay_us: Delay in microseconds
957  *
958  * Return: QDF_STATUS_SUCCESS   : Success
959  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
960  *                                delay_us is NULL
961  *         QDF_STATUS_E_FAILURE : Error in delay calculation
962  */
963 QDF_STATUS
964 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
965 			  uint32_t delta_tsf,
966 			  uint32_t *delay_us);
967 
968 /**
969  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
970  * @soc_hdl: cdp soc pointer
971  * @vdev_id: vdev id
972  * @delta_tsf: difference between TSF clock and qtimer
973  *
974  * Return: None
975  */
976 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
977 		      uint32_t delta_tsf);
978 #endif
979 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
980 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
982  * @soc_hdl: cdp soc pointer
983  * @vdev_id: vdev id
984  * @enable: true to enable and false to disable
985  *
986  * Return: QDF_STATUS
987  */
988 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
989 				      uint8_t vdev_id, bool enable);
990 
991 /**
992  * dp_get_uplink_delay() - Get uplink delay value
993  * @soc_hdl: cdp soc pointer
994  * @vdev_id: vdev id
995  * @val: pointer to save uplink delay value
996  *
997  * Return: QDF_STATUS
998  */
999 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1000 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
1002 
1003 /**
1004  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
1005  *
1006  * Return: True if any tx pkt tracepoint is enabled else false
1007  */
1008 static inline
1009 bool dp_tx_pkt_tracepoints_enabled(void)
1010 {
1011 	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
1012 		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
1013 		qdf_trace_dp_tx_comp_pkt_enabled());
1014 }
1015 
1016 #ifdef DP_TX_TRACKING
1017 /**
1018  * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
1019  * @tx_desc - tx descriptor
1020  *
1021  * Return: None
1022  */
1023 static inline
1024 void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
1025 {
1026 	tx_desc->timestamp_tick = qdf_system_ticks();
1027 }
1028 
1029 /**
1030  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1031  * @tx_desc: tx descriptor
1032  *
1033  * Check for corruption in tx descriptor, if magic pattern is not matching
1034  * trigger self recovery
1035  *
1036  * Return: none
1037  */
1038 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1039 #else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	/* DP_TX_TRACKING disabled: no timestamp needed */
}
1044 
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
	/* DP_TX_TRACKING disabled: no magic-pattern check */
}
1049 #endif
1050 
1051 #ifndef CONFIG_SAWF
/* CONFIG_SAWF disabled: no nbuf ever carries a valid SAWF tag */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1056 #endif
1057 
1058 #ifdef HW_TX_DELAY_STATS_ENABLE
1059 /**
1060  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1061  * @vdev: DP vdev handle
1062  * @tx_desc: tx descriptor
1063  *
1064  * Return: true when descriptor is timestamped, false otherwise
1065  */
1066 static inline
1067 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1068 			       struct dp_tx_desc_s *tx_desc)
1069 {
1070 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1071 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1072 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1073 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1074 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
1075 		tx_desc->timestamp = qdf_ktime_real_get();
1076 		return true;
1077 	}
1078 	return false;
1079 }
1080 #else
/*
 * HW_TX_DELAY_STATS_ENABLE off: same as above minus the per-vdev
 * hw tx delay stats check.
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1094 #endif
1095 
1096 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1097 /**
1098  * dp_pkt_add_timestamp() - add timestamp in data payload
1099  *
1100  * @vdev: dp vdev
1101  * @index: index to decide offset in payload
1102  * @time: timestamp to add in data payload
1103  * @nbuf: network buffer
1104  *
1105  * Return: none
1106  */
1107 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1108 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1109 			  qdf_nbuf_t nbuf);
1110 /**
1111  * dp_pkt_get_timestamp() - get current system time
1112  *
1113  * @time: return current system time
1114  *
1115  * Return: none
1116  */
1117 void dp_pkt_get_timestamp(uint64_t *time);
1118 #else
1119 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1120 
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
	/* CONFIG_DP_PKT_ADD_TIMESTAMP disabled: *time left untouched */
}
1125 #endif
1126 
1127 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1128 /**
1129  * dp_update_tx_desc_stats - Update the increase or decrease in
1130  * outstanding tx desc count
1131  * values on pdev and soc
1132  * @vdev: DP pdev handle
1133  *
1134  * Return: void
1135  */
1136 static inline void
1137 dp_update_tx_desc_stats(struct dp_pdev *pdev)
1138 {
1139 	int32_t tx_descs_cnt =
1140 		qdf_atomic_read(&pdev->num_tx_outstanding);
1141 	if (pdev->tx_descs_max < tx_descs_cnt)
1142 		pdev->tx_descs_max = tx_descs_cnt;
1143 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
1144 				   pdev->tx_descs_max);
1145 }
1146 
1147 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
1148 
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	/* CONFIG_WLAN_SYSFS_MEM_STATS disabled: no watermark tracking */
}
1153 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1154 
1155 #ifdef QCA_TX_LIMIT_CHECK
1156 /**
1157  * dp_tx_limit_check - Check if allocated tx descriptors reached
1158  * soc max limit and pdev max limit
1159  * @vdev: DP vdev handle
1160  *
1161  * Return: true if allocated tx descriptors reached max configured value, else
1162  * false
1163  */
1164 static inline bool
1165 dp_tx_limit_check(struct dp_vdev *vdev)
1166 {
1167 	struct dp_pdev *pdev = vdev->pdev;
1168 	struct dp_soc *soc = pdev->soc;
1169 
1170 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1171 			soc->num_tx_allowed) {
1172 		dp_tx_info("queued packets are more than max tx, drop the frame");
1173 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1174 		return true;
1175 	}
1176 
1177 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1178 			pdev->num_tx_allowed) {
1179 		dp_tx_info("queued packets are more than max tx, drop the frame");
1180 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1181 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
1182 		return true;
1183 	}
1184 	return false;
1185 }
1186 
1187 /**
1188  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
1189  * reached soc max limit
1190  * @vdev: DP vdev handle
1191  *
1192  * Return: true if allocated tx descriptors reached max configured value, else
1193  * false
1194  */
1195 static inline bool
1196 dp_tx_exception_limit_check(struct dp_vdev *vdev)
1197 {
1198 	struct dp_pdev *pdev = vdev->pdev;
1199 	struct dp_soc *soc = pdev->soc;
1200 
1201 	if (qdf_atomic_read(&soc->num_tx_exception) >=
1202 			soc->num_msdu_exception_desc) {
1203 		dp_info("exc packets are more than max drop the exc pkt");
1204 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
1205 		return true;
1206 	}
1207 
1208 	return false;
1209 }
1210 
1211 /**
1212  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
1213  * @vdev: DP pdev handle
1214  *
1215  * Return: void
1216  */
1217 static inline void
1218 dp_tx_outstanding_inc(struct dp_pdev *pdev)
1219 {
1220 	struct dp_soc *soc = pdev->soc;
1221 
1222 	qdf_atomic_inc(&pdev->num_tx_outstanding);
1223 	qdf_atomic_inc(&soc->num_tx_outstanding);
1224 	dp_update_tx_desc_stats(pdev);
1225 }
1226 
1227 /**
1228  * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc
1229  * @vdev: DP pdev handle
1230  *
1231  * Return: void
1232  */
1233 static inline void
1234 dp_tx_outstanding_dec(struct dp_pdev *pdev)
1235 {
1236 	struct dp_soc *soc = pdev->soc;
1237 
1238 	qdf_atomic_dec(&pdev->num_tx_outstanding);
1239 	qdf_atomic_dec(&soc->num_tx_outstanding);
1240 	dp_update_tx_desc_stats(pdev);
1241 }
1242 
1243 #else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	/* QCA_TX_LIMIT_CHECK disabled: never throttle */
	return false;
}
1249 
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	/* QCA_TX_LIMIT_CHECK disabled: never throttle */
	return false;
}
1255 
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	/* only the pdev counter is tracked when limit checks are off */
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1262 
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	/* only the pdev counter is tracked when limit checks are off */
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1269 #endif //QCA_TX_LIMIT_CHECK
1270 #endif
1271