xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 2b23d2e388c3e0ba9ac6113a9da98706fc6be2fd)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
63 
64 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
65 
66 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
67 do {                                                           \
68 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
69 	qdf_nbuf_free(buf);                                    \
70 } while (0)
71 
72 #define OCB_HEADER_VERSION	 1
73 
74 #ifdef TX_PER_PDEV_DESC_POOL
75 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
76 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
77 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
78 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
79 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
80 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
81 #else
82 	#ifdef TX_PER_VDEV_DESC_POOL
83 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
84 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
85 	#endif /* TX_PER_VDEV_DESC_POOL */
86 #endif /* TX_PER_PDEV_DESC_POOL */
87 #define DP_TX_QUEUE_MASK 0x3
88 
89 #define MAX_CDP_SEC_TYPE 12
90 
91 /* number of dwords for htt_tx_msdu_desc_ext2_t */
92 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
93 
94 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
95 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
96 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
97 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
98 #define dp_tx_info(params...) \
99 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
100 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
101 
102 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
103 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
104 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
105 #define dp_tx_comp_info(params...) \
106 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
107 #define dp_tx_comp_info_rl(params...) \
108 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
109 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
110 
111 #ifndef QCA_HOST_MODE_WIFI_DISABLED
112 
/**
 * struct dp_tx_frag_info_s - per-fragment info for a Tx buffer
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
126 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-Fragment information (at most DP_TX_MAX_NUM_FRAGS entries)
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
142 
143 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
144 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
163 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
177 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id: vdev_id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
};
222 
223 #ifndef QCA_HOST_MODE_WIFI_DISABLED
224 /**
225  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
226  * @soc: core txrx context
227  * @index: index of ring to deinit
228  *
229  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
230  * index of the respective TCL/WBM2SW release in soc structure.
231  * For example, if the index is 2 then &soc->tcl_data_ring[2]
232  * and &soc->tx_comp_ring[2] will be deinitialized.
233  *
234  * Return: none
235  */
236 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
237 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
238 
/* TSO common descriptor pool teardown: deinit releases per-pool state,
 * free releases the pool memory itself.
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
243 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
244 					 uint8_t num_pool,
245 					 uint32_t num_desc);
246 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
247 					uint8_t num_pool,
248 					uint32_t num_desc);
249 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
250 			       bool delayed_free);
251 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
252 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
253 			 uint8_t tid, uint8_t ring_id);
254 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
255 				  struct dp_tx_desc_s *tx_desc,
256 				  struct hal_tx_completion_status *ts,
257 				  struct dp_txrx_peer *txrx_peer,
258 				  uint8_t ring_id);
259 void dp_tx_comp_process_desc(struct dp_soc *soc,
260 			     struct dp_tx_desc_s *desc,
261 			     struct hal_tx_completion_status *ts,
262 			     struct dp_txrx_peer *txrx_peer);
263 void dp_tx_reinject_handler(struct dp_soc *soc,
264 			    struct dp_vdev *vdev,
265 			    struct dp_tx_desc_s *tx_desc,
266 			    uint8_t *status,
267 			    uint8_t reinject_reason);
268 void dp_tx_inspect_handler(struct dp_soc *soc,
269 			   struct dp_vdev *vdev,
270 			   struct dp_tx_desc_s *tx_desc,
271 			   uint8_t *status);
272 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
273 				   uint32_t length, uint8_t tx_status,
274 				   bool update);
275 
276 #ifdef DP_UMAC_HW_RESET_SUPPORT
277 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
278 
279 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
280 			  qdf_nbuf_t nbuf,
281 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
282 #endif
283 #ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
293 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
294 
/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
304 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
305 
306 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
307 
308 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
309 				    qdf_nbuf_t nbuf);
310 
311 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
312 				qdf_nbuf_t nbuf,
313 				struct cdp_tx_exception_metadata *tx_exc);
314 
315 qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
316 					      uint8_t vdev_id,
317 					      qdf_nbuf_t nbuf,
318 				struct cdp_tx_exception_metadata *tx_exc);
319 
320 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
321 			   qdf_nbuf_t nbuf);
322 qdf_nbuf_t
323 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
324 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
325 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
326 
327 #if QDF_LOCK_STATS
328 noinline qdf_nbuf_t
329 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
330 			 struct dp_tx_msdu_info_s *msdu_info);
331 #else
332 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
333 				    struct dp_tx_msdu_info_s *msdu_info);
334 #endif
335 #ifdef FEATURE_WLAN_TDLS
336 /**
337  * dp_tx_non_std() - Allow the control-path SW to send data frames
338  * @soc_hdl: Datapath soc handle
339  * @vdev_id: id of vdev
340  * @tx_spec: what non-standard handling to apply to the tx data frames
341  * @msdu_list: NULL-terminated list of tx MSDUs
342  *
343  * Return: NULL on success,
344  *         nbuf when it fails to send
345  */
346 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
347 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
348 #endif
349 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
350 
351 /**
352  * dp_tx_comp_handler() - Tx completion handler
353  * @int_ctx: pointer to DP interrupt context
354  * @soc: core txrx main context
355  * @hal_srng: Opaque HAL SRNG pointer
356  * @ring_id: completion ring id
357  * @quota: No. of packets/descriptors that can be serviced in one loop
358  *
359  * This function will collect hardware release ring element contents and
360  * handle descriptor contents. Based on contents, free packet or handle error
361  * conditions
362  *
363  * Return: Number of TX completions processed
364  */
365 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
366 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
367 			    uint32_t quota);
368 
369 QDF_STATUS
370 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
371 
372 QDF_STATUS
373 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
374 
375 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
376 
377 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* No-op: mcast-enhancement teardown is not needed when host mode is
 * disabled or IQUE support is not compiled in.
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
	return;
}
382 #endif
383 
384 /**
385  * dp_tx_pdev_init() - dp tx pdev init
386  * @pdev: physical device instance
387  *
388  * Return: QDF_STATUS_SUCCESS: success
389  *         QDF_STATUS_E_RESOURCES: Error return
390  */
391 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
392 {
393 	struct dp_soc *soc = pdev->soc;
394 
395 	/* Initialize Flow control counters */
396 	qdf_atomic_init(&pdev->num_tx_outstanding);
397 	pdev->tx_descs_max = 0;
398 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
399 		/* Initialize descriptors in TCL Ring */
400 		hal_tx_init_data_ring(soc->hal_soc,
401 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
402 	}
403 
404 	return QDF_STATUS_SUCCESS;
405 }
406 
407 /**
408  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
409  * @soc: Handle to HAL Soc structure
410  * @hal_soc: HAL SOC handle
411  * @num_avail_for_reap: descriptors available for reap
412  * @hal_ring_hdl: ring pointer
413  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
414  * @last_prefetched_sw_desc: pointer to last prefetch SW desc
415  *
416  * Return: None
417  */
418 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm the cache with the nbuf of the previously resolved SW
	 * descriptor (two prefetches: offset 0 and +64 bytes).
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		/* Resolve the SW descriptor corresponding to the currently
		 * prefetched HW completion descriptor.
		 */
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		/* Advance the HW prefetch pointer.
		 * NOTE(review): the 0x3f test looks like a 64-byte cache-line
		 * alignment check — unaligned descriptors take the
		 * prefetch-next-cached-desc path, aligned ones just step to
		 * the next 32-byte entry. Confirm against the hal_srng API.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
451 #else
/* Prefetch support disabled: no-op stub with the same signature. */
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
462 #endif
463 
464 #ifndef FEATURE_WDS
/* Without WDS there are no MEC events to handle; ignore the status. */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	return;
}
469 #endif
470 
471 #ifndef QCA_MULTIPASS_SUPPORT
/* Multipass support compiled out: always report "transmit as-is". */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

/* Nothing to tear down when multipass support is compiled out. */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
484 
485 #else
486 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
487 			     qdf_nbuf_t nbuf,
488 			     struct dp_tx_msdu_info_s *msdu_info);
489 
490 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
491 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
492 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
493 				 struct dp_tx_msdu_info_s *msdu_info,
494 				 uint16_t group_key);
495 #endif
496 
/**
 * dp_tx_hw_to_qdf() - convert hw status to qdf status
 * @status: hw (TQM release reason) status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
	switch (status) {
	case HAL_TX_TQM_RR_FRAME_ACKED:
		/* frame was acknowledged by the peer */
		return QDF_TX_RX_STATUS_OK;
	case HAL_TX_TQM_RR_REM_CMD_TX:
		return QDF_TX_RX_STATUS_NO_ACK;
	case HAL_TX_TQM_RR_REM_CMD_REM:
	case HAL_TX_TQM_RR_REM_CMD_NOTX:
	case HAL_TX_TQM_RR_REM_CMD_AGED:
		/* frame removed/aged out by TQM without successful tx */
		return QDF_TX_RX_STATUS_FW_DISCARD;
	default:
		return QDF_TX_RX_STATUS_DEFAULT;
	}
}
518 
519 #ifndef QCA_HOST_MODE_WIFI_DISABLED
520 /**
521  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
522  * @vdev: DP Virtual device handle
523  * @nbuf: Buffer pointer
524  * @queue: queue ids container for nbuf
525  *
526  * TX packet queue has 2 instances, software descriptors id and dma ring id
527  * Based on tx feature and hardware configuration queue id combination could be
528  * different.
529  * For example -
530  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
531  * With no XPS,lock based resource protection, Descriptor pool ids are different
532  * for each vdev, dma ring id will be same as single pdev id
533  *
534  * Return: None
535  */
536 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
537 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
538 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
539 {
540 	queue->ring_id = qdf_get_cpu();
541 	queue->desc_pool_id = queue->ring_id;
542 }
543 
544 /*
545  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
546  * @dp_soc - DP soc structure pointer
547  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
548  *
549  * Return - HAL ring handle
550  */
551 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
552 						       uint8_t ring_id)
553 {
554 	if (ring_id == soc->num_tcl_data_rings)
555 		return soc->tcl_cmd_credit_ring.hal_srng;
556 
557 	return soc->tcl_data_ring[ring_id].hal_srng;
558 }
559 
560 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
561 
562 #ifdef TX_MULTI_TCL
563 #ifdef IPA_OFFLOAD
564 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
565 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
566 {
567 	/* get flow id */
568 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
569 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
570 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
571 	else
572 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
573 					vdev->pdev->soc->num_tcl_data_rings);
574 }
575 #else
576 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
577 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
578 {
579 	/* get flow id */
580 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
581 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
582 				vdev->pdev->soc->num_tcl_data_rings);
583 }
584 #endif
585 #else
/* Single-queue config: pool and ring both derived from the vdev/pdev id. */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
593 #endif
594 
/* Non-multiq: ring_id indexes the TCL data ring array directly. */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
600 #endif
601 
602 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Lock-less variant: the ring is accessed without taking the SRNG lock.
 *
 * Return: status from hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - end hal_tx_ring access (lock-less)
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - end reap-only ring access (lock-less)
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * No-op in the lock-less configuration.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
641 
642 #else
/* Locked variants: ring accesses are serialized through the SRNG lock. */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

/* Reap-only end of ring access — delegates to hal_srng_access_end_reap() */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
661 #endif
662 
663 #ifdef ATH_TX_PRI_OVERRIDE
664 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
665 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
666 #else
667 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
668 #endif
669 
/* TODO TX_FEATURE_NOT_YET */
/* Placeholder: exception-path Tx completion handling not implemented yet. */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
675 /* TODO TX_FEATURE_NOT_YET */
676 
677 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
678 		      bool force_free);
679 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
680 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
681 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
682 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
683 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
684 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
685 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
686 void
687 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
688 			     uint32_t buf_type);
689 #else /* QCA_HOST_MODE_WIFI_DISABLED */
690 
/* Host Tx datapath compiled out: inert stubs keep callers building. */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
730 
731 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
732 
733 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
734 	defined(QCA_TX_CAPTURE_SUPPORT) || \
735 	defined(QCA_MCOPY_SUPPORT)
736 #ifdef FEATURE_PERPKT_INFO
737 QDF_STATUS
738 dp_get_completion_indication_for_stack(struct dp_soc *soc,
739 				       struct dp_pdev *pdev,
740 				       struct dp_txrx_peer *peer,
741 				       struct hal_tx_completion_status *ts,
742 				       qdf_nbuf_t netbuf,
743 				       uint64_t time_latency);
744 
745 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
746 			    uint16_t peer_id, uint32_t ppdu_id,
747 			    qdf_nbuf_t netbuf);
748 #endif
749 #else
/* Per-packet completion indication not supported in this build: return
 * QDF_STATUS_E_NOSUPPORT so callers take the regular completion path.
 */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* No-op: nothing to deliver to the stack in this configuration. */
static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
767 #endif
768 
769 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
770 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
771 				       struct dp_tx_desc_s *desc,
772 				       struct hal_tx_completion_status *ts);
773 #else
/* Packet-capture v2 disabled: completions are not mirrored to capture. */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
780 #endif
781 
782 #ifndef QCA_HOST_MODE_WIFI_DISABLED
783 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
784 /**
785  * dp_tx_update_stats() - Update soc level tx stats
786  * @soc: DP soc handle
787  * @tx_desc: TX descriptor reference
788  * @ring_id: TCL ring id
789  *
790  * Returns: none
791  */
792 void dp_tx_update_stats(struct dp_soc *soc,
793 			struct dp_tx_desc_s *tx_desc,
794 			uint8_t ring_id);
795 
796 /**
797  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
798  * @soc: Datapath soc handle
799  * @tx_desc: tx packet descriptor
800  * @tid: TID for pkt transmission
801  * @msdu_info: MSDU info of tx packet
802  * @ring_id: TCL ring id
803  *
804  * Returns: 1, if coalescing is to be done
805  *	    0, if coalescing is not to be done
806  */
807 int
808 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
809 			 struct dp_tx_desc_s *tx_desc,
810 			 uint8_t tid,
811 			 struct dp_tx_msdu_info_s *msdu_info,
812 			 uint8_t ring_id);
813 
814 /**
815  * dp_tx_ring_access_end() - HAL ring access end for data transmission
816  * @soc: Datapath soc handle
817  * @hal_ring_hdl: HAL ring handle
818  * @coalesce: Coalesce the current write or not
819  *
820  * Returns: none
821  */
822 void
823 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
824 		      int coalesce);
825 #else
826 /**
827  * dp_tx_update_stats() - Update soc level tx stats
828  * @soc: DP soc handle
829  * @tx_desc: TX descriptor reference
830  * @ring_id: TCL ring id
831  *
832  * Returns: none
833  */
/* SW latency manager disabled: soc-level stats update is a no-op. */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

/* Without coalescing support, simply end the HAL ring access. */
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

/* Coalescing unavailable: always report "do not coalesce" (0). */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
854 
855 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
856 
857 #ifdef FEATURE_RUNTIME_PM
858 /**
859  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
860  * @soc_hdl: DP soc handle
861  * @is_high_tput: flag to indicate whether throughput is high
862  *
863  * Returns: none
864  */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
					 bool is_high_tput)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	/* Record the throughput hint for the runtime-PM policy */
	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}
873 
874 void
875 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
876 			      hal_ring_handle_t hal_ring_hdl,
877 			      int coalesce);
878 #else
879 #ifdef DP_POWER_SAVE
880 void
881 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
882 			      hal_ring_handle_t hal_ring_hdl,
883 			      int coalesce);
884 #else
/* No power-save handling required: plain ring-access end. */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
892 #endif
893 
/* Runtime PM disabled: the throughput policy hint is ignored. */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
898 #endif
899 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
900 
901 #ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	struct dp_tx_hw_desc_evt *evt;
	uint64_t idx = 0;

	/* History buffer may not be allocated on all targets */
	if (!soc->tx_hw_desc_history)
		return;

	/* Advance the write index; wrap the stored index at HIST_MAX so it
	 * stays bounded, then reduce the local copy modulo HIST_MAX to pick
	 * the slot to fill.
	 */
	idx = ++soc->tx_hw_desc_history->index;
	if (idx == DP_TX_HW_DESC_HIST_MAX)
		soc->tx_hw_desc_history->index = 0;
	idx = qdf_do_div_rem(idx, DP_TX_HW_DESC_HIST_MAX);

	/* Snapshot the TCL descriptor, post timestamp and ring HP/TP */
	evt = &soc->tx_hw_desc_history->entry[idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
923 #else
/* HW descriptor history disabled: nothing to record. */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
}
930 #endif
931 
932 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
933 /**
934  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
935  * @ts: Tx completion status
936  * @delta_tsf: Difference between TSF clock and qtimer
937  * @delay_us: Delay in microseconds
938  *
939  * Return: QDF_STATUS_SUCCESS   : Success
940  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
941  *                                delay_us is NULL
942  *         QDF_STATUS_E_FAILURE : Error in delay calculation
943  */
944 QDF_STATUS
945 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
946 			  uint32_t delta_tsf,
947 			  uint32_t *delay_us);
948 
949 /**
950  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
951  * @soc_hdl: cdp soc pointer
952  * @vdev_id: vdev id
953  * @delta_tsf: difference between TSF clock and qtimer
954  *
955  * Return: None
956  */
957 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
958 		      uint32_t delta_tsf);
959 #endif
960 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
961 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
963  * @soc_hdl: cdp soc pointer
964  * @vdev_id: vdev id
965  * @enable: true to enable and false to disable
966  *
967  * Return: QDF_STATUS
968  */
969 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
970 				      uint8_t vdev_id, bool enable);
971 
972 /**
973  * dp_get_uplink_delay() - Get uplink delay value
974  * @soc_hdl: cdp soc pointer
975  * @vdev_id: vdev id
976  * @val: pointer to save uplink delay value
977  *
978  * Return: QDF_STATUS
979  */
980 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
981 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
983 
/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Checks the TCP, UDP and generic tx-completion packet tracepoints.
 *
 * Return: True if any tx pkt tracepoint is enabled else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_pkt_enabled());
}
996 
997 #ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Stamps the descriptor with the current system tick count
 * (compiled only when DP_TX_TRACKING is enabled).
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}
1009 
1010 /**
1011  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1012  * @tx_desc: tx descriptor
1013  *
1014  * Check for corruption in tx descriptor, if magic pattern is not matching
1015  * trigger self recovery
1016  *
1017  * Return: none
1018  */
1019 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1020 #else
/* Stub: descriptor timestamping disabled without DP_TX_TRACKING */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}
1025 
/* Stub: corruption check is a no-op without DP_TX_TRACKING */
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
1030 #endif
1031 
1032 #ifndef CONFIG_SAWF
/**
 * dp_sawf_tag_valid_get() - stub SAWF tag validity check
 * @nbuf: network buffer
 *
 * Without CONFIG_SAWF no buffer carries a valid SAWF tag.
 *
 * Return: false
 */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1037 #endif
1038 
1039 #ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * The real-time kernel timestamp is recorded only when some consumer
 * needs it: pdev delay_stats_flag, cfg pext_stats_enabled, any tx
 * completion tracepoint, soc peerstats_enabled, or per-vdev HW tx
 * delay stats.
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1061 #else
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Variant without HW_TX_DELAY_STATS_ENABLE: identical to the enabled
 * version except the per-vdev tx-delay-stats predicate is omitted.
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1075 #endif
1076 
1077 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1078 /**
1079  * dp_pkt_add_timestamp() - add timestamp in data payload
1080  *
1081  * @vdev: dp vdev
1082  * @index: index to decide offset in payload
1083  * @time: timestamp to add in data payload
1084  * @nbuf: network buffer
1085  *
1086  * Return: none
1087  */
1088 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1089 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1090 			  qdf_nbuf_t nbuf);
1091 /**
1092  * dp_pkt_get_timestamp() - get current system time
1093  *
1094  * @time: return current system time
1095  *
1096  * Return: none
1097  */
1098 void dp_pkt_get_timestamp(uint64_t *time);
1099 #else
1100 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1101 
/* Stub: payload timestamping disabled without CONFIG_DP_PKT_ADD_TIMESTAMP;
 * note @time is left unmodified here.
 */
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
1106 #endif
1107 
1108 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the increase or decrease in
 * outstanding tx desc count
 * values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Refreshes the per-pdev high watermark (tx_descs_max) from the current
 * outstanding count and publishes both to the sysfs mem-stats hook.
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}
1127 
1128 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
1129 
/* Stub: tx desc watermark tracking disabled without CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
1134 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1135 
1136 #ifdef QCA_TX_LIMIT_CHECK
1137 /**
1138  * dp_tx_limit_check - Check if allocated tx descriptors reached
1139  * soc max limit and pdev max limit
1140  * @vdev: DP vdev handle
1141  *
1142  * Return: true if allocated tx descriptors reached max configured value, else
1143  * false
1144  */
1145 static inline bool
1146 dp_tx_limit_check(struct dp_vdev *vdev)
1147 {
1148 	struct dp_pdev *pdev = vdev->pdev;
1149 	struct dp_soc *soc = pdev->soc;
1150 
1151 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1152 			soc->num_tx_allowed) {
1153 		dp_tx_info("queued packets are more than max tx, drop the frame");
1154 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1155 		return true;
1156 	}
1157 
1158 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1159 			pdev->num_tx_allowed) {
1160 		dp_tx_info("queued packets are more than max tx, drop the frame");
1161 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1162 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
1163 		return true;
1164 	}
1165 	return false;
1166 }
1167 
1168 /**
1169  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
1170  * reached soc max limit
1171  * @vdev: DP vdev handle
1172  *
1173  * Return: true if allocated tx descriptors reached max configured value, else
1174  * false
1175  */
1176 static inline bool
1177 dp_tx_exception_limit_check(struct dp_vdev *vdev)
1178 {
1179 	struct dp_pdev *pdev = vdev->pdev;
1180 	struct dp_soc *soc = pdev->soc;
1181 
1182 	if (qdf_atomic_read(&soc->num_tx_exception) >=
1183 			soc->num_msdu_exception_desc) {
1184 		dp_info("exc packets are more than max drop the exc pkt");
1185 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
1186 		return true;
1187 	}
1188 
1189 	return false;
1190 }
1191 
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Bump both per-pdev and soc-wide counters, then refresh watermark */
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1207 
/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Drop both per-pdev and soc-wide counters, then refresh watermark */
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1223 
1224 #else //QCA_TX_LIMIT_CHECK
/* Stub: no tx descriptor limit enforcement without QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}
1230 
/* Stub: no exception descriptor limit enforcement without QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}
1236 
/* Without QCA_TX_LIMIT_CHECK only the per-pdev counter is maintained;
 * the soc-wide counter is not tracked in this configuration.
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1243 
/* Without QCA_TX_LIMIT_CHECK only the per-pdev counter is maintained;
 * the soc-wide counter is not tracked in this configuration.
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1250 #endif //QCA_TX_LIMIT_CHECK
1251 #endif
1252