xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 67011a392c017971b1c0e7b8f6621fd5613f6075)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 
40 #define DP_INVALID_VDEV_ID 0xFF
41 
42 #define DP_TX_MAX_NUM_FRAGS 6
43 
44 /*
45  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
46  * please do not change this flag's definition
47  */
48 #define DP_TX_DESC_FLAG_FRAG		0x1
49 #define DP_TX_DESC_FLAG_TO_FW		0x2
50 #define DP_TX_DESC_FLAG_SIMPLE		0x4
51 #define DP_TX_DESC_FLAG_RAW		0x8
52 #define DP_TX_DESC_FLAG_MESH		0x10
53 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
54 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
55 #define DP_TX_DESC_FLAG_ME		0x80
56 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
57 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
58 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
59 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
60 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
61 #define DP_TX_DESC_FLAG_FLUSH		0x2000
62 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
63 
64 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
65 
/*
 * DP_TX_FREE_SINGLE_BUF - DMA-unmap an nbuf and free it
 *
 * Parameters are parenthesized so callers may pass arbitrary expressions;
 * do { } while (0) keeps the macro statement-safe in if/else bodies.
 */
#define DP_TX_FREE_SINGLE_BUF(soc, buf)                           \
do {                                                              \
	qdf_nbuf_unmap((soc)->osdev, (buf), QDF_DMA_TO_DEVICE);   \
	qdf_nbuf_free(buf);                                       \
} while (0)
71 
72 #define OCB_HEADER_VERSION	 1
73 
74 #ifdef TX_PER_PDEV_DESC_POOL
75 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
76 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
77 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
78 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
79 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
80 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
81 #else
82 	#ifdef TX_PER_VDEV_DESC_POOL
83 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
84 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
85 	#endif /* TX_PER_VDEV_DESC_POOL */
86 #endif /* TX_PER_PDEV_DESC_POOL */
87 #define DP_TX_QUEUE_MASK 0x3
88 
89 #define MAX_CDP_SEC_TYPE 12
90 
91 /* number of dwords for htt_tx_msdu_desc_ext2_t */
92 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
93 
94 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
95 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
96 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
97 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
98 #define dp_tx_info(params...) \
99 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
100 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
101 
102 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
103 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
104 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
105 #define dp_tx_comp_info(params...) \
106 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
107 #define dp_tx_comp_info_rl(params...) \
108 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
109 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
110 
111 #ifndef QCA_HOST_MODE_WIFI_DISABLED
112 
113 /**
114  * struct dp_tx_frag_info_s
 * @vaddr: hlos virtual address for buffer
116  * @paddr_lo: physical address lower 32bits
117  * @paddr_hi: physical address higher bits
118  * @len: length of the buffer
119  */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;	/* HLOS virtual address of the fragment */
	uint32_t paddr_lo;	/* lower 32 bits of fragment physical address */
	uint16_t paddr_hi;	/* upper bits of fragment physical address */
	uint16_t len;		/* fragment length in bytes */
};
126 
127 /**
128  * struct dp_tx_seg_info_s - Segmentation Descriptor
129  * @nbuf: NBUF pointer if segment corresponds to separate nbuf
130  * @frag_cnt: Fragment count in this segment
131  * @total_len: Total length of segment
132  * @frags: per-Fragment information
133  * @next: pointer to next MSDU segment
134  */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;	/* nbuf if this segment maps to its own buffer */
	uint16_t frag_cnt;	/* number of valid entries in frags[] */
	uint16_t total_len;	/* total segment length in bytes */
	/* per-fragment addresses/lengths, at most DP_TX_MAX_NUM_FRAGS */
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;	/* next MSDU segment in the chain */
};
142 
143 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
144 
145 /**
146  * struct dp_tx_sg_info_s - Scatter Gather Descriptor
147  * @num_segs: Number of segments (TSO/ME) in the frame
148  * @total_len: Total length of the frame
149  * @curr_seg: Points to current segment descriptor to be processed. Chain of
150  * 	      descriptors for SG frames/multicast-unicast converted packets.
151  *
152  * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
153  * carry fragmentation information
154  * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
155  * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
156  * converted into set of skb sg (nr_frags) structures.
157  */
struct dp_tx_sg_info_s {
	uint32_t num_segs;	/* number of TSO/ME segments in the frame */
	uint32_t total_len;	/* total frame length in bytes */
	/* current segment under processing; head of the segment chain */
	struct dp_tx_seg_info_s *curr_seg;
};
163 
164 /**
165  * struct dp_tx_queue - Tx queue
166  * @desc_pool_id: Descriptor Pool to be used for the tx queue
167  * @ring_id: TCL descriptor ring ID corresponding to the tx queue
168  *
169  * Tx queue contains information of the software (Descriptor pool)
170  * and hardware resources (TCL ring id) to be used for a particular
171  * transmit queue (obtained from skb_queue_mapping in case of linux)
172  */
struct dp_tx_queue {
	uint8_t desc_pool_id;	/* SW TX descriptor pool for this queue */
	uint8_t ring_id;	/* TCL data ring used for this queue */
};
177 
178 /**
179  * struct dp_tx_msdu_info_s - MSDU Descriptor
180  * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
181  * @tx_queue: Tx queue on which this MSDU should be transmitted
182  * @num_seg: Number of segments (TSO)
183  * @tid: TID (override) that is sent from HLOS
184  * @u.tso_info: TSO information for TSO frame types
185  * 	     (chain of the TSO segments, number of segments)
186  * @u.sg_info: Scatter Gather information for non-TSO SG frames
187  * @meta_data: Mesh meta header information
188  * @exception_fw: Duplicate frame to be sent to firmware
189  * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
191  * @gsn: global sequence for reinjected mcast packets
192  * @vdev_id : vdev_id for reinjected mcast packets
193  * @skip_hp_update : Skip HP update for TSO segments and update in last segment
194  *
195  * This structure holds the complete MSDU information needed to program the
196  * Hardware TCL and MSDU extension descriptors for different frame types
197  *
198  */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;	/* Regular/TSO/SG/ME frame type */
	struct dp_tx_queue tx_queue;	/* desc pool + TCL ring selection */
	uint32_t num_seg;		/* number of segments (TSO) */
	uint8_t tid;			/* TID override sent from HLOS */
	uint8_t exception_fw;		/* duplicate frame to FW if set */
	uint8_t is_tx_sniffer;		/* packet is to be sniffed if set */
	union {
		struct qdf_tso_info_t tso_info;	/* TSO segment chain info */
		struct dp_tx_sg_info_s sg_info;	/* non-TSO SG/ME frag info */
	} u;
	/* mesh meta header words, copied into the MSDU ext descriptor */
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;	/* replayed back in TX completions */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;		/* global seq number for reinjected mcast */
	uint8_t vdev_id;	/* vdev id for reinjected mcast packets */
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	/* skip ring HP update per TSO segment; update once on the last one */
	uint8_t skip_hp_update;
#endif
};
222 
223 #ifndef QCA_HOST_MODE_WIFI_DISABLED
224 /**
225  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
226  * @soc: core txrx context
227  * @index: index of ring to deinit
228  *
229  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
230  * index of the respective TCL/WBM2SW release in soc structure.
231  * For example, if the index is 2 then &soc->tcl_data_ring[2]
232  * and &soc->tx_comp_ring[2] will be deinitialized.
233  *
234  * Return: none
235  */
236 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
237 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
238 
239 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
240 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
241 void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
242 void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
243 QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
244 					 uint8_t num_pool,
245 					 uint32_t num_desc);
246 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
247 					uint8_t num_pool,
248 					uint32_t num_desc);
/* Free/unmap the buffer attached to a TX descriptor */
void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc);
/* Return a TX descriptor to its pool */
void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
/* Compute TX delay stats for a completed descriptor */
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
			 uint8_t tid, uint8_t ring_id);
/* Process the HAL completion status of one TX descriptor */
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_txrx_peer *txrx_peer,
				  uint8_t ring_id);
/* Post-status processing of a completed TX descriptor */
void dp_tx_comp_process_desc(struct dp_soc *soc,
			     struct dp_tx_desc_s *desc,
			     struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer);
/* Handle a frame signalled for reinjection (reason in reinject_reason) */
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status,
			    uint8_t reinject_reason);
/* Handle a frame returned for host inspection */
void dp_tx_inspect_handler(struct dp_soc *soc,
			   struct dp_vdev *vdev,
			   struct dp_tx_desc_s *tx_desc,
			   uint8_t *status);
/* Update per-peer basic TX counters for a completed frame */
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update);
274 
275 #ifdef DP_UMAC_HW_RESET_SUPPORT
/* UMAC HW reset: TX entry points that drop frames (normal and exception
 * paths) — presumably installed while reset is in progress; verify in dp_tx.c
 */
qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  qdf_nbuf_t nbuf,
			  struct cdp_tx_exception_metadata *tx_exc_metadata);
281 #endif
282 #ifndef QCA_HOST_MODE_WIFI_DISABLED
283 /**
 * dp_tso_soc_attach() - TSO Attach handler
285  * @txrx_soc: Opaque Dp handle
286  *
287  * Reserve TSO descriptor buffers
288  *
289  * Return: QDF_STATUS_E_FAILURE on failure or
290  * QDF_STATUS_SUCCESS on success
291  */
292 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
293 
294 /**
 * dp_tso_soc_detach() - TSO Detach handler
296  * @txrx_soc: Opaque Dp handle
297  *
298  * Deallocate TSO descriptor buffers
299  *
300  * Return: QDF_STATUS_E_FAILURE on failure or
301  * QDF_STATUS_SUCCESS on success
302  */
303 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
304 
/* Standard TX entry point; returns NULL on success, nbuf on failure */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

/* TX entry point that additionally validates the vdev id */
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
				    qdf_nbuf_t nbuf);

/* TX via the exception path with caller-supplied exception metadata */
qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
				qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

/* Exception-path TX with vdev id validation */
qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
					      uint8_t vdev_id,
					      qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

/* TX entry point for mesh frames */
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);
/* Transmit a single MSDU using the supplied msdu_info/peer/metadata */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);

/* Transmit a multi-segment MSDU (TSO/SG/ME); noinline when lock stats
 * are enabled so lock profiling attributes time correctly
 */
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif
334 #ifdef FEATURE_WLAN_TDLS
335 /**
336  * dp_tx_non_std() - Allow the control-path SW to send data frames
337  * @soc_hdl: Datapath soc handle
338  * @vdev_id: id of vdev
339  * @tx_spec: what non-standard handling to apply to the tx data frames
340  * @msdu_list: NULL-terminated list of tx MSDUs
341  *
342  * Return: NULL on success,
343  *         nbuf when it fails to send
344  */
345 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
346 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
347 #endif
348 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
349 
350 /**
351  * dp_tx_comp_handler() - Tx completion handler
352  * @int_ctx: pointer to DP interrupt context
353  * @soc: core txrx main context
354  * @hal_srng: Opaque HAL SRNG pointer
355  * @ring_id: completion ring id
356  * @quota: No. of packets/descriptors that can be serviced in one loop
357  *
358  * This function will collect hardware release ring element contents and
359  * handle descriptor contents. Based on contents, free packet or handle error
360  * conditions
361  *
362  * Return: Number of TX completions processed
363  */
364 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
365 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
366 			    uint32_t quota);
367 
/* Prepare and send a multicast-enhancement (mcast-to-ucast) frame */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

/* Prepare and send an IGMP frame through the mcast-enhancement path */
QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
373 
374 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
375 
376 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* Mcast enhancement compiled out (or host mode disabled): no teardown */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
381 #endif
382 
383 /**
384  * dp_tx_pdev_init() - dp tx pdev init
385  * @pdev: physical device instance
386  *
387  * Return: QDF_STATUS_SUCCESS: success
388  *         QDF_STATUS_E_RESOURCES: Error return
389  */
390 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
391 {
392 	struct dp_soc *soc = pdev->soc;
393 
394 	/* Initialize Flow control counters */
395 	qdf_atomic_init(&pdev->num_tx_outstanding);
396 	pdev->tx_descs_max = 0;
397 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
398 		/* Initialize descriptors in TCL Ring */
399 		hal_tx_init_data_ring(soc->hal_soc,
400 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
401 	}
402 
403 	return QDF_STATUS_SUCCESS;
404 }
405 
406 /**
407  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
408  * @soc: Handle to HAL Soc structure
409  * @hal_soc: HAL SOC handle
410  * @num_avail_for_reap: descriptors available for reap
411  * @hal_ring_hdl: ring pointer
412  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
413  * @last_prefetched_sw_desc: pointer to last prefetch SW desc
414  *
415  * Return: None
416  */
417 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm the cache with the previously resolved SW descriptor's nbuf
	 * (two prefetches — assumes a 64-byte cache line; TODO confirm)
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	/* Resolve the SW descriptor for the already-prefetched HW
	 * descriptor, then kick off the prefetch of the next cached HW
	 * descriptor for the following reap iteration. Order matters: the
	 * SW pointer updated here is what the nbuf prefetch above uses on
	 * the next call.
	 */
	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		dp_tx_comp_get_prefetched_params_from_hal_desc(
						soc,
						*last_prefetched_hw_desc,
						last_prefetched_sw_desc);
		*last_prefetched_hw_desc =
			hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
444 #else
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* HW/SW descriptor prefetch feature disabled: no-op */
}
455 #endif
456 
457 #ifndef FEATURE_WDS
458 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
459 {
460 	return;
461 }
462 #endif
463 
464 #ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	/* Multipass support compiled out: always let the frame proceed */
	return true;
}

/* Multipass support compiled out: nothing to tear down per vdev */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
477 
478 #else
479 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
480 			     qdf_nbuf_t nbuf,
481 			     struct dp_tx_msdu_info_s *msdu_info);
482 
483 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
484 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
485 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
486 				 struct dp_tx_msdu_info_s *msdu_info,
487 				 uint16_t group_key);
488 #endif
489 
490 /**
491  * dp_tx_hw_to_qdf()- convert hw status to qdf status
492  * @status: hw status
493  *
494  * Return: qdf tx rx status
495  */
496 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
497 {
498 	switch (status) {
499 	case HAL_TX_TQM_RR_FRAME_ACKED:
500 		return QDF_TX_RX_STATUS_OK;
501 	case HAL_TX_TQM_RR_REM_CMD_TX:
502 		return QDF_TX_RX_STATUS_NO_ACK;
503 	case HAL_TX_TQM_RR_REM_CMD_REM:
504 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
505 	case HAL_TX_TQM_RR_REM_CMD_AGED:
506 		return QDF_TX_RX_STATUS_FW_DISCARD;
507 	default:
508 		return QDF_TX_RX_STATUS_DEFAULT;
509 	}
510 }
511 
512 #ifndef QCA_HOST_MODE_WIFI_DISABLED
513 /**
514  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
515  * @vdev: DP Virtual device handle
516  * @nbuf: Buffer pointer
517  * @queue: queue ids container for nbuf
518  *
519  * TX packet queue has 2 instances, software descriptors id and dma ring id
520  * Based on tx feature and hardware configuration queue id combination could be
521  * different.
522  * For example -
523  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
524  * With no XPS,lock based resource protection, Descriptor pool ids are different
525  * for each vdev, dma ring id will be same as single pdev id
526  *
527  * Return: None
528  */
529 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
530 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
531 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
532 {
533 	queue->ring_id = qdf_get_cpu();
534 	queue->desc_pool_id = queue->ring_id;
535 }
536 
537 /*
538  * dp_tx_get_hal_ring_hdl()- Get the hal_tx_ring_hdl for data transmission
539  * @dp_soc - DP soc structure pointer
540  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
541  *
542  * Return - HAL ring handle
543  */
544 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
545 						       uint8_t ring_id)
546 {
547 	if (ring_id == soc->num_tcl_data_rings)
548 		return soc->tcl_cmd_credit_ring.hal_srng;
549 
550 	return soc->tcl_data_ring[ring_id].hal_srng;
551 }
552 
553 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
554 
555 #ifdef TX_MULTI_TCL
556 #ifdef IPA_OFFLOAD
557 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
558 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
559 {
560 	/* get flow id */
561 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
562 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
563 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
564 	else
565 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
566 					vdev->pdev->soc->num_tcl_data_rings);
567 }
568 #else
569 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
570 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
571 {
572 	/* get flow id */
573 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
574 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
575 				vdev->pdev->soc->num_tcl_data_rings);
576 }
577 #endif
578 #else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* Single-ring configuration: fixed pool and ring per vdev/pdev */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
586 #endif
587 
/* Non-multiq: ring_id maps directly to a TCL data ring */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
593 #endif
594 
595 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - Lock-less access start on a TX ring
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: status from hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - Lock-less access end on a TX ring
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - Access end after a reap-only session
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * No-op in the lock-less configuration.
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}
634 
635 #else
/* Locked variant: ring access serialized through the SRNG lock */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

/* End a reap-only access session (see hal_srng_access_end_reap) */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
654 #endif
655 
656 #ifdef ATH_TX_PRI_OVERRIDE
657 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
658 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
659 #else
660 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
661 #endif
662 
663 /* TODO TX_FEATURE_NOT_YET */
/* Placeholder until exception TX completion handling is implemented */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
668 /* TODO TX_FEATURE_NOT_YET */
669 
670 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
671 		      bool force_free);
672 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
673 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
674 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
675 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
676 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
677 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
678 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
679 void
680 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
681 			     uint32_t buf_type);
682 #else /* QCA_HOST_MODE_WIFI_DISABLED */
683 
/* QCA_HOST_MODE_WIFI_DISABLED: host TX datapath is compiled out, so the
 * descriptor-pool and vdev hooks below collapse to benign no-ops that
 * report success.
 */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
723 
724 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
725 
726 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
727 	defined(QCA_TX_CAPTURE_SUPPORT) || \
728 	defined(QCA_MCOPY_SUPPORT)
729 #ifdef FEATURE_PERPKT_INFO
730 QDF_STATUS
731 dp_get_completion_indication_for_stack(struct dp_soc *soc,
732 				       struct dp_pdev *pdev,
733 				       struct dp_txrx_peer *peer,
734 				       struct hal_tx_completion_status *ts,
735 				       qdf_nbuf_t netbuf,
736 				       uint64_t time_latency);
737 
738 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
739 			    uint16_t peer_id, uint32_t ppdu_id,
740 			    qdf_nbuf_t netbuf);
741 #endif
742 #else
/* Per-packet completion indication unsupported in this configuration */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* No-op: completions are not forwarded to the stack */
static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
760 #endif
761 
762 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
763 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
764 				       struct dp_tx_desc_s *desc,
765 				       struct hal_tx_completion_status *ts);
766 #else
/* Packet-capture v2 disabled: completion is not mirrored to capture */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
773 #endif
774 
775 #ifndef QCA_HOST_MODE_WIFI_DISABLED
776 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
777 /**
778  * dp_tx_update_stats() - Update soc level tx stats
779  * @soc: DP soc handle
780  * @tx_desc: TX descriptor reference
781  * @ring_id: TCL ring id
782  *
783  * Returns: none
784  */
785 void dp_tx_update_stats(struct dp_soc *soc,
786 			struct dp_tx_desc_s *tx_desc,
787 			uint8_t ring_id);
788 
789 /**
790  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
791  * @soc: Datapath soc handle
792  * @tx_desc: tx packet descriptor
793  * @tid: TID for pkt transmission
794  * @msdu_info: MSDU info of tx packet
795  * @ring_id: TCL ring id
796  *
797  * Returns: 1, if coalescing is to be done
798  *	    0, if coalescing is not to be done
799  */
800 int
801 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
802 			 struct dp_tx_desc_s *tx_desc,
803 			 uint8_t tid,
804 			 struct dp_tx_msdu_info_s *msdu_info,
805 			 uint8_t ring_id);
806 
807 /**
808  * dp_tx_ring_access_end() - HAL ring access end for data transmission
809  * @soc: Datapath soc handle
810  * @hal_ring_hdl: HAL ring handle
811  * @coalesce: Coalesce the current write or not
812  *
813  * Returns: none
814  */
815 void
816 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
817 		      int coalesce);
818 #else
819 /**
820  * dp_tx_update_stats() - Update soc level tx stats
821  * @soc: DP soc handle
822  * @tx_desc: TX descriptor reference
823  * @ring_id: TCL ring id
824  *
825  * Returns: none
826  */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

/* Without the SW latency manager there is nothing to coalesce: end the
 * ring access immediately.
 */
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

/* Stub: never coalesce TCL register writes (always returns 0) */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
847 
848 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
849 
850 #ifdef FEATURE_RUNTIME_PM
851 /**
852  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
853  * @soc_hdl: DP soc handle
854  * @is_high_tput: flag to indicate whether throughput is high
855  *
856  * Returns: none
857  */
858 static inline
859 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
860 					 bool is_high_tput)
861 {
862 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
863 
864 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
865 }
866 
867 void
868 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
869 			      hal_ring_handle_t hal_ring_hdl,
870 			      int coalesce);
871 #else
872 #ifdef DP_POWER_SAVE
873 void
874 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
875 			      hal_ring_handle_t hal_ring_hdl,
876 			      int coalesce);
877 #else
/* No runtime-PM / power-save handling needed: plain ring access end */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
885 #endif
886 
/* Runtime-PM disabled: the throughput policy hint is ignored */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
891 #endif
892 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
893 
894 #ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	struct dp_tx_hw_desc_evt *evt;
	uint64_t idx = 0;

	/* History buffer is optional; skip logging when not allocated */
	if (!soc->tx_hw_desc_history)
		return;

	/* Advance the shared write index and wrap the stored index at the
	 * history depth. qdf_do_div_rem() maps idx == HIST_MAX to slot 0,
	 * so the wrapped stored index and the slot used below stay in
	 * step. NOTE(review): the increment is not atomic — presumably a
	 * single writer per context; confirm against callers.
	 */
	idx = ++soc->tx_hw_desc_history->index;
	if (idx == DP_TX_HW_DESC_HIST_MAX)
		soc->tx_hw_desc_history->index = 0;
	idx = qdf_do_div_rem(idx, DP_TX_HW_DESC_HIST_MAX);

	/* Snapshot the TCL descriptor, the post timestamp and ring HP/TP */
	evt = &soc->tx_hw_desc_history->entry[idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
916 #else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	/* DP_TX_HW_DESC_HISTORY disabled: no descriptor event logging */
}
923 #endif
924 
925 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
926 /**
927  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
928  * @ts: Tx completion status
929  * @delta_tsf: Difference between TSF clock and qtimer
930  * @delay_us: Delay in microseconds
931  *
932  * Return: QDF_STATUS_SUCCESS   : Success
933  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
934  *                                delay_us is NULL
935  *         QDF_STATUS_E_FAILURE : Error in delay calculation
936  */
937 QDF_STATUS
938 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
939 			  uint32_t delta_tsf,
940 			  uint32_t *delay_us);
941 
942 /**
943  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
944  * @soc_hdl: cdp soc pointer
945  * @vdev_id: vdev id
946  * @delta_tsf: difference between TSF clock and qtimer
947  *
948  * Return: None
949  */
950 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
951 		      uint32_t delta_tsf);
952 #endif
953 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
954 /**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
956  * @soc_hdl: cdp soc pointer
957  * @vdev_id: vdev id
958  * @enable: true to enable and false to disable
959  *
960  * Return: QDF_STATUS
961  */
962 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
963 				      uint8_t vdev_id, bool enable);
964 
965 /**
966  * dp_get_uplink_delay() - Get uplink delay value
967  * @soc_hdl: cdp soc pointer
968  * @vdev_id: vdev id
969  * @val: pointer to save uplink delay value
970  *
971  * Return: QDF_STATUS
972  */
973 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
974 			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
976 
/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Return: True if any tx pkt tracepoint is enabled else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	/* Probe each tracepoint in the same order the combined
	 * expression evaluated them; bail out on the first hit.
	 */
	if (qdf_trace_dp_tx_comp_tcp_pkt_enabled())
		return true;

	if (qdf_trace_dp_tx_comp_udp_pkt_enabled())
		return true;

	return qdf_trace_dp_tx_comp_pkt_enabled();
}
989 
990 #ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Records the current system tick count in the descriptor for tx
 * descriptor tracking.
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}
1002 
1003 /**
1004  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1005  * @tx_desc: tx descriptor
1006  *
1007  * Check for corruption in tx descriptor, if magic pattern is not matching
1008  * trigger self recovery
1009  *
1010  * Return: none
1011  */
1012 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1013 #else
/* DP_TX_TRACKING disabled: descriptor timestamping is compiled out */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}
1018 
/* DP_TX_TRACKING disabled: descriptor corruption check is compiled out */
static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
1023 #endif
1024 
1025 #ifndef CONFIG_SAWF
/*
 * CONFIG_SAWF disabled: no SAWF tagging exists, so an nbuf can never
 * carry a valid SAWF tag.
 */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1030 #endif
1031 
1032 #ifdef HW_TX_DELAY_STATS_ENABLE
1033 /**
1034  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1035  * @vdev: DP vdev handle
1036  * @tx_desc: tx descriptor
1037  *
1038  * Return: true when descriptor is timestamped, false otherwise
1039  */
1040 static inline
1041 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1042 			       struct dp_tx_desc_s *tx_desc)
1043 {
1044 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1045 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1046 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1047 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1048 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
1049 		tx_desc->timestamp = qdf_ktime_real_get();
1050 		return true;
1051 	}
1052 	return false;
1053 }
1054 #else
1055 static inline
1056 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1057 			       struct dp_tx_desc_s *tx_desc)
1058 {
1059 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1060 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1061 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1062 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
1063 		tx_desc->timestamp = qdf_ktime_real_get();
1064 		return true;
1065 	}
1066 	return false;
1067 }
1068 #endif
1069 
1070 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1071 /**
1072  * dp_pkt_add_timestamp() - add timestamp in data payload
1073  *
1074  * @vdev: dp vdev
1075  * @index: index to decide offset in payload
1076  * @time: timestamp to add in data payload
1077  * @nbuf: network buffer
1078  *
1079  * Return: none
1080  */
1081 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1082 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1083 			  qdf_nbuf_t nbuf);
1084 /**
1085  * dp_pkt_get_timestamp() - get current system time
1086  *
1087  * @time: return current system time
1088  *
1089  * Return: none
1090  */
1091 void dp_pkt_get_timestamp(uint64_t *time);
1092 #else
/*
 * CONFIG_DP_PKT_ADD_TIMESTAMP disabled: compile the API away.
 * NOTE(review): as an empty macro the arguments are never evaluated,
 * unlike the real function — callers must not rely on side effects in
 * the argument expressions.
 */
#define dp_pkt_add_timestamp(vdev, index, time, nbuf)

/* Stub: leaves *time untouched when timestamping is compiled out */
static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
1099 #endif
1100 #endif
1101