xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision cf269aa28dd6d2246de6aa0dfeab69dedcc01bb8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 #include "dp_ipa.h"
40 #ifdef IPA_OFFLOAD
41 #include <wlan_ipa_obj_mgmt_api.h>
42 #endif
43 
44 #define DP_INVALID_VDEV_ID 0xFF
45 
46 #define DP_TX_MAX_NUM_FRAGS 6
47 
48 /* Invalid peer id for reinject */
49 #define DP_INVALID_PEER 0xFFFE
50 
51 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
52 			 struct dp_tx_msdu_info_s *msdu_info,
53 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id);
54 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
55 /*
56  * DP_TX_DESC_FLAG_FRAG must always be defined as 0x1;
57  * do not change this flag's definition.
58  */
59 #define DP_TX_DESC_FLAG_FRAG		0x1
60 #define DP_TX_DESC_FLAG_TO_FW		0x2
61 #define DP_TX_DESC_FLAG_SIMPLE		0x4
62 #define DP_TX_DESC_FLAG_RAW		0x8
63 #define DP_TX_DESC_FLAG_MESH		0x10
64 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
65 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
66 #define DP_TX_DESC_FLAG_ME		0x80
67 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
68 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
69 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
70 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
71 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
72 #define DP_TX_DESC_FLAG_FLUSH		0x2000
73 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
74 #define DP_TX_DESC_FLAG_RMNET		0x8000
75 #define DP_TX_DESC_FLAG_FASTPATH_SIMPLE 0x10000
76 #define DP_TX_DESC_FLAG_PPEDS		0x20000
77 #define DP_TX_DESC_FLAG_FAST		0x40000
78 #define DP_TX_DESC_FLAG_SPECIAL         0x80000
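
/*
 * The flags above are bitmask values OR-ed into the flags field of the
 * SW tx descriptor (struct dp_tx_desc_s); for example, a fast-path
 * simple frame that must also be sent to FW would carry
 * (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_TO_FW).
 */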
79 
80 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
81 
82 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
83 do {                                                           \
84 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
85 	qdf_nbuf_free(buf);                                    \
86 } while (0)
87 
88 #define OCB_HEADER_VERSION	 1
89 
90 #ifdef TX_PER_PDEV_DESC_POOL
91 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
92 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
93 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
94 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
95 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
96 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
97 #else
98 	#ifdef TX_PER_VDEV_DESC_POOL
99 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
100 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
101 	#endif /* TX_PER_VDEV_DESC_POOL */
102 #endif /* TX_PER_PDEV_DESC_POOL */
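
/*
 * Summary of the selections above (illustrative):
 * - TX_PER_PDEV_DESC_POOL with QCA_LL_TX_FLOW_CONTROL_V2:
 *	pool id = vdev_id, ring id = pdev_id
 * - TX_PER_PDEV_DESC_POOL without flow control v2:
 *	pool id = pdev_id, ring id = pdev_id
 * - TX_PER_VDEV_DESC_POOL:
 *	pool id = vdev_id, ring id = pdev_id
 */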
103 #define DP_TX_QUEUE_MASK 0x3
104 
105 #define MAX_CDP_SEC_TYPE 12
106 
107 /* number of dwords for htt_tx_msdu_desc_ext2_t */
108 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
109 
110 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
111 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
112 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
113 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
114 #define dp_tx_info(params...) \
115 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
116 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
117 
118 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
119 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
120 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
121 #define dp_tx_comp_info(params...) \
122 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
123 #define dp_tx_comp_info_rl(params...) \
124 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
125 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
126 
127 #ifndef QCA_HOST_MODE_WIFI_DISABLED
128 
129 /**
130  * struct dp_tx_frag_info_s - TX buffer fragment information
131  * @vaddr: HLOS virtual address of the buffer
132  * @paddr_lo: lower 32 bits of the buffer's physical address
133  * @paddr_hi: upper 16 bits of the buffer's physical address
134  * @len: length of the buffer
135  */
136 struct dp_tx_frag_info_s {
137 	uint8_t  *vaddr;
138 	uint32_t paddr_lo;
139 	uint16_t paddr_hi;
140 	uint16_t len;
141 };
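
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): shows how a fragment entry could be populated from a 64-bit DMA
 * address, splitting it across @paddr_lo and @paddr_hi.
 */
static inline void
dp_tx_frag_fill_example(struct dp_tx_frag_info_s *frag, uint8_t *vaddr,
			qdf_dma_addr_t paddr, uint16_t len)
{
	frag->vaddr = vaddr;
	frag->paddr_lo = (uint32_t)(paddr & 0xffffffffULL);
	frag->paddr_hi = (uint16_t)((uint64_t)paddr >> 32);
	frag->len = len;
}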
142 
143 /**
144  * struct dp_tx_seg_info_s - Segmentation Descriptor
145  * @nbuf: NBUF pointer if segment corresponds to separate nbuf
146  * @frag_cnt: Fragment count in this segment
147  * @total_len: Total length of segment
148  * @frags: per-Fragment information
149  * @next: pointer to next MSDU segment
150  */
151 struct dp_tx_seg_info_s  {
152 	qdf_nbuf_t nbuf;
153 	uint16_t frag_cnt;
154 	uint16_t total_len;
155 	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
156 	struct dp_tx_seg_info_s *next;
157 };
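
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): walks a segment chain, e.g. as built for SG/ME frames, and sums
 * the per-fragment lengths, which is useful for cross-checking the
 * total_len bookkeeping.
 */
static inline uint32_t
dp_tx_seg_chain_len_example(struct dp_tx_seg_info_s *seg)
{
	uint32_t total = 0;
	uint16_t i;

	while (seg) {
		for (i = 0; i < seg->frag_cnt; i++)
			total += seg->frags[i].len;
		seg = seg->next;
	}

	return total;
}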
158 
159 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
160 
161 /**
162  * struct dp_tx_sg_info_s - Scatter Gather Descriptor
163  * @num_segs: Number of segments (TSO/ME) in the frame
164  * @total_len: Total length of the frame
165  * @curr_seg: Points to current segment descriptor to be processed. Chain of
166  * 	      descriptors for SG frames/multicast-unicast converted packets.
167  *
168  * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames
169  * to carry fragmentation information.
170  * Raw frames are handed over to the driver as an SKB chain with MPDU
171  * boundaries indicated through flags in the SKB CB (first_msdu and
172  * last_msdu); this is converted into a set of skb sg (nr_frags) structures.
173  */
174 struct dp_tx_sg_info_s {
175 	uint32_t num_segs;
176 	uint32_t total_len;
177 	struct dp_tx_seg_info_s *curr_seg;
178 };
179 
180 /**
181  * struct dp_tx_queue - Tx queue
182  * @desc_pool_id: Descriptor Pool to be used for the tx queue
183  * @ring_id: TCL descriptor ring ID corresponding to the tx queue
184  *
185  * A Tx queue holds the software (descriptor pool) and hardware (TCL ring
186  * id) resources to be used for a particular transmit queue (obtained from
187  * skb_queue_mapping in the case of Linux).
188  */
189 struct dp_tx_queue {
190 	uint8_t desc_pool_id;
191 	uint8_t ring_id;
192 };
193 
194 /**
195  * struct dp_tx_msdu_info_s - MSDU Descriptor
196  * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
197  * @tx_queue: Tx queue on which this MSDU should be transmitted
198  * @num_seg: Number of segments (TSO)
199  * @tid: TID (override) that is sent from HLOS
200  * @exception_fw: Duplicate frame to be sent to firmware
201  * @is_tx_sniffer: Indicates if the packet has to be sniffed
202  * @u: union of frame information structs
203  * @u.tso_info: TSO information for TSO frame types
204  * 	     (chain of the TSO segments, number of segments)
205  * @u.sg_info: Scatter Gather information for non-TSO SG frames
206  * @meta_data: Mesh meta header information
207  * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
208  * @gsn: global sequence for reinjected mcast packets
209  * @vdev_id: vdev_id for reinjected mcast packets
210  * @skip_hp_update: Skip HP update for TSO segments and update in last segment
211  * @buf_len: packet buffer length (QCA_DP_TX_RMNET_OPTIMIZATION)
212  * @payload_addr: packet payload address (QCA_DP_TX_RMNET_OPTIMIZATION)
213  * @driver_ingress_ts: driver ingress timestamp
214  *
215  * This structure holds the complete MSDU information needed to program the
216  * Hardware TCL and MSDU extension descriptors for different frame types
217  *
218  */
219 struct dp_tx_msdu_info_s {
220 	enum dp_tx_frm_type frm_type;
221 	struct dp_tx_queue tx_queue;
222 	uint32_t num_seg;
223 	uint8_t tid;
224 	uint8_t exception_fw;
225 	uint8_t is_tx_sniffer;
226 	union {
227 		struct qdf_tso_info_t tso_info;
228 		struct dp_tx_sg_info_s sg_info;
229 	} u;
230 	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
231 	uint16_t ppdu_cookie;
232 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
233 #ifdef WLAN_MCAST_MLO
234 	uint16_t gsn;
235 	uint8_t vdev_id;
236 #endif
237 #endif
238 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
239 	uint8_t skip_hp_update;
240 #endif
241 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
242 	uint16_t buf_len;
243 	uint8_t *payload_addr;
244 #endif
245 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
246 	qdf_ktime_t driver_ingress_ts;
247 #endif
248 };
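
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): minimal initialization of an MSDU descriptor for a regular
 * (non-TSO/SG) frame; assumes qdf_mem_zero() and dp_tx_frm_std from
 * enum dp_tx_frm_type (dp_types.h) are visible via the existing includes.
 */
static inline void
dp_tx_msdu_info_init_example(struct dp_tx_msdu_info_s *msdu_info,
			     uint8_t tid)
{
	qdf_mem_zero(msdu_info, sizeof(*msdu_info));
	msdu_info->frm_type = dp_tx_frm_std;	/* regular frame */
	msdu_info->tid = tid;			/* HLOS TID override */
}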
249 
250 #ifndef QCA_HOST_MODE_WIFI_DISABLED
251 /**
252  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
253  * @soc: core txrx context
254  * @index: index of ring to deinit
255  *
256  * Deinit one TCL and one WBM2SW release ring on an as-needed basis, using
257  * the index of the respective TCL/WBM2SW release ring in the soc structure.
258  * For example, if the index is 2 then &soc->tcl_data_ring[2]
259  * and &soc->tx_comp_ring[2] will be deinitialized.
260  *
261  * Return: none
262  */
263 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
264 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
265 
266 /**
267  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
268  * @soc: core txrx main context
269  * @comp_head: software descriptor head pointer
270  * @ring_id: ring number
271  *
272  * This function will process batch of descriptors reaped by dp_tx_comp_handler
273  * and release the software descriptors after processing is complete
274  *
275  * Return: none
276  */
277 void
278 dp_tx_comp_process_desc_list(struct dp_soc *soc,
279 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
280 
281 /**
282  * dp_tx_comp_process_desc_list_fast() - Tx complete fast sw descriptor handler
283  * @soc: core txrx main context
284  * @head_desc: software descriptor head pointer
285  * @tail_desc: software descriptor tail pointer
286  * @ring_id: ring number
287  * @fast_desc_count: Total descriptor count in the list
288  *
289  * This function will process batch of descriptors reaped by dp_tx_comp_handler
290  * and append the list of descriptors to the freelist
291  *
292  * Return: none
293  */
294 void
295 dp_tx_comp_process_desc_list_fast(struct dp_soc *soc,
296 				  struct dp_tx_desc_s *head_desc,
297 				  struct dp_tx_desc_s *tail_desc,
298 				  uint8_t ring_id,
299 				  uint32_t fast_desc_count);
300 
301 /**
302  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
303  * @soc: Soc handle
304  * @desc: software Tx descriptor to be processed
305  * @delayed_free: defer freeing of nbuf
306  *
307  * Return: nbuf to be freed later
308  */
309 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
310 			       bool delayed_free);
311 
312 /**
313  * dp_tx_desc_release() - Release Tx Descriptor
314  * @soc: Soc handle
315  * @tx_desc: Tx Descriptor
316  * @desc_pool_id: Descriptor Pool ID
317  *
318  * Deallocate all resources attached to Tx descriptor and free the Tx
319  * descriptor.
320  *
321  * Return: none
322  */
323 void dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
324 			uint8_t desc_pool_id);
325 
326 /**
327  * dp_tx_compute_delay() - Compute tx completion delay from descriptor
328  *				timestamps and update delay statistics
329  * @vdev: vdev handle
330  * @tx_desc: tx descriptor
331  * @tid: tid value
332  * @ring_id: TCL or WBM ring number for transmit path
333  *
334  * Return: none
335  */
336 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
337 			 uint8_t tid, uint8_t ring_id);
338 
339 /**
340  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
341  * @soc: DP soc handle
342  * @tx_desc: software descriptor head pointer
343  * @ts: Tx completion status
344  * @txrx_peer: txrx peer handle
345  * @ring_id: ring number
346  *
347  * Return: none
348  */
349 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
350 				  struct dp_tx_desc_s *tx_desc,
351 				  struct hal_tx_completion_status *ts,
352 				  struct dp_txrx_peer *txrx_peer,
353 				  uint8_t ring_id);
354 
355 /**
356  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
357  * @soc: DP Soc handle
358  * @desc: software Tx descriptor
359  * @ts: Tx completion status from HAL/HTT descriptor
360  * @txrx_peer: DP peer context
361  *
362  * Return: none
363  */
364 void dp_tx_comp_process_desc(struct dp_soc *soc,
365 			     struct dp_tx_desc_s *desc,
366 			     struct hal_tx_completion_status *ts,
367 			     struct dp_txrx_peer *txrx_peer);
368 
369 /**
370  * dp_tx_reinject_handler() - Tx Reinject Handler
371  * @soc: datapath soc handle
372  * @vdev: datapath vdev handle
373  * @tx_desc: software descriptor head pointer
374  * @status: Tx completion status from HTT descriptor
375  * @reinject_reason: reinject reason from HTT descriptor
376  *
377  * This function reinjects frames back to Target.
378  * TODO: Host queue needs to be added.
379  *
380  * Return: none
381  */
382 void dp_tx_reinject_handler(struct dp_soc *soc,
383 			    struct dp_vdev *vdev,
384 			    struct dp_tx_desc_s *tx_desc,
385 			    uint8_t *status,
386 			    uint8_t reinject_reason);
387 
388 /**
389  * dp_tx_inspect_handler() - Tx Inspect Handler
390  * @soc: datapath soc handle
391  * @vdev: datapath vdev handle
392  * @tx_desc: software descriptor head pointer
393  * @status: Tx completion status from HTT descriptor
394  *
395  * Handles Tx frames sent back to Host for inspection
396  * (ProxyARP)
397  *
398  * Return: none
399  */
400 void dp_tx_inspect_handler(struct dp_soc *soc,
401 			   struct dp_vdev *vdev,
402 			   struct dp_tx_desc_s *tx_desc,
403 			   uint8_t *status);
404 
405 /**
406  * dp_tx_update_peer_basic_stats() - Update peer basic stats
407  * @txrx_peer: Datapath txrx_peer handle
408  * @length: Length of the packet
409  * @tx_status: Tx status from TQM/FW
410  * @update: enhanced flag value present in dp_pdev
411  *
412  * Return: none
413  */
414 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
415 				   uint32_t length, uint8_t tx_status,
416 				   bool update);
417 
418 #ifdef DP_UMAC_HW_RESET_SUPPORT
419 /**
420  * dp_tx_drop() - Drop the frame on a given VAP
421  * @soc: DP soc handle
422  * @vdev_id: id of DP vdev handle
423  * @nbuf: skb
424  *
425  * Drop all the incoming packets
426  *
427  * Return: nbuf
428  */
429 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
430 
431 /**
432  * dp_tx_exc_drop() - Drop the frame on a given VAP
433  * @soc_hdl: DP soc handle
434  * @vdev_id: id of DP vdev handle
435  * @nbuf: skb
436  * @tx_exc_metadata: Handle that holds exception path meta data
437  *
438  * Drop all the incoming packets
439  *
440  * Return: nbuf
441  */
442 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
443 			  qdf_nbuf_t nbuf,
444 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
445 #endif
446 #ifdef WLAN_SUPPORT_PPEDS
447 qdf_nbuf_t
448 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
449 #else
450 static inline qdf_nbuf_t
451 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
452 {
453 	return NULL;
454 }
455 #endif
456 
457 /**
458  * dp_get_updated_tx_desc() - get updated tx_desc value
459  * @psoc: psoc object
460  * @pool_num: Tx desc pool Id
461  * @current_desc: Current Tx Desc value
462  *
463  * In low-mem profiles the number of Tx descriptors in the 4th pool is
464  * reduced to a quarter for memory optimization (flag DP_TX_DESC_POOL_OPTIMIZE).
465  *
466  * Return: Updated Tx Desc value
467  */
468 #ifdef DP_TX_DESC_POOL_OPTIMIZE
469 static inline uint32_t dp_get_updated_tx_desc(struct cdp_ctrl_objmgr_psoc *psoc,
470 					      uint8_t pool_num,
471 					      uint32_t current_desc)
472 {
473 	if (pool_num == 3)
474 		return cfg_get(psoc, CFG_DP_TX_DESC_POOL_3);
475 	else
476 		return current_desc;
477 }
478 #else
479 static inline uint32_t dp_get_updated_tx_desc(struct cdp_ctrl_objmgr_psoc *psoc,
480 					      uint8_t pool_num,
481 					      uint32_t current_desc)
482 {
483 	return current_desc;
484 }
485 #endif
486 
487 #ifdef DP_TX_EXT_DESC_POOL_OPTIMIZE
488 /**
489  * dp_tx_ext_desc_pool_override() - Override tx ext desc pool Id
490  * @desc_pool_id: Desc pool Id
491  *
492  * For low mem profiles the number of ext_tx_desc_pool is reduced to 1.
493  * Since in Tx path the desc_pool_id is filled based on CPU core,
494  * dp_tx_ext_desc_pool_override will return the desc_pool_id as 0 for lowmem
495  * profiles.
496  *
497  * Return: updated tx_ext_desc_pool Id
498  */
499 static inline uint8_t dp_tx_ext_desc_pool_override(uint8_t desc_pool_id)
500 {
501 	return 0;
502 }
503 
504 /**
505  * dp_get_ext_tx_desc_pool_num() - get the number of ext_tx_desc pool
506  * @soc: core txrx main context
507  *
508  * For low-mem profiles the number of ext_tx_desc pools is reduced to 1 for
509  * memory optimization.
510  * When DP_TX_EXT_DESC_POOL_OPTIMIZE is defined, dp_get_ext_tx_desc_pool_num
511  * returns the reduced pool count of 1 for low-mem profiles; for other
512  * profiles it returns the same value as the number of tx_desc pools.
513  *
514  * Return: number of ext_tx_desc pools
515  */
517 static inline uint8_t dp_get_ext_tx_desc_pool_num(struct dp_soc *soc)
518 {
519 	return 1;
520 }
521 
522 #else
523 static inline uint8_t dp_tx_ext_desc_pool_override(uint8_t desc_pool_id)
524 {
525 	return desc_pool_id;
526 }
527 
528 static inline uint8_t dp_get_ext_tx_desc_pool_num(struct dp_soc *soc)
529 {
530 	return wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
531 }
532 #endif
533 
534 #ifndef QCA_HOST_MODE_WIFI_DISABLED
535 /**
536  * dp_tso_soc_attach() - TSO Attach handler
537  * @txrx_soc: Opaque DP handle
538  *
539  * Reserve TSO descriptor buffers
540  *
541  * Return: QDF_STATUS_E_FAILURE on failure or
542  * QDF_STATUS_SUCCESS on success
543  */
544 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
545 
546 /**
547  * dp_tso_soc_detach() - TSO Detach handler
548  * @txrx_soc: Opaque DP handle
549  *
550  * Deallocate TSO descriptor buffers
551  *
552  * Return: QDF_STATUS_E_FAILURE on failure or
553  * QDF_STATUS_SUCCESS on success
554  */
555 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
556 
557 /**
558  * dp_tx_send() - Transmit a frame on a given VAP
559  * @soc_hdl: DP soc handle
560  * @vdev_id: id of DP vdev handle
561  * @nbuf: skb
562  *
563  * Entry point for Core Tx layer (DP_TX) invoked from
564  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
565  * forwarding cases.
566  *
567  * Return: NULL on success,
568  *         nbuf when it fails to send
569  */
570 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
571 		      qdf_nbuf_t nbuf);
572 
573 /**
574  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
575  *      case to avoid a check in the per-packet path.
576  * @soc_hdl: DP soc handle
577  * @vdev_id: id of DP vdev handle
578  * @nbuf: skb
579  *
580  * Entry point for Core Tx layer (DP_TX) invoked from
581  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
582  * with a special condition to avoid the per-packet check in dp_tx_send.
583  *
584  * Return: NULL on success,
585  *         nbuf when it fails to send
586  */
587 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
588 				    uint8_t vdev_id, qdf_nbuf_t nbuf);
589 
590 /**
591  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
592  * @soc_hdl: DP soc handle
593  * @vdev_id: id of DP vdev handle
594  * @nbuf: skb
595  * @tx_exc_metadata: Handle that holds exception path meta data
596  *
597  * Entry point for Core Tx layer (DP_TX) invoked from
598  * hard_start_xmit in OSIF/HDD to transmit frames through fw
599  *
600  * Return: NULL on success,
601  *         nbuf when it fails to send
602  */
603 qdf_nbuf_t
604 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
605 		     qdf_nbuf_t nbuf,
606 		     struct cdp_tx_exception_metadata *tx_exc_metadata);
607 
608 /**
609  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
610  *      in exception path, as a special case avoiding the regular exception-path check.
611  * @soc_hdl: DP soc handle
612  * @vdev_id: id of DP vdev handle
613  * @nbuf: skb
614  * @tx_exc_metadata: Handle that holds exception path meta data
615  *
616  * Entry point for Core Tx layer (DP_TX) invoked from
617  * hard_start_xmit in OSIF/HDD to transmit frames through fw
618  *
619  * Return: NULL on success,
620  *         nbuf when it fails to send
621  */
622 qdf_nbuf_t
623 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
624 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
625 				   struct cdp_tx_exception_metadata *tx_exc_metadata);
626 
627 /**
628  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
629  * @soc_hdl: DP soc handle
630  * @vdev_id: DP vdev handle
631  * @nbuf: skb
632  *
633  * Entry point for Core Tx layer (DP_TX) invoked from
634  * hard_start_xmit in OSIF/HDD
635  *
636  * Return: NULL on success,
637  *         nbuf when it fails to send
638  */
639 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
640 			   qdf_nbuf_t nbuf);
641 
642 /**
643  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
644  * @vdev: DP vdev handle
645  * @nbuf: skb
646  * @msdu_info: MSDU information
647  * @peer_id: peer_id of the peer in case of NAWDS frames
648  * @tx_exc_metadata: Handle that holds exception path metadata
649  *
650  * Return: NULL on success,
651  *         nbuf when it fails to send
652  */
653 qdf_nbuf_t
654 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
655 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
656 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
657 
658 /**
659  * dp_tx_mcast_enhance() - Multicast enhancement on TX
660  * @vdev: DP vdev handle
661  * @nbuf: network buffer to be transmitted
662  *
663  * Return: true on success
664  *         false on failure
665  */
666 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
667 
668 /**
669  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
670  * @vdev: DP vdev handle
671  * @nbuf: skb
672  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
673  *
674  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
675  *
676  * Return: NULL on success,
677  *         nbuf when it fails to send
678  */
679 #if QDF_LOCK_STATS
680 noinline qdf_nbuf_t
681 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
682 			 struct dp_tx_msdu_info_s *msdu_info);
683 #else
684 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
685 				    struct dp_tx_msdu_info_s *msdu_info);
686 #endif
687 #ifdef FEATURE_WLAN_TDLS
688 /**
689  * dp_tx_non_std() - Allow the control-path SW to send data frames
690  * @soc_hdl: Datapath soc handle
691  * @vdev_id: id of vdev
692  * @tx_spec: what non-standard handling to apply to the tx data frames
693  * @msdu_list: NULL-terminated list of tx MSDUs
694  *
695  * Return: NULL on success,
696  *         nbuf when it fails to send
697  */
698 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
699 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
700 #endif
701 
702 /**
703  * dp_tx_frame_is_drop() - check if the packet is a loopback frame
704  * @vdev: DP vdev handle
705  * @srcmac: source MAC address
706  * @dstmac: destination MAC address
707  *
708  * Return: 1 if frame needs to be dropped else 0
709  */
710 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
711 
712 #ifndef WLAN_SOFTUMAC_SUPPORT
713 /**
714  * dp_tx_comp_handler() - Tx completion handler
715  * @int_ctx: pointer to DP interrupt context
716  * @soc: core txrx main context
717  * @hal_srng: Opaque HAL SRNG pointer
718  * @ring_id: completion ring id
719  * @quota: No. of packets/descriptors that can be serviced in one loop
720  *
721  * This function will collect hardware release ring element contents and
722  * handle descriptor contents. Based on contents, free packet or handle error
723  * conditions
724  *
725  * Return: Number of TX completions processed
726  */
727 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
728 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
729 			    uint32_t quota);
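
/*
 * Illustrative usage sketch (hypothetical helper, modeled on the DP
 * interrupt service path): drains up to @quota tx completions from the
 * WBM2SW completion ring identified by @ring_id.
 */
static inline uint32_t
dp_tx_comp_drain_example(struct dp_intr *int_ctx, struct dp_soc *soc,
			 uint8_t ring_id, uint32_t quota)
{
	return dp_tx_comp_handler(int_ctx, soc,
				  soc->tx_comp_ring[ring_id].hal_srng,
				  ring_id, quota);
}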
730 #endif
731 
736 QDF_STATUS
737 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
738 
739 QDF_STATUS
740 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
741 
742 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
743 
744 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
745 static inline void dp_tx_me_exit(struct dp_pdev *pdev)
746 {
747 	return;
748 }
749 #endif
750 
751 /**
752  * dp_tx_pdev_init() - dp tx pdev init
753  * @pdev: physical device instance
754  *
755  * Return: QDF_STATUS_SUCCESS: success
756  *         QDF_STATUS_E_RESOURCES: Error return
757  */
758 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
759 {
760 	struct dp_soc *soc = pdev->soc;
761 
762 	/* Initialize Flow control counters */
763 	qdf_atomic_init(&pdev->num_tx_outstanding);
764 	pdev->tx_descs_max = 0;
765 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
766 		/* Initialize descriptors in TCL Ring */
767 		hal_tx_init_data_ring(soc->hal_soc,
768 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
769 	}
770 
771 	return QDF_STATUS_SUCCESS;
772 }
773 
774 /**
775  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
776  * @soc: Handle to DP soc structure
777  * @hal_soc: HAL SOC handle
778  * @num_avail_for_reap: descriptors available for reap
779  * @hal_ring_hdl: ring pointer
780  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
781  * @last_prefetched_sw_desc: pointer to the last prefetched SW descriptor
782  *
783  * Return: None
784  */
785 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
786 static inline
787 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
788 				    hal_soc_handle_t hal_soc,
789 				    uint32_t num_avail_for_reap,
790 				    hal_ring_handle_t hal_ring_hdl,
791 				    void **last_prefetched_hw_desc,
792 				    struct dp_tx_desc_s
793 				    **last_prefetched_sw_desc)
794 {
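	/*
	 * Warm the cache with the first two 64-byte lines of the nbuf
	 * attached to the previously resolved SW descriptor (assumes
	 * 64-byte cache lines).
	 */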
795 	if (*last_prefetched_sw_desc) {
796 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
797 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
798 	}
799 
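	/*
	 * Resolve the SW descriptor behind the last prefetched HW
	 * descriptor, then advance the HW prefetch pointer: completion
	 * descriptors are 32 bytes, so a descriptor that is not 64-byte
	 * aligned shares a cache line with its predecessor and the next
	 * cached line is prefetched instead (assumes 64-byte cache lines).
	 */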
800 	if (num_avail_for_reap && *last_prefetched_hw_desc) {
801 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
802 						       *last_prefetched_hw_desc,
803 						       last_prefetched_sw_desc);
804 
805 		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
806 			*last_prefetched_hw_desc =
807 				hal_srng_dst_prefetch_next_cached_desc(
808 					hal_soc,
809 					hal_ring_hdl,
810 					(uint8_t *)*last_prefetched_hw_desc);
811 		else
812 			*last_prefetched_hw_desc =
813 				hal_srng_dst_get_next_32_byte_desc(hal_soc,
814 					hal_ring_hdl,
815 					(uint8_t *)*last_prefetched_hw_desc);
816 	}
817 }
818 #else
819 static inline
820 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
821 				    hal_soc_handle_t hal_soc,
822 				    uint32_t num_avail_for_reap,
823 				    hal_ring_handle_t hal_ring_hdl,
824 				    void **last_prefetched_hw_desc,
825 				    struct dp_tx_desc_s
826 				    **last_prefetched_sw_desc)
827 {
828 }
829 #endif
830 
831 #ifndef FEATURE_WDS
832 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
833 {
834 	return;
835 }
836 #endif
837 
838 #ifndef QCA_MULTIPASS_SUPPORT
839 static inline
840 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
841 			     qdf_nbuf_t nbuf,
842 			     struct dp_tx_msdu_info_s *msdu_info)
843 {
844 	return true;
845 }
846 
847 static inline
848 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
849 {
850 }
851 
852 #else
853 /**
854  * dp_tx_multipass_process() - Process vlan frames in tx path
855  * @soc: dp soc handle
856  * @vdev: DP vdev handle
857  * @nbuf: skb
858  * @msdu_info: msdu descriptor
859  *
860  * Return: status whether frame needs to be dropped or transmitted
861  */
862 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
863 			     qdf_nbuf_t nbuf,
864 			     struct dp_tx_msdu_info_s *msdu_info);
865 
866 /**
867  * dp_tx_vdev_multipass_deinit() - set vlan map for vdev
868  * @vdev: pointer to vdev
869  *
870  * return: void
871  */
872 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
873 
874 /**
875  * dp_tx_add_groupkey_metadata() - Add group key in metadata
876  * @vdev: DP vdev handle
877  * @msdu_info: MSDU info to be setup in MSDU descriptor
878  * @group_key: Group key index programmed in metadata
879  *
880  * Return: void
881  */
882 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
883 				 struct dp_tx_msdu_info_s *msdu_info,
884 				 uint16_t group_key);
885 #endif
886 
887 /**
888  * dp_tx_hw_to_qdf()- convert hw status to qdf status
889  * @status: hw status
890  *
891  * Return: qdf tx rx status
892  */
893 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
894 {
895 	switch (status) {
896 	case HAL_TX_TQM_RR_FRAME_ACKED:
897 		return QDF_TX_RX_STATUS_OK;
898 	case HAL_TX_TQM_RR_REM_CMD_TX:
899 		return QDF_TX_RX_STATUS_NO_ACK;
900 	case HAL_TX_TQM_RR_REM_CMD_REM:
901 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
902 	case HAL_TX_TQM_RR_REM_CMD_AGED:
903 		return QDF_TX_RX_STATUS_FW_DISCARD;
904 	default:
905 		return QDF_TX_RX_STATUS_DEFAULT;
906 	}
907 }
908 
909 #ifndef QCA_HOST_MODE_WIFI_DISABLED
910 /**
911  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
912  * @vdev: DP Virtual device handle
913  * @nbuf: Buffer pointer
914  * @queue: queue ids container for nbuf
915  *
916  * A TX packet queue has two components: the software descriptor pool id
917  * and the DMA ring id. Based on the tx feature and hardware configuration,
918  * the queue id combination can differ.
919  * For example:
920  * With XPS enabled, all TX descriptor pools and DMA rings are assigned per
921  * CPU id. With no XPS (lock-based resource protection), descriptor pool
922  * ids differ per vdev while the DMA ring id equals the single pdev id.
923  *
924  * Return: None
925  */
926 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
927 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
928 #ifdef IPA_WDI3_TX_TWO_PIPES
929 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
930 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
931 {
932 	queue->ring_id = qdf_get_cpu();
933 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
934 		if ((queue->ring_id == IPA_TCL_DATA_RING_IDX) ||
935 		    (queue->ring_id == IPA_TX_ALT_RING_IDX))
936 			queue->ring_id = 0;
937 
938 	queue->desc_pool_id = queue->ring_id;
939 }
940 #else
941 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
942 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
943 {
944 	queue->ring_id = qdf_get_cpu();
945 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
946 		if (queue->ring_id == IPA_TCL_DATA_RING_IDX)
947 			queue->ring_id = 0;
948 
949 	queue->desc_pool_id = queue->ring_id;
950 }
951 #endif
952 #else
953 #ifdef WLAN_TX_PKT_CAPTURE_ENH
954 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
955 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
956 {
957 	if (qdf_unlikely(vdev->is_override_rbm_id))
958 		queue->ring_id = vdev->rbm_id;
959 	else
960 		queue->ring_id = qdf_get_cpu();
961 
962 	queue->desc_pool_id = queue->ring_id;
963 }
964 #else
965 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
966 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
967 {
968 	queue->ring_id = qdf_get_cpu();
969 	queue->desc_pool_id = queue->ring_id;
970 }
971 
972 #endif
973 #endif
974 
975 /**
976  * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
977  * @soc: DP soc structure pointer
978  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
979  *
980  * Return: HAL ring handle
981  */
982 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
983 						       uint8_t ring_id)
984 {
985 	if (ring_id == soc->num_tcl_data_rings)
986 		return soc->tcl_cmd_credit_ring.hal_srng;
987 
988 	return soc->tcl_data_ring[ring_id].hal_srng;
989 }
990 
991 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
992 
993 #ifdef TX_MULTI_TCL
994 #ifdef IPA_OFFLOAD
995 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
996 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
997 {
998 	/* get flow id */
999 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1000 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled &&
1001 	    !ipa_config_is_opt_wifi_dp_enabled())
1002 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
1003 	else
1004 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
1005 					vdev->pdev->soc->num_tcl_data_rings);
1006 }
1007 #else
1008 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
1009 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
1010 {
1011 	/* get flow id */
1012 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1013 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
1014 				vdev->pdev->soc->num_tcl_data_rings);
1015 }
1016 #endif
1017 #else
1018 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
1019 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
1020 {
1021 	/* get flow id */
1022 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1023 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
1024 }
1025 #endif
1026 
1027 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
1028 						       uint8_t ring_id)
1029 {
1030 	return soc->tcl_data_ring[ring_id].hal_srng;
1031 }
1032 #endif
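
/*
 * Illustrative usage sketch (hypothetical helper, not part of the driver
 * API): fills the tx queue of an MSDU descriptor from the
 * configuration-specific dp_tx_get_queue() variant above, mirroring how
 * the per-packet send path derives the descriptor pool and TCL ring.
 */
static inline void
dp_tx_fill_queue_example(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info)
{
	dp_tx_get_queue(vdev, nbuf, &msdu_info->tx_queue);
}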
1033 
1034 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
1035 /**
1036  * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
1037  * @soc: DP soc structure pointer
1038  * @hal_ring_hdl: HAL ring handle
1039  *
1040  * Return: 0 on success; non-zero if ring access could not be started
1041  */
1042 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
1043 					      hal_ring_handle_t hal_ring_hdl)
1044 {
1045 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
1046 }
1047 
1048 /**
1049  * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
1050  * @soc: DP soc structure pointer
1051  * @hal_ring_hdl: HAL ring handle
1052  *
1053  * Return: None
1054  */
1055 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
1056 					     hal_ring_handle_t hal_ring_hdl)
1057 {
1058 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
1059 }
1060 
1061 /**
1062  * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
1063  * @soc: DP soc structure pointer
1064  * @hal_ring_hdl: HAL ring handle
1065  *
1066  * Return: None
1067  */
1068 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
1069 						  hal_ring_handle_t
1070 						  hal_ring_hdl)
1071 {
1072 }
1073 
1074 #else
1075 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
1076 					      hal_ring_handle_t hal_ring_hdl)
1077 {
1078 	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
1079 }
1080 
1081 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
1082 					     hal_ring_handle_t hal_ring_hdl)
1083 {
1084 	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1085 }
1086 
1087 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
1088 						  hal_ring_handle_t
1089 						  hal_ring_hdl)
1090 {
1091 	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1092 }
1093 #endif
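
/*
 * Typical usage of the ring access helpers above (illustrative sketch
 * mirroring the TX enqueue path; a non-zero return from the start call
 * indicates the ring could not be accessed):
 *
 *	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl)))
 *		return QDF_STATUS_E_FAILURE;
 *	... program and submit the TCL descriptor ...
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */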
1094 
1095 #ifdef ATH_TX_PRI_OVERRIDE
1096 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
1097 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
1098 #else
1099 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
1100 #endif
1101 
1102 /* TODO TX_FEATURE_NOT_YET */
1103 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
1104 {
1105 	return;
1106 }
1107 /* TODO TX_FEATURE_NOT_YET */
1108 
1109 /**
1110  * dp_tx_desc_flush() - release resources associated
1111  *                      with a TX desc
1112  *
1113  * @pdev: Handle to DP pdev structure
1114  * @vdev: virtual device instance
1115  * NULL: no specific vdev; check all allocated TX descs
1116  * on this pdev.
1117  * Non-NULL: check only the allocated TX descs associated with this vdev.
1118  *
1119  * @force_free:
1120  * true: flush the TX descs.
1121  * false: only reset the vdev pointer in each allocated TX desc
1122  * that is associated with the current vdev.
1123  *
1124  * This function walks the TX desc pool to flush
1125  * the outstanding TX data or to reset the vdev to NULL in the associated
1126  * TX descs.
1127  */
1128 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
1129 		      bool force_free);
1130 
1131 /**
1132  * dp_tx_vdev_attach() - attach vdev to dp tx
1133  * @vdev: virtual device instance
1134  *
1135  * Return: QDF_STATUS_SUCCESS: success
1136  *         QDF_STATUS_E_RESOURCES: Error return
1137  */
1138 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
1139 
1140 /**
1141  * dp_tx_vdev_detach() - detach vdev from dp tx
1142  * @vdev: virtual device instance
1143  *
1144  * Return: QDF_STATUS_SUCCESS: success
1145  *         QDF_STATUS_E_RESOURCES: Error return
1146  */
1147 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
1148 
1149 /**
1150  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
1151  * @vdev: virtual device instance
1152  *
1153  * Return: void
1154  *
1155  */
1156 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
1157 
1158 /**
1159  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
1160  * @soc: core txrx main context
1161  *
1162  * This function allocates memory for the following descriptor pools:
1163  * 1. regular sw tx descriptor pools (static pools)
1164  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1165  * 3. TSO descriptor pools
1166  *
1167  * Return: QDF_STATUS_SUCCESS: success
1168  *         QDF_STATUS_E_RESOURCES: Error return
1169  */
1170 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
1171 
1172 /**
1173  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
1174  * @soc: core txrx main context
1175  *
1176  * This function initializes the following TX descriptor pools
1177  * 1. regular sw tx descriptor pools (static pools)
1178  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1179  * 3. TSO descriptor pools
1180  *
1181  * Return: QDF_STATUS_SUCCESS: success
1182  *	   QDF_STATUS_E_RESOURCES: Error return
1183  */
1184 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
1185 
1186 /**
1187  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
1188  * @soc: core txrx main context
1189  *
1190  * This function frees all tx related descriptors as below
1191  * 1. Regular TX descriptors (static pools)
1192  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1193  * 3. TSO descriptors
1194  *
1195  */
1196 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
1197 
1198 /**
1199  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
1200  * @soc: core txrx main context
1201  *
1202  * This function de-initializes all tx related descriptors as below
1203  * 1. Regular TX descriptors (static pools)
1204  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1205  * 3. TSO descriptors
1206  *
1207  */
1208 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
1209 
1210 #ifndef WLAN_SOFTUMAC_SUPPORT
1211 /**
1212  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
1213  * @soc: core DP main context
1214  * @hal_desc: hal descriptor
1215  * @buf_type: indicates if the buffer is of type link disc or msdu
1216  *
1217  * wbm_internal_error is seen in the following scenarios:
1218  *
1219  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
1220  * 2.  Null pointers detected during delinking process
1221  *
1222  * Some null pointer cases:
1223  *
1224  * a. MSDU buffer pointer is NULL
1225  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
1226  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
1227  *
1228  * Return: None
1229  */
1230 void
1231 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
1232 			     uint32_t buf_type);
1233 #endif
1234 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1235 
1236 static inline
1237 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
1238 {
1239 	return QDF_STATUS_SUCCESS;
1240 }
1241 
1242 static inline
1243 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
1244 {
1245 	return QDF_STATUS_SUCCESS;
1246 }
1247 
1248 static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
1249 {
1250 }
1251 
1252 static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
1253 {
1254 }
1255 
1256 static inline
1257 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
1258 		      bool force_free)
1259 {
1260 }
1261 
1262 static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
1263 {
1264 	return QDF_STATUS_SUCCESS;
1265 }
1266 
1267 static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
1268 {
1269 	return QDF_STATUS_SUCCESS;
1270 }
1271 
1272 static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
1273 {
1274 }
1275 
1276 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1277 
1278 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
1279 	defined(QCA_TX_CAPTURE_SUPPORT) || \
1280 	defined(QCA_MCOPY_SUPPORT)
1281 #ifdef FEATURE_PERPKT_INFO
1282 
1283 /**
1284  * dp_get_completion_indication_for_stack() - send completion to stack
1285  * @soc: dp_soc handle
1286  * @pdev: dp_pdev handle
1287  * @txrx_peer: dp peer handle
1288  * @ts: transmit completion status structure
1289  * @netbuf: Buffer pointer for free
1290  * @time_latency:
1291  *
1292  * This function indicates whether the buffer needs to be sent to the
1293  * stack for freeing.
1294  *
1295  * Return: QDF_STATUS
1296  */
1297 QDF_STATUS
1298 dp_get_completion_indication_for_stack(struct dp_soc *soc,
1299 				       struct dp_pdev *pdev,
1300 				       struct dp_txrx_peer *txrx_peer,
1301 				       struct hal_tx_completion_status *ts,
1302 				       qdf_nbuf_t netbuf,
1303 				       uint64_t time_latency);
1304 
1305 /**
1306  * dp_send_completion_to_stack() - send completion to stack
1307  * @soc: dp_soc handle
1308  * @pdev: dp_pdev handle
1309  * @peer_id: peer_id of the peer for which completion came
1310  * @ppdu_id: ppdu_id
1311  * @netbuf: Buffer pointer for free
1312  *
1313  * This function sends the completion to the stack so that
1314  * the buffer can be freed.
1315  *
1316  * Return: none
1317  */
1318 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1319 			    uint16_t peer_id, uint32_t ppdu_id,
1320 			    qdf_nbuf_t netbuf);
1321 #endif
1322 #else
1323 static inline
1324 QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
1325 				       struct dp_pdev *pdev,
1326 				       struct dp_txrx_peer *peer,
1327 				       struct hal_tx_completion_status *ts,
1328 				       qdf_nbuf_t netbuf,
1329 				       uint64_t time_latency)
1330 {
1331 	return QDF_STATUS_E_NOSUPPORT;
1332 }
1333 
1334 static inline
1335 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1336 			    uint16_t peer_id, uint32_t ppdu_id,
1337 			    qdf_nbuf_t netbuf)
1338 {
1339 }
1340 #endif
1341 
1342 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
1343 /**
1344  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
1345  * @soc: dp_soc handle
1346  * @desc: Tx Descriptor
1347  * @ts: HAL Tx completion descriptor contents
1348  *
1349  * This function is used to send the tx completion to packet capture.
1350  * Return: none
1351  */
1352 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
1353 				       struct dp_tx_desc_s *desc,
1354 				       struct hal_tx_completion_status *ts);
1355 #else
1356 static inline void
1357 dp_send_completion_to_pkt_capture(struct dp_soc *soc,
1358 				  struct dp_tx_desc_s *desc,
1359 				  struct hal_tx_completion_status *ts)
1360 {
1361 }
1362 #endif
1363 
1364 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1365 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1366 /**
1367  * dp_tx_update_stats() - Update soc level tx stats
1368  * @soc: DP soc handle
1369  * @tx_desc: TX descriptor reference
1370  * @ring_id: TCL ring id
1371  *
1372  * Return: none
1373  */
1374 void dp_tx_update_stats(struct dp_soc *soc,
1375 			struct dp_tx_desc_s *tx_desc,
1376 			uint8_t ring_id);
1377 
1378 /**
1379  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1380  * @soc: Datapath soc handle
1381  * @vdev: DP vdev handle
1382  * @tx_desc: tx packet descriptor
1383  * @tid: TID for pkt transmission
1384  * @msdu_info: MSDU info of tx packet
1385  * @ring_id: TCL ring id
1386  *
1387  * Return: 1, if coalescing is to be done
1388  *	    0, if coalescing is not to be done
1389  */
1390 int
1391 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1392 			 struct dp_tx_desc_s *tx_desc,
1393 			 uint8_t tid,
1394 			 struct dp_tx_msdu_info_s *msdu_info,
1395 			 uint8_t ring_id);
1396 
1397 /**
1398  * dp_tx_ring_access_end() - HAL ring access end for data transmission
1399  * @soc: Datapath soc handle
1400  * @hal_ring_hdl: HAL ring handle
1401  * @coalesce: Coalesce the current write or not
1402  *
1403  * Return: none
1404  */
1405 void
1406 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1407 		      int coalesce);
1408 #else
1409 /**
1410  * dp_tx_update_stats() - Update soc level tx stats
1411  * @soc: DP soc handle
1412  * @tx_desc: TX descriptor reference
1413  * @ring_id: TCL ring id
1414  *
1415  * Return: none
1416  */
1417 static inline void dp_tx_update_stats(struct dp_soc *soc,
1418 				      struct dp_tx_desc_s *tx_desc,
1419 				      uint8_t ring_id) { }
1420 
1421 static inline void
1422 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1423 		      int coalesce)
1424 {
1425 	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1426 }
1427 
1428 static inline int
1429 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1430 			 struct dp_tx_desc_s *tx_desc,
1431 			 uint8_t tid,
1432 			 struct dp_tx_msdu_info_s *msdu_info,
1433 			 uint8_t ring_id)
1434 {
1435 	return 0;
1436 }
1437 
1438 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
1439 
1440 #ifdef FEATURE_RUNTIME_PM
1441 /**
1442  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
1443  * @soc_hdl: DP soc handle
1444  * @is_high_tput: flag to indicate whether throughput is high
1445  *
1446  * Return: none
1447  */
1448 static inline
1449 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
1450 					 bool is_high_tput)
1451 {
1452 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1453 
1454 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
1455 }
1456 
1457 /**
1458  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1459  * @soc: Datapath soc handle
1460  * @hal_ring_hdl: HAL ring handle
1461  * @coalesce: Coalesce the current write or not
1462  *
1463  * Feature-specific wrapper for HAL ring access end for data
1464  * transmission
1465  *
1466  * Return: none
1467  */
1468 void
1469 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1470 			      hal_ring_handle_t hal_ring_hdl,
1471 			      int coalesce);
1472 #else
1473 #ifdef DP_POWER_SAVE
1474 void
1475 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1476 			      hal_ring_handle_t hal_ring_hdl,
1477 			      int coalesce);
1478 #else
1479 static inline void
1480 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1481 			      hal_ring_handle_t hal_ring_hdl,
1482 			      int coalesce)
1483 {
1484 	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1485 }
1486 #endif
1487 
1488 static inline void
1489 dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
1490 				    bool is_high_tput)
1491 { }
1492 #endif
1493 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1494 
1495 #ifdef DP_TX_HW_DESC_HISTORY
1496 static inline void
1497 dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
1498 			 hal_ring_handle_t hal_ring_hdl,
1499 			 struct dp_soc *soc, uint8_t ring_id)
1500 {
1501 	struct dp_tx_hw_desc_history *tx_hw_desc_history =
1502 						&soc->tx_hw_desc_history;
1503 	struct dp_tx_hw_desc_evt *evt;
1504 	uint32_t idx = 0;
1505 	uint16_t slot = 0;
1506 
1507 	if (!tx_hw_desc_history->allocated)
1508 		return;
1509 
1510 	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
1511 					 &slot,
1512 					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
1513 					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
1514 					 DP_TX_HW_DESC_HIST_MAX);
1515 
1516 	evt = &tx_hw_desc_history->entry[slot][idx];
1517 	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
1518 	evt->posted = qdf_get_log_timestamp();
1519 	evt->tcl_ring_id = ring_id;
1520 	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
1521 }
1522 #else
1523 static inline void
1524 dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
1525 			 hal_ring_handle_t hal_ring_hdl,
1526 			 struct dp_soc *soc, uint8_t ring_id)
1527 {
1528 }
1529 #endif
1530 
1531 #if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
1532 /**
1533  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
1534  * @ts: Tx completion status
1535  * @delta_tsf: Difference between TSF clock and qtimer
1536  * @delay_us: Delay in microseconds
1537  *
1538  * Return: QDF_STATUS_SUCCESS   : Success
1539  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
1540  *                                delay_us is NULL
1541  *         QDF_STATUS_E_FAILURE : Error in delay calculation
1542  */
1543 QDF_STATUS
1544 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
1545 			  uint32_t delta_tsf,
1546 			  uint32_t *delay_us);
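
/*
 * Illustrative usage sketch (hypothetical helper, not part of the driver
 * API): converts a tx completion into a delay value, treating an error
 * return as "no valid delay"; delta_tsf is assumed to have been cached
 * earlier, e.g. via dp_set_delta_tsf().
 */
static inline uint32_t
dp_tx_hw_delay_example(struct hal_tx_completion_status *ts,
		       uint32_t delta_tsf)
{
	uint32_t delay_us = 0;

	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts, delta_tsf,
							  &delay_us)))
		return 0;

	return delay_us;
}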
1547 
1548 /**
1549  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
1550  * @soc_hdl: cdp soc pointer
1551  * @vdev_id: vdev id
1552  * @delta_tsf: difference between TSF clock and qtimer
1553  *
1554  * Return: None
1555  */
1556 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1557 		      uint32_t delta_tsf);
1558 #endif
1559 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
1560 /**
1561  * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
1562  * @soc_hdl: cdp soc pointer
1563  * @vdev_id: vdev id
1564  * @enable: true to enable and false to disable
1565  *
1566  * Return: QDF_STATUS
1567  */
1568 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
1569 				      uint8_t vdev_id, bool enable);
1570 
1571 /**
1572  * dp_get_uplink_delay() - Get uplink delay value
1573  * @soc_hdl: cdp soc pointer
1574  * @vdev_id: vdev id
1575  * @val: pointer to save uplink delay value
1576  *
1577  * Return: QDF_STATUS
1578  */
1579 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1580 			       uint32_t *val);
1581 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
1582 
1583 /**
1584  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
1585  *
1586  * Return: True if any tx pkt tracepoint is enabled else false
1587  */
1588 static inline
1589 bool dp_tx_pkt_tracepoints_enabled(void)
1590 {
1591 	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
1592 		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
1593 		qdf_trace_dp_tx_comp_pkt_enabled());
1594 }
1595 
1596 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1597 static inline
1598 struct dp_tx_desc_pool_s *dp_get_tx_desc_pool(struct dp_soc *soc,
1599 					      uint8_t pool_id)
1600 {
1601 	struct dp_global_context *dp_global = NULL;
1602 
1603 	dp_global = wlan_objmgr_get_global_ctx();
1604 	return dp_global->tx_desc[soc->arch_id][pool_id];
1605 }
1606 
1607 static inline
1608 struct dp_tx_desc_pool_s *dp_get_spcl_tx_desc_pool(struct dp_soc *soc,
1609 						   uint8_t pool_id)
1610 {
1611 	struct dp_global_context *dp_global = NULL;
1612 
1613 	dp_global = wlan_objmgr_get_global_ctx();
1614 	return dp_global->spcl_tx_desc[soc->arch_id][pool_id];
1615 }
1616 #else
1617 static inline
1618 struct dp_tx_desc_pool_s *dp_get_tx_desc_pool(struct dp_soc *soc,
1619 					      uint8_t pool_id)
1620 {
1621 	return &soc->tx_desc[pool_id];
1622 }
1623 
1624 static inline
1625 struct dp_tx_desc_pool_s *dp_get_spcl_tx_desc_pool(struct dp_soc *soc,
1626 						   uint8_t pool_id)
1627 {
1628 	return &soc->tx_desc[pool_id];
1629 }
1630 #endif
1631 
1632 #ifdef DP_TX_TRACKING
1633 /**
1634  * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
1635  * @tx_desc: tx descriptor
1636  *
1637  * Return: None
1638  */
1639 static inline
1640 void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
1641 {
1642 	tx_desc->timestamp_tick = qdf_system_ticks();
1643 }
1644 
1645 /**
1646  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1647  * @tx_desc: tx descriptor
1648  *
1649  * Check for corruption in the tx descriptor; if the magic pattern does
1650  * not match, trigger self-recovery.
1651  *
1652  * Return: none
1653  */
1654 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1655 #else
1656 static inline
1657 void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
1658 {
1659 }
1660 
1661 static inline
1662 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
1663 {
1664 }
1665 #endif
1666 
1667 #ifndef CONFIG_SAWF
1668 static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
1669 {
1670 	return false;
1671 }
1672 #endif
1673 
1674 #ifdef HW_TX_DELAY_STATS_ENABLE
1675 /**
1676  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1677  * @vdev: DP vdev handle
1678  * @tx_desc: tx descriptor
1679  *
1680  * Return: true when descriptor is timestamped, false otherwise
1681  */
1682 static inline
1683 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1684 			       struct dp_tx_desc_s *tx_desc)
1685 {
1686 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1687 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1688 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1689 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1690 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)) ||
1691 	    qdf_unlikely(wlan_cfg_is_peer_jitter_stats_enabled(vdev->pdev->soc->wlan_cfg_ctx))) {
1692 		tx_desc->timestamp = qdf_ktime_real_get();
1693 		return true;
1694 	}
1695 	return false;
1696 }
1697 #else
1698 static inline
1699 bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
1700 			       struct dp_tx_desc_s *tx_desc)
1701 {
1702 	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
1703 	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
1704 	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
1705 	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
1706 	    qdf_unlikely(wlan_cfg_is_peer_jitter_stats_enabled(vdev->pdev->soc->wlan_cfg_ctx))) {
1707 		tx_desc->timestamp = qdf_ktime_real_get();
1708 		return true;
1709 	}
1710 	return false;
1711 }
1712 #endif
1713 
1714 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1715 /**
1716  * dp_pkt_add_timestamp() - add timestamp in data payload
1717  *
1718  * @vdev: dp vdev
1719  * @index: index to decide offset in payload
1720  * @time: timestamp to add in data payload
1721  * @nbuf: network buffer
1722  *
1723  * Return: none
1724  */
1725 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1726 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1727 			  qdf_nbuf_t nbuf);
1728 /**
1729  * dp_pkt_get_timestamp() - get current system time
1730  *
1731  * @time: buffer to hold the current system time
1732  *
1733  * Return: none
1734  */
1735 void dp_pkt_get_timestamp(uint64_t *time);
1736 #else
1737 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1738 
1739 static inline
1740 void dp_pkt_get_timestamp(uint64_t *time)
1741 {
1742 }
1743 #endif
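
/*
 * Illustrative TX-path pairing of the two helpers above (a sketch;
 * QDF_PKT_TX_DRIVER_ENTRY is assumed to be one of the indices defined
 * in qdf_pkt_add_timestamp.h):
 *
 *	uint64_t ts;
 *
 *	dp_pkt_get_timestamp(&ts);
 *	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, ts, nbuf);
 */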
1744 
1745 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1746 /**
1747  * dp_update_tx_desc_stats() - Track the high watermark of outstanding
1748  * tx descriptors on the pdev and report the current and peak counts
1749  * to the sysfs mem stats interface
1750  * @pdev: DP pdev handle
1751  *
1752  * Return: void
1753  */
1754 static inline void
1755 dp_update_tx_desc_stats(struct dp_pdev *pdev)
1756 {
1757 	int32_t tx_descs_cnt =
1758 		qdf_atomic_read(&pdev->num_tx_outstanding);
1759 	if (pdev->tx_descs_max < tx_descs_cnt)
1760 		pdev->tx_descs_max = tx_descs_cnt;
1761 	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
1762 				   pdev->tx_descs_max);
1763 }
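
/*
 * Worked example: if pdev->num_tx_outstanding moves 3 -> 7 -> 5 across
 * successive calls, tx_descs_max is left at the high watermark of 7, and
 * qdf_mem_tx_desc_cnt_update() publishes both the current and the peak
 * count.
 */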
1764 
1765 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
1766 
1767 static inline void
1768 dp_update_tx_desc_stats(struct dp_pdev *pdev)
1769 {
1770 }
1771 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1772 
1773 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1774 /**
1775  * dp_tx_get_global_desc_in_use() - read the number of global descriptors in use
1776  * @dp_global: Datapath global context
1777  *
1778  * Return: global descriptors in use
1779  */
1780 static inline int32_t
1781 dp_tx_get_global_desc_in_use(struct dp_global_context *dp_global)
1782 {
1783 	return qdf_atomic_read(&dp_global->global_descriptor_in_use);
1784 }
1785 #endif
1786 
1787 #ifdef QCA_TX_LIMIT_CHECK
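/**
 * is_spl_packet() - check whether a frame is treated as a special packet
 * @nbuf: network buffer
 *
 * Currently only EAPOL frames are classified as special for the tx limit
 * checks below.
 *
 * Return: true if the frame is a special packet, else false
 */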
1788 static inline bool is_spl_packet(qdf_nbuf_t nbuf)
1789 {
1790 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1791 		return true;
1792 	return false;
1793 }
1794 
1795 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1796 /**
1797  * dp_tx_limit_check() - Check whether the allocated tx descriptors have
1798  * reached the global and pdev max limits for regular packets, and whether
1799  * the limit has been reached for special packets
1800  * @vdev: DP vdev handle
1801  * @nbuf: network buffer
1802  *
1803  * Return: true if the allocated tx descriptors reached the max limit for
1804  * regular packets or, for special packets, the max configured value for
1805  * the soc/pdev; false otherwise
1806  */
1807 static inline bool
1808 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1809 {
1810 	return false;
1811 }
1812 
1813 static inline bool
1814 __dp_tx_limit_check(struct dp_soc *soc)
1815 {
1816 	return false;
1817 }
1818 #else
1819 /**
1820  * is_dp_spl_tx_limit_reached() - For a special packet, check whether the
1821  * outstanding tx descriptors have reached the soc and pdev limits that
1822  * apply to special packets; regular packets are treated as having
1823  * reached their limit already
1824  * @vdev: DP vdev handle
1825  * @nbuf: network buffer
1826  *
1827  * Return: true if the applicable limit has been reached, else false
1828  */
1829 static inline bool
1830 is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1831 {
1832 	struct dp_pdev *pdev = vdev->pdev;
1833 	struct dp_soc *soc = pdev->soc;
1834 
1835 	if (is_spl_packet(nbuf)) {
1836 		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1837 				soc->num_tx_allowed)
1838 			return true;
1839 
1840 		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1841 			pdev->num_tx_allowed)
1842 			return true;
1843 
1844 		return false;
1845 	}
1846 
1847 	return true;
1848 }
1849 
1850 static inline bool
1851 __dp_tx_limit_check(struct dp_soc *soc)
1852 {
1853 	return (qdf_atomic_read(&soc->num_tx_outstanding) >=
1854 					soc->num_reg_tx_allowed);
1855 }
1856 
1857 /**
1858  * dp_tx_limit_check() - Check whether the allocated tx descriptors have
1859  * reached the soc and pdev max limits for regular packets, and whether
1860  * the limit has been reached for special packets
1861  * @vdev: DP vdev handle
1862  * @nbuf: network buffer
1863  *
1864  * Return: true if the allocated tx descriptors reached the max limit for
1865  * regular packets or, for special packets, the max configured value for
1866  * the soc/pdev; false otherwise
1867  */
1868 static inline bool
1869 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1870 {
1871 	struct dp_pdev *pdev = vdev->pdev;
1872 	struct dp_soc *soc = pdev->soc;
1873 
1874 	if (__dp_tx_limit_check(soc)) {
1875 		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
1876 			dp_tx_info("queued packets are more than max tx, drop the frame");
1877 			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1878 			return true;
1879 		}
1880 	}
1881 
1882 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1883 			pdev->num_reg_tx_allowed) {
1884 		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
1885 			dp_tx_info("queued packets are more than max tx, drop the frame");
1886 			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1887 			DP_STATS_INC(vdev,
1888 				     tx_i.dropped.desc_na_exc_outstand.num, 1);
1889 			return true;
1890 		}
1891 	}
1892 	return false;
1893 }
1894 #endif
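
/*
 * Typical call-site shape in the TX path (a sketch, not a verbatim
 * excerpt from dp_tx.c): check the limit before allocating a descriptor,
 * then account for the new descriptor.
 *
 *	if (dp_tx_limit_check(vdev, nbuf))
 *		return NULL;		// drop: limit reached, stats updated
 *	dp_tx_outstanding_inc(pdev);
 */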
1895 
1896 /**
1897  * dp_tx_exception_limit_check() - Check if the allocated tx exception
1898  * descriptors have reached the soc max limit
1899  * @vdev: DP vdev handle
1900  *
1901  * Return: true if allocated tx descriptors reached max configured value, else
1902  * false
1903  */
1904 static inline bool
1905 dp_tx_exception_limit_check(struct dp_vdev *vdev)
1906 {
1907 	struct dp_pdev *pdev = vdev->pdev;
1908 	struct dp_soc *soc = pdev->soc;
1909 
1910 	if (qdf_atomic_read(&soc->num_tx_exception) >=
1911 			soc->num_msdu_exception_desc) {
1912 		dp_info("exc packets are more than max drop the exc pkt");
1913 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
1914 		return true;
1915 	}
1916 
1917 	return false;
1918 }
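
/*
 * Exception-path sketch (illustrative only): drop early when the
 * exception descriptor budget is exhausted, otherwise account for the
 * new exception descriptor against the counter read above.
 *
 *	if (dp_tx_exception_limit_check(vdev))
 *		return nbuf;		// caller drops/frees the frame
 *	qdf_atomic_inc(&soc->num_tx_exception);
 */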
1919 
1920 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1921 /**
1922  * dp_tx_outstanding_inc() - Increment outstanding tx desc values on global and pdev
1923  * @pdev: DP pdev handle
1924  *
1925  * Return: void
1926  */
1927 static inline void
1928 dp_tx_outstanding_inc(struct dp_pdev *pdev)
1929 {
1930 }
1931 
1932 static inline void
1933 __dp_tx_outstanding_inc(struct dp_soc *soc)
1934 {
1935 }
1936 
1937 static inline void
1938 __dp_tx_outstanding_dec(struct dp_soc *soc)
1939 {
1940 }
1941 
1942 /**
1943  * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on global and pdev
1944  * @pdev: DP pdev handle
1945  *
1946  * Return: void
1947  */
1948 static inline void
1949 dp_tx_outstanding_dec(struct dp_pdev *pdev)
1950 {
1951 }
1952 
1953 /**
1954  * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
1955  * @pdev: DP pdev handle
1956  * @count: count of descs to subtract from outstanding
1957  *
1958  * Return: void
1959  */
1960 static inline void
1961 dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
1962 {
1963 }
1964 #else
1965 
1966 static inline void
1967 __dp_tx_outstanding_inc(struct dp_soc *soc)
1968 {
1969 	qdf_atomic_inc(&soc->num_tx_outstanding);
1970 }
1971 /**
1972  * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
1973  * @pdev: DP pdev handle
1974  *
1975  * Return: void
1976  */
1977 static inline void
1978 dp_tx_outstanding_inc(struct dp_pdev *pdev)
1979 {
1980 	struct dp_soc *soc = pdev->soc;
1981 
1982 	__dp_tx_outstanding_inc(soc);
1983 	qdf_atomic_inc(&pdev->num_tx_outstanding);
1984 	dp_update_tx_desc_stats(pdev);
1985 }
1986 
1987 static inline void
1988 __dp_tx_outstanding_dec(struct dp_soc *soc)
1989 {
1990 	qdf_atomic_dec(&soc->num_tx_outstanding);
1991 }
1992 
1993 /**
1994  * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
1995  * @pdev: DP pdev handle
1996  *
1997  * Return: void
1998  */
1999 static inline void
2000 dp_tx_outstanding_dec(struct dp_pdev *pdev)
2001 {
2002 	struct dp_soc *soc = pdev->soc;
2003 
2004 	__dp_tx_outstanding_dec(soc);
2005 	qdf_atomic_dec(&pdev->num_tx_outstanding);
2006 	dp_update_tx_desc_stats(pdev);
2007 }
2008 
2009 /**
2010  * __dp_tx_outstanding_sub() - Subtract outstanding tx desc values from soc
2011  * @soc: DP soc handle
2012  * @count: count of descs to subtract from outstanding
2013  *
2014  * Return: void
2015  */
2016 static inline void
2017 __dp_tx_outstanding_sub(struct dp_soc *soc, uint32_t count)
2018 {
2019 	qdf_atomic_sub(count, &soc->num_tx_outstanding);
2020 }
2021 
2022 /**
2023  * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
2024  * @pdev: DP pdev handle
2025  * @count: count of descs to subtract from outstanding
2026  *
2027  * Return: void
2028  */
2029 static inline void
2030 dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
2031 {
2032 	struct dp_soc *soc = pdev->soc;
2033 
2034 	__dp_tx_outstanding_sub(soc, count);
2035 	qdf_atomic_sub(count, &pdev->num_tx_outstanding);
2036 	dp_update_tx_desc_stats(pdev);
2037 }
2038 #endif /* QCA_SUPPORT_DP_GLOBAL_CTX */
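
/*
 * Lifecycle sketch for the helpers above (illustrative, not a verbatim
 * excerpt): increments and decrements are expected to pair up over a
 * descriptor's lifetime.
 *
 *	dp_tx_outstanding_inc(pdev);		// at descriptor allocation
 *	...
 *	dp_tx_outstanding_dec(pdev);		// at tx completion
 *	dp_tx_outstanding_sub(pdev, count);	// bulk release, e.g. a flush
 */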
2039 
2040 #else //QCA_TX_LIMIT_CHECK
2041 static inline bool
2042 __dp_tx_limit_check(struct dp_soc *soc)
2043 {
2044 	return false;
2045 }
2046 
2047 static inline bool
2048 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
2049 {
2050 	return false;
2051 }
2052 
2053 static inline bool
2054 dp_tx_exception_limit_check(struct dp_vdev *vdev)
2055 {
2056 	return false;
2057 }
2058 
2059 static inline void
2060 __dp_tx_outstanding_inc(struct dp_soc *soc)
2061 {
2062 }
2063 
2064 static inline void
2065 dp_tx_outstanding_inc(struct dp_pdev *pdev)
2066 {
2067 	qdf_atomic_inc(&pdev->num_tx_outstanding);
2068 	dp_update_tx_desc_stats(pdev);
2069 }
2070 
2071 static inline void
2072 __dp_tx_outstanding_dec(struct dp_soc *soc)
2073 {
2074 }
2075 
2076 static inline void
2077 dp_tx_outstanding_dec(struct dp_pdev *pdev)
2078 {
2079 	qdf_atomic_dec(&pdev->num_tx_outstanding);
2080 	dp_update_tx_desc_stats(pdev);
2081 }
2082 
2083 static inline void
2084 __dp_tx_outstanding_sub(struct dp_soc *soc, uint32_t count)
2085 {
2086 }
2087 
2088 /**
2089  * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
2090  * @pdev: DP pdev handle
2091  * @count: count of descs to subtract from outstanding
2092  *
2093  * Return: void
2094  */
2095 static inline void
2096 dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
2097 {
2098 	qdf_atomic_sub(count, &pdev->num_tx_outstanding);
2099 	dp_update_tx_desc_stats(pdev);
2100 }
2101 #endif //QCA_TX_LIMIT_CHECK
2102 
2103 /**
2104  * dp_tx_get_pkt_len() - Get the packet length of a msdu
2105  * @tx_desc: tx descriptor
2106  *
2107  * Return: Packet length of a msdu. If the packet is fragmented,
2108  * it will return the single fragment length.
2109  *
2110  * In TSO mode, the msdu from the stack is split into smaller segments,
2111  * and each of these segments is transmitted as an individual msdu.
2112  *
2113  * Please note that the length of an msdu from the stack may be smaller
2114  * than the total length of the segments it has been split into,
2115  * because each of the segments carries its own copy of the
2116  * packet headers.
2117  */
2118 static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
2119 {
2120 	return tx_desc->frm_type == dp_tx_frm_tso ?
2121 		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
2122 		tx_desc->length;
2123 }
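
/*
 * Worked example (numbers illustrative): a 9000-byte TSO msdu segmented
 * at a 1448-byte MSS produces several descriptors with frm_type ==
 * dp_tx_frm_tso; for each of them dp_tx_get_pkt_len() returns that
 * segment's seg.total_len (payload plus its own copy of the headers),
 * not the 9000-byte parent length.
 */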
2124 
2125 #ifdef FEATURE_RUNTIME_PM
2126 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
2127 {
2128 	return qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
2129 		(hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
2130 }
2131 #else
2132 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
2133 {
2134 	return 0;
2135 }
2136 #endif
2137 #if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
2138 /**
2139  * dp_tx_set_nbuf_band() - Set band info in nbuf cb
2140  * @nbuf: nbuf pointer
2141  * @txrx_peer: txrx_peer pointer
2142  * @link_id: Peer Link ID
2143  *
2144  * Return: None
2145  */
2146 static inline void
2147 dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2148 		    uint8_t link_id)
2149 {
2150 	qdf_nbuf_tx_set_band(nbuf, txrx_peer->band[link_id]);
2151 }
2152 #else
2153 static inline void
2154 dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2155 		    uint8_t link_id)
2156 {
2157 }
2158 #endif
2159 
2160 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
2161 /**
2162  * dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
2163  * specified link mac address
2164  * @soc_hdl: Handle to struct dp_soc
2165  * @vdev_id: vdev id
2166  * @mac: link mac address of remote peer
2167  * @latency: buffer to hold per-link transmit latency statistics
2168  *
2169  * Return: QDF_STATUS
2170  */
2171 QDF_STATUS
2172 dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2173 			  uint8_t *mac, struct cdp_tx_latency *latency);
2174 
2175 /**
2176  * dp_tx_latency_stats_config() - config transmit latency statistics for
2177  * specified vdev
2178  * @soc_hdl: Handle to struct dp_soc
2179  * @vdev_id: vdev id
2180  * @cfg: configuration for transmit latency statistics
2181  *
2182  * Return: QDF_STATUS
2183  */
2184 QDF_STATUS
2185 dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2186 			   struct cdp_tx_latency_config *cfg);
2187 
2188 /**
2189  * dp_tx_latency_stats_register_cb() - register transmit latency statistics
2190  * callback
2191  * @handle: Handle to struct dp_soc
2192  * @cb: callback function for transmit latency statistics
2193  *
2194  * Return: QDF_STATUS
2195  */
2196 QDF_STATUS dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle,
2197 					   cdp_tx_latency_cb cb);
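
/*
 * Registration/fetch sketch (illustrative; my_latency_cb stands for a
 * caller-provided function of type cdp_tx_latency_cb):
 *
 *	struct cdp_tx_latency latency;
 *
 *	dp_tx_latency_stats_register_cb(soc_hdl, my_latency_cb);
 *	dp_tx_latency_stats_fetch(soc_hdl, vdev_id, peer_mac, &latency);
 */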
2198 #endif
2199 #endif
2200