/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"
#ifdef FEATURE_PERPKT_INFO
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#include "if_meta_hdr.h"
#endif
#endif
#include "dp_internal.h"
#include "hal_tx.h"
#include <qdf_tracepoint.h>
#ifdef CONFIG_SAWF
#include "dp_sawf.h"
#endif
#include <qdf_pkt_add_timestamp.h>
#include "dp_ipa.h"

#define DP_INVALID_VDEV_ID 0xFF

#define DP_TX_MAX_NUM_FRAGS 6

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info,
			 qdf_nbuf_t nbuf, uint16_t sa_peer_id);
/*
 * DP_TX_DESC_FLAG_FRAG must always be defined as 0x1;
 * please do not change this flag's definition.
 */
#define DP_TX_DESC_FLAG_FRAG		0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_SIMPLE		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
#define DP_TX_DESC_FLAG_ALLOCATED	0x200
#define DP_TX_DESC_FLAG_MESH_MODE	0x400
#define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
#define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
#define DP_TX_DESC_FLAG_FLUSH		0x2000
#define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
#define DP_TX_DESC_FLAG_RMNET		0x8000
/*
 * The Tx descriptor flags field is only 16 bits wide and no bit is free
 * for any new flag. For the time being, the PPEDS flag is therefore
 * overloaded with the FLUSH flag, and FLAG_FAST with the TDLS flag
 * (TDLS is not enabled for WIN).
 */
#define DP_TX_DESC_FLAG_PPEDS		0x2000
#define DP_TX_DESC_FLAG_FAST		0x100
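/*
 * Example (illustrative sketch, not from the driver source): the flags field
 * is a 16-bit bitmap, so set/test/clear uses plain bitwise operators. Because
 * DP_TX_DESC_FLAG_PPEDS aliases DP_TX_DESC_FLAG_FLUSH, and DP_TX_DESC_FLAG_FAST
 * aliases DP_TX_DESC_FLAG_TDLS_FRAME, an overloaded bit must be interpreted in
 * the context of the build/feature that set it:
 *
 *	tx_desc->flags |= DP_TX_DESC_FLAG_ALLOCATED | DP_TX_DESC_FLAG_SIMPLE;
 *	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
 *		msdu_info.exception_fw = 1;
 *	tx_desc->flags &= ~DP_TX_DESC_FLAG_QUEUED_TX;
 */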

#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
do {                                                           \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
	qdf_nbuf_free(buf);                                    \
} while (0)

#define OCB_HEADER_VERSION	 1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
	#ifdef TX_PER_VDEV_DESC_POOL
		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
	#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
#define DP_TX_QUEUE_MASK 0x3

#define MAX_CDP_SEC_TYPE 12

/* number of dwords for htt_tx_msdu_desc_ext2_t */
#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7

#define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
#define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)

#define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_tx_frag_info_s - TX fragment information
 * @vaddr: HLOS virtual address for buffer
 * @paddr_lo: physical address lower 32 bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};

/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw frames are handed over to the driver as an SKB chain with MPDU
 * boundaries indicated through flags in the SKB CB (first_msdu and
 * last_msdu). This is converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
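
/*
 * Example (hypothetical helper, for illustration only): populating one
 * dp_tx_seg_info_s and hooking it into a dp_tx_sg_info_s chain for a
 * two-fragment 802.3 SG frame. Field usage mirrors the structures above;
 * the DMA address and length values are placeholders.
 *
 *	static void dp_tx_fill_sg_example(struct dp_tx_sg_info_s *sg_info,
 *					  struct dp_tx_seg_info_s *seg,
 *					  qdf_nbuf_t nbuf)
 *	{
 *		seg->nbuf = nbuf;
 *		seg->frag_cnt = 2;
 *		seg->frags[0].vaddr = qdf_nbuf_data(nbuf);
 *		seg->frags[0].paddr_lo = 0x1000;   // lower 32 bits of DMA addr
 *		seg->frags[0].paddr_hi = 0;        // upper bits of DMA addr
 *		seg->frags[0].len = 64;
 *		seg->frags[1] = seg->frags[0];     // second fragment (placeholder)
 *		seg->total_len = seg->frags[0].len + seg->frags[1].len;
 *		seg->next = NULL;                  // single-segment chain
 *
 *		sg_info->num_segs = 1;
 *		sg_info->total_len = seg->total_len;
 *		sg_info->curr_seg = seg;
 *	}
 */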

/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in the case of Linux).
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};

/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u: union of frame information structs
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id: vdev id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments; update only in the
 *		    last segment
 * @buf_len: buffer length (RMNET optimization path)
 * @payload_addr: address of the packet payload (RMNET optimization path)
 *
 * This structure holds the complete MSDU information needed to program the
 * hardware TCL and MSDU extension descriptors for different frame types.
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
	uint16_t buf_len;
	uint8_t *payload_addr;
#endif
};
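
/*
 * Example (illustrative sketch): minimal setup of dp_tx_msdu_info_s for a
 * regular (non-TSO, non-SG) frame before enqueue, assuming the usual
 * dp_tx_frm_std enum value and HTT_TX_EXT_TID_INVALID definition from this
 * codebase. The structure is zeroed first so unused union/meta_data words do
 * not carry stale state; dp_tx_get_queue() below fills the tx_queue member.
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;	// no TID override from HLOS
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 */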

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
 * the index of the respective TCL/WBM2SW release in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process a batch of descriptors reaped by
 * dp_tx_comp_handler and release the software descriptors after
 * processing is complete.
 *
 * Return: none
 */
void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 * @delayed_free: defer freeing of nbuf
 *
 * Return: nbuf to be freed later
 */
qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
			       bool delayed_free);

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: none
 */
void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);

/**
 * dp_tx_compute_delay() - Compute transmit delay and fill in all timestamps
 *			   in the correct fields
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
			 uint8_t tid, uint8_t ring_id);

/**
 * dp_tx_comp_process_tx_status() - Parse and dump Tx completion status info
 * @soc: DP soc handle
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @txrx_peer: txrx peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_txrx_peer *txrx_peer,
				  uint8_t ring_id);

/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @txrx_peer: DP peer context
 *
 * Return: none
 */
void dp_tx_comp_process_desc(struct dp_soc *soc,
			     struct dp_tx_desc_s *desc,
			     struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer);

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @reinject_reason: reinject reason from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status,
			    uint8_t reinject_reason);

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
void dp_tx_inspect_handler(struct dp_soc *soc,
			   struct dp_vdev *vdev,
			   struct dp_tx_desc_s *tx_desc,
			   uint8_t *status);

/**
 * dp_tx_update_peer_basic_stats() - Update peer basic stats
 * @txrx_peer: Datapath txrx_peer handle
 * @length: Length of the packet
 * @tx_status: Tx status from TQM/FW
 * @update: enhanced flag value present in dp_pdev
 *
 * Return: none
 */
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_drop() - Drop the frame on a given VAP
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Drop all the incoming packets
 *
 * Return: nbuf
 */
qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

/**
 * dp_tx_exc_drop() - Drop the frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Drop all the incoming packets
 *
 * Return: nbuf
 */
qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  qdf_nbuf_t nbuf,
			  struct cdp_tx_exception_metadata *tx_exc_metadata);
#endif
#ifdef WLAN_SUPPORT_PPEDS
qdf_nbuf_t
dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
#else
static inline qdf_nbuf_t
dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
	return NULL;
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque DP handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque DP handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-vap
 * forwarding cases.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      qdf_nbuf_t nbuf);
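
/*
 * Example (illustrative, OSIF-side sketch): dp_tx_send() consumes the nbuf on
 * success and returns it on failure, so a caller in hard_start_xmit must free
 * only what comes back:
 *
 *	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	// transmit failed; drop the frame
 */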

/**
 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
 *      case to avoid the check in the per-packet path.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit packets through dp_tx_send
 * with a special condition that avoids the per-packet check in dp_tx_send.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, qdf_nbuf_t nbuf);

/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata);

/**
 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
 *      in the exception path in a special case to avoid the regular
 *      exception path check.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, qdf_nbuf_t nbuf,
				   struct cdp_tx_exception_metadata *tx_exc_metadata);

/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);

/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);

/**
 * dp_tx_mcast_enhance() - Multicast enhancement on TX
 * @vdev: DP vdev handle
 * @nbuf: network buffer to be transmitted
 *
 * Return: true on success
 *         false on failure
 */
bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif

/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);

#ifndef WLAN_SOFTUMAC_SUPPORT
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function collects hardware release ring element contents and handles
 * descriptor contents. Based on the contents, it frees the packet or handles
 * error conditions.
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);
#endif

void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);

QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
	return;
}
#endif

/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);
	pdev->tx_descs_max = 0;
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: Handle to DP soc structure
 * @hal_soc: HAL SOC handle
 * @num_avail_for_reap: descriptors available for reap
 * @hal_ring_hdl: ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: pointer to the last prefetched SW descriptor
 *
 * Return: None
 */
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
#endif
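
/*
 * Example (illustrative sketch of a completion reap loop, assuming the
 * dp_srng_dst_get_next() helper from dp_internal.h): the prefetch helper is
 * called once per reaped entry so the next HW descriptor and the next SW
 * descriptor's nbuf are warm in the cache by the time they are processed:
 *
 *	while (num_avail_for_reap--) {
 *		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
 *		if (!tx_comp_hal_desc)
 *			break;
 *		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
 *					       num_avail_for_reap,
 *					       hal_ring_hdl,
 *					       &last_prefetched_hw_desc,
 *					       &last_prefetched_sw_desc);
 *		// ... process the current completion ...
 *	}
 */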

#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	return;
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}

#else
/**
 * dp_tx_multipass_process() - Process vlan frames in tx path
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor
 *
 * Return: status whether frame needs to be dropped or transmitted
 */
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);

/**
 * dp_tx_vdev_multipass_deinit() - deinit vlan map for vdev
 * @vdev: pointer to vdev
 *
 * Return: void
 */
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);

/**
 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: void
 */
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

/**
 * dp_tx_add_groupkey_metadata() - Add group key in metadata
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor
 * @group_key: Group key index programmed in metadata
 *
 * Return: void
 */
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint16_t group_key);
#endif

/**
 * dp_tx_hw_to_qdf() - convert hw status to qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
	switch (status) {
	case HAL_TX_TQM_RR_FRAME_ACKED:
		return QDF_TX_RX_STATUS_OK;
	case HAL_TX_TQM_RR_REM_CMD_TX:
		return QDF_TX_RX_STATUS_NO_ACK;
	case HAL_TX_TQM_RR_REM_CMD_REM:
	case HAL_TX_TQM_RR_REM_CMD_NOTX:
	case HAL_TX_TQM_RR_REM_CMD_AGED:
		return QDF_TX_RX_STATUS_FW_DISCARD;
	default:
		return QDF_TX_RX_STATUS_DEFAULT;
	}
}
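
/*
 * Example (illustrative): translating a HAL completion status before handing
 * it to a generic consumer such as the packet-capture path:
 *
 *	enum qdf_dp_tx_rx_status qdf_status = dp_tx_hw_to_qdf(ts->status);
 *
 *	if (qdf_status == QDF_TX_RX_STATUS_OK)
 *		;	// frame was acked by the receiver
 */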

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A TX packet queue has two instances: a software descriptor pool id and a
 * DMA ring id. Based on the tx feature and hardware configuration, the
 * queue id combination can differ.
 * For example:
 * With XPS enabled, all TX descriptor pools and DMA rings are assigned
 * per CPU id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev, and the DMA ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
#if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	queue->ring_id = qdf_get_cpu();
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		if (queue->ring_id == IPA_TCL_DATA_RING_IDX)
			queue->ring_id = 0;

	queue->desc_pool_id = queue->ring_id;
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	queue->ring_id = qdf_get_cpu();
	queue->desc_pool_id = queue->ring_id;
}
#endif

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return soc->tcl_cmd_credit_ring.hal_srng;

	return soc->tcl_data_ring[ring_id].hal_srng;
}

#else /* QCA_OL_TX_MULTIQ_SUPPORT */

#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		queue->ring_id = DP_TX_GET_RING_ID(vdev);
	else
		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
					vdev->pdev->soc->num_tcl_data_rings);
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
				vdev->pdev->soc->num_tcl_data_rings);
}
#endif
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
#endif

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
#endif
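
/*
 * Example (illustrative): dp_tx_get_queue() and dp_tx_get_hal_ring_hdl() are
 * used as a pair in the enqueue path; whichever variant is compiled in, the
 * queue ids picked here select both the descriptor pool and the TCL ring:
 *
 *	struct dp_tx_queue queue;
 *	hal_ring_handle_t hal_ring_hdl;
 *
 *	dp_tx_get_queue(vdev, nbuf, &queue);
 *	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, queue.ring_id);
 */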

#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}

#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif

#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
/* TODO TX_FEATURE_NOT_YET */

/**
 * dp_tx_desc_flush() - release resources associated with a TX descriptor
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *        NULL: check all TX descriptors allocated on this pdev
 *        Non-NULL: only check the TX descriptors associated with this vdev
 * @force_free:
 *        true: flush the TX descriptors
 *        false: only reset the vdev pointer in each allocated TX descriptor
 *        associated with the current vdev
 *
 * This function will go through the TX descriptor pool to flush
 * the outstanding TX data or reset the vdev pointer to NULL in the
 * associated TX descriptors.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);

/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for the following descriptor pools:
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);

/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools:
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below:
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below:
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);

#ifndef WLAN_SOFTUMAC_SUPPORT
/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link disc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
 * 2.  Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);
#endif
#else /* QCA_HOST_MODE_WIFI_DISABLED */

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO

/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @txrx_peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: time latency
 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *txrx_peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 *
 * Return: none
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf);
#endif
#else
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
 * @soc: dp_soc handle
 * @desc: Tx Descriptor
 * @ts: HAL Tx completion descriptor contents
 *
 * This function is used to send tx completion to packet capture
 *
 * Return: none
 */
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts);
#else
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Return: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
			struct dp_tx_desc_s *tx_desc,
			uint8_t ring_id);

/**
 * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: DP vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID for pkt transmission
 * @msdu_info: MSDU info of tx packet
 * @ring_id: TCL ring id
 *
 * Return: 1, if coalescing is to be done
 *         0, if coalescing is not to be done
 */
int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id);

/**
 * dp_tx_ring_access_end() - HAL ring access end for data transmission
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Return: none
 */
void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce);
#else
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Return: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id)
{
}

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}

#endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
 * @soc_hdl: DP soc handle
 * @is_high_tput: flag to indicate whether throughput is high
 *
 * Return: none
 */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
					 bool is_high_tput)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}

/**
 * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Feature-specific wrapper for HAL ring access end for data
 * transmission
 *
 * Return: none
 */
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
#endif

static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
#else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
}
#endif

#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/**
 * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
 * @ts: Tx completion status
 * @delta_tsf: Difference between TSF clock and qtimer
 * @delay_us: Delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS   : Success
 *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
 *                                delay_us is NULL
 *         QDF_STATUS_E_FAILURE : Error in delay calculation
 */
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us);

/**
 * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @delta_tsf: difference between TSF clock and qtimer
 *
 * Return: None
 */
void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint32_t delta_tsf);
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
/**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @enable: true to enable and false to disable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, bool enable);

/**
 * dp_get_uplink_delay() - Get uplink delay value
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @val: pointer to save uplink delay value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */

/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Return: True if any tx pkt tracepoint is enabled else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_pkt_enabled());
}

#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}

/**
 * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Check for corruption in the tx descriptor; if the magic pattern does
 * not match, trigger self recovery.
 *
 * Return: none
 */
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
#else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
#endif

#ifndef CONFIG_SAWF
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#else
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#endif

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
/**
 * dp_pkt_add_timestamp() - add timestamp in data payload
 * @vdev: dp vdev
 * @index: index to decide offset in payload
 * @time: timestamp to add in data payload
 * @nbuf: network buffer
 *
 * Return: none
 */
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf);
/**
 * dp_pkt_get_timestamp() - get current system time
 * @time: return current system time
 *
 * Return: none
 */
void dp_pkt_get_timestamp(uint64_t *time);
#else
#define dp_pkt_add_timestamp(vdev, index, time, nbuf)

static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
#endif

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats() - Update the outstanding tx desc count and its
 * peak value on the pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}

#else /* CONFIG_WLAN_SYSFS_MEM_STATS */

static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_get_global_desc_in_use() - read global descriptors in usage
 * @dp_global: Datapath global context
 *
 * Return: global descriptors in use
 */
static inline int32_t
dp_tx_get_global_desc_in_use(struct dp_global_context *dp_global)
{
	return qdf_atomic_read(&dp_global->global_descriptor_in_use);
}
#endif

#ifdef QCA_TX_LIMIT_CHECK
static inline bool is_spl_packet(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		return true;
	return false;
}

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * is_dp_spl_tx_limit_reached() - Check if a special packet can still be
 * allowed a tx descriptor, i.e. whether the allocated tx descriptors are
 * within the global max limit and the pdev max limit.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max configured
 * value, else false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_global_context *dp_global;
	uint32_t global_tx_desc_allowed;

	dp_global = wlan_objmgr_get_global_ctx();
	global_tx_desc_allowed =
		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);

	if (is_spl_packet(nbuf)) {
		if (dp_tx_get_global_desc_in_use(dp_global) >=
				global_tx_desc_allowed)
			return true;

		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed)
			return true;

		return false;
	}

	return true;
}

/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached the
 * global max regular limit and the pdev max regular limit for regular
 * packets. Also check if the limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max limit for
 * regular packets and, in the case of special packets, if the limit is
 * reached for the max configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_global_context *dp_global;
	uint32_t global_tx_desc_allowed;
	uint32_t global_tx_desc_reg_allowed;
	uint32_t global_tx_desc_spcl_allowed;

	dp_global = wlan_objmgr_get_global_ctx();
	global_tx_desc_allowed =
		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);
	global_tx_desc_spcl_allowed =
		wlan_cfg_get_num_global_spcl_tx_desc(soc->wlan_cfg_ctx);
	global_tx_desc_reg_allowed = global_tx_desc_allowed -
					global_tx_desc_spcl_allowed;

	if (dp_tx_get_global_desc_in_use(dp_global) >= global_tx_desc_reg_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
#else
/**
 * is_dp_spl_tx_limit_reached() - Check if a special packet can still be
 * allowed a tx descriptor, i.e. whether the allocated tx descriptors are
 * within the soc max limit and the pdev max limit.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max configured
 * value, else false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (is_spl_packet(nbuf)) {
		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
				soc->num_tx_allowed)
			return true;

		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed)
			return true;

		return false;
	}

	return true;
}

/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached the
 * soc max regular limit and the pdev max regular limit for regular
 * packets. Also check if the limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max limit for
 * regular packets and, in the case of special packets, if the limit is
 * reached for the max configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
#endif

/**
 * dp_tx_exception_limit_check() - Check if allocated tx exception descriptors
 * reached the soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached the max configured
 * value, else false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}

	return false;
}
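
/*
 * Example (illustrative sketch of the descriptor-allocation guard in the send
 * path): regular frames are refused once the regular-descriptor budget is
 * consumed, while special frames (e.g. EAPOL) may still use the reserved
 * headroom; exception-path frames have their own budget:
 *
 *	if (dp_tx_limit_check(vdev, nbuf))
 *		return nbuf;		// caller frees; stats already updated
 *
 *	if (tx_exc_metadata && dp_tx_exception_limit_check(vdev))
 *		return nbuf;
 *
 *	dp_tx_outstanding_inc(pdev);	// paired with dp_tx_outstanding_dec()
 */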

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_global_context *dp_global;

	dp_global = wlan_objmgr_get_global_ctx();

	qdf_atomic_inc(&dp_global->global_descriptor_in_use);
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_global_context *dp_global;

	dp_global = wlan_objmgr_get_global_ctx();

	qdf_atomic_dec(&dp_global->global_descriptor_in_use);
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

#else
/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_SUPPORT_DP_GLOBAL_CTX */

#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif //QCA_TX_LIMIT_CHECK
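
/*
 * Example (illustrative): the outstanding counters are incremented when a tx
 * descriptor is handed to hardware and decremented on completion/flush, so
 * every dp_tx_outstanding_inc() must have exactly one matching
 * dp_tx_outstanding_dec():
 *
 *	dp_tx_outstanding_inc(pdev);	// descriptor allocated for this frame
 *	// ... enqueue, completion reaped ...
 *	dp_tx_outstanding_dec(pdev);	// descriptor released back to the pool
 */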

/**
 * dp_tx_get_pkt_len() - Get the packet length of an msdu
 * @tx_desc: tx descriptor
 *
 * Return: packet length of the msdu. If the packet is fragmented,
 * the single-fragment length is returned.
 *
 * In TSO mode, the msdu from the stack is fragmented into small
 * fragments, and each of these new fragments is transmitted
 * as an individual msdu.
 *
 * Note that the length of an msdu from the stack may be smaller
 * than the total length of the fragments it has been split into,
 * because each of the fragments carries its own nbuf header.
 */
static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
{
	return tx_desc->frm_type == dp_tx_frm_tso ?
		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
		qdf_nbuf_len(tx_desc->nbuf);
}

#ifdef FEATURE_RUNTIME_PM
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
		(hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
}
#else
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return 0;
}
#endif
#endif /* __DP_TX_H */
1949