xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision ed7ed761f307f964abd13da4df8dcb908086bd83)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 #include "dp_ipa.h"
40 
41 #define DP_INVALID_VDEV_ID 0xFF
42 
43 #define DP_TX_MAX_NUM_FRAGS 6
44 
45 /* invalid peer id for reinject */
46 #define DP_INVALID_PEER 0XFFFE
47 
48 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
49 			 struct dp_tx_msdu_info_s *msdu_info,
50 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id);
51 /*
52  * DP_TX_DESC_FLAG_FRAG flags should always be defined to 0x1
53  * please do not change this flag's definition
54  */
55 #define DP_TX_DESC_FLAG_FRAG		0x1
56 #define DP_TX_DESC_FLAG_TO_FW		0x2
57 #define DP_TX_DESC_FLAG_SIMPLE		0x4
58 #define DP_TX_DESC_FLAG_RAW		0x8
59 #define DP_TX_DESC_FLAG_MESH		0x10
60 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
61 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
62 #define DP_TX_DESC_FLAG_ME		0x80
63 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
64 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
65 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
66 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
67 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
68 #define DP_TX_DESC_FLAG_FLUSH		0x2000
69 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
70 #define DP_TX_DESC_FLAG_RMNET		0x8000
71 /*
72  * Since the Tx descriptor flag is of only 16-bit and no more bit is free for
73  * any new flag, therefore for time being overloading PPEDS flag with that of
74  * FLUSH flag and FLAG_FAST with TDLS which is not enabled for WIN.
75  */
76 #define DP_TX_DESC_FLAG_PPEDS		0x2000
77 #define DP_TX_DESC_FLAG_FAST		0x100
78 
79 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
80 
81 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
82 do {                                                           \
83 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
84 	qdf_nbuf_free(buf);                                    \
85 } while (0)
86 
87 #define OCB_HEADER_VERSION	 1
88 
89 #ifdef TX_PER_PDEV_DESC_POOL
90 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
91 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
92 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
93 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
94 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
95 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
96 #else
97 	#ifdef TX_PER_VDEV_DESC_POOL
98 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
99 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
100 	#endif /* TX_PER_VDEV_DESC_POOL */
101 #endif /* TX_PER_PDEV_DESC_POOL */
102 #define DP_TX_QUEUE_MASK 0x3
103 
104 #define MAX_CDP_SEC_TYPE 12
105 
106 /* number of dwords for htt_tx_msdu_desc_ext2_t */
107 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 7
108 
109 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
110 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
111 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
112 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
113 #define dp_tx_info(params...) \
114 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
115 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
116 
117 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
118 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
119 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
120 #define dp_tx_comp_info(params...) \
121 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
122 #define dp_tx_comp_info_rl(params...) \
123 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
124 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
125 
126 #ifndef QCA_HOST_MODE_WIFI_DISABLED
127 
/**
 * struct dp_tx_frag_info_s - per-fragment info for a Tx buffer
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
141 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	/* fixed-size array: a segment carries at most DP_TX_MAX_NUM_FRAGS */
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
157 
158 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
159 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	/* walked segment-by-segment during transmit; see dp_tx_seg_info_s */
	struct dp_tx_seg_info_s *curr_seg;
};
178 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
192 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u: union of frame information structs
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id : vdev_id for reinjected mcast packets
 * @skip_hp_update : Skip HP update for TSO segments and update in last segment
 * @buf_len: buffer length; present only when QCA_DP_TX_RMNET_OPTIMIZATION
 *	     is enabled
 * @payload_addr: pointer to the buffer payload; present only when
 *	     QCA_DP_TX_RMNET_OPTIMIZATION is enabled
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	/* tso_info and sg_info are mutually exclusive per frm_type */
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
	uint16_t buf_len;
	uint8_t *payload_addr;
#endif
};
244 
245 #ifndef QCA_HOST_MODE_WIFI_DISABLED
246 /**
247  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
248  * @soc: core txrx context
249  * @index: index of ring to deinit
250  *
251  * Deinit 1 TCL and 1 WBM2SW release ring on as needed basis using
252  * index of the respective TCL/WBM2SW release in soc structure.
253  * For example, if the index is 2 then &soc->tcl_data_ring[2]
254  * and &soc->tx_comp_ring[2] will be deinitialized.
255  *
256  * Return: none
257  */
258 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
259 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
260 
261 /**
262  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
263  * @soc: core txrx main context
264  * @comp_head: software descriptor head pointer
265  * @ring_id: ring number
266  *
267  * This function will process batch of descriptors reaped by dp_tx_comp_handler
268  * and release the software descriptors after processing is complete
269  *
270  * Return: none
271  */
272 void
273 dp_tx_comp_process_desc_list(struct dp_soc *soc,
274 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
275 
276 /**
277  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
278  * @soc: Soc handle
279  * @desc: software Tx descriptor to be processed
280  * @delayed_free: defer freeing of nbuf
281  *
282  * Return: nbuf to be freed later
283  */
284 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
285 			       bool delayed_free);
286 
287 /**
288  * dp_tx_desc_release() - Release Tx Descriptor
289  * @tx_desc: Tx Descriptor
290  * @desc_pool_id: Descriptor Pool ID
291  *
292  * Deallocate all resources attached to Tx descriptor and free the Tx
293  * descriptor.
294  *
295  * Return:
296  */
297 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
298 
299 /**
300  * dp_tx_compute_delay() - Compute and fill in all timestamps
301  *				to pass in correct fields
302  * @vdev: vdev handle
303  * @tx_desc: tx descriptor
304  * @tid: tid value
305  * @ring_id: TCL or WBM ring number for transmit path
306  *
307  * Return: none
308  */
309 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
310 			 uint8_t tid, uint8_t ring_id);
311 
312 /**
313  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
314  * @soc: DP soc handle
315  * @tx_desc: software descriptor head pointer
316  * @ts: Tx completion status
317  * @txrx_peer: txrx peer handle
318  * @ring_id: ring number
319  *
320  * Return: none
321  */
322 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
323 				  struct dp_tx_desc_s *tx_desc,
324 				  struct hal_tx_completion_status *ts,
325 				  struct dp_txrx_peer *txrx_peer,
326 				  uint8_t ring_id);
327 
328 /**
329  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
330  * @soc: DP Soc handle
331  * @desc: software Tx descriptor
332  * @ts: Tx completion status from HAL/HTT descriptor
333  * @txrx_peer: DP peer context
334  *
335  * Return: none
336  */
337 void dp_tx_comp_process_desc(struct dp_soc *soc,
338 			     struct dp_tx_desc_s *desc,
339 			     struct hal_tx_completion_status *ts,
340 			     struct dp_txrx_peer *txrx_peer);
341 
342 /**
343  * dp_tx_reinject_handler() - Tx Reinject Handler
344  * @soc: datapath soc handle
345  * @vdev: datapath vdev handle
346  * @tx_desc: software descriptor head pointer
347  * @status: Tx completion status from HTT descriptor
348  * @reinject_reason: reinject reason from HTT descriptor
349  *
350  * This function reinjects frames back to Target.
351  * Todo - Host queue needs to be added
352  *
353  * Return: none
354  */
355 void dp_tx_reinject_handler(struct dp_soc *soc,
356 			    struct dp_vdev *vdev,
357 			    struct dp_tx_desc_s *tx_desc,
358 			    uint8_t *status,
359 			    uint8_t reinject_reason);
360 
361 /**
362  * dp_tx_inspect_handler() - Tx Inspect Handler
363  * @soc: datapath soc handle
364  * @vdev: datapath vdev handle
365  * @tx_desc: software descriptor head pointer
366  * @status: Tx completion status from HTT descriptor
367  *
368  * Handles Tx frames sent back to Host for inspection
369  * (ProxyARP)
370  *
371  * Return: none
372  */
373 void dp_tx_inspect_handler(struct dp_soc *soc,
374 			   struct dp_vdev *vdev,
375 			   struct dp_tx_desc_s *tx_desc,
376 			   uint8_t *status);
377 
378 /**
379  * dp_tx_update_peer_basic_stats() - Update peer basic stats
380  * @txrx_peer: Datapath txrx_peer handle
381  * @length: Length of the packet
382  * @tx_status: Tx status from TQM/FW
383  * @update: enhanced flag value present in dp_pdev
384  *
385  * Return: none
386  */
387 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
388 				   uint32_t length, uint8_t tx_status,
389 				   bool update);
390 
391 #ifdef DP_UMAC_HW_RESET_SUPPORT
392 /**
393  * dp_tx_drop() - Drop the frame on a given VAP
394  * @soc: DP soc handle
395  * @vdev_id: id of DP vdev handle
396  * @nbuf: skb
397  *
398  * Drop all the incoming packets
399  *
400  * Return: nbuf
401  */
402 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
403 
404 /**
405  * dp_tx_exc_drop() - Drop the frame on a given VAP
406  * @soc_hdl: DP soc handle
407  * @vdev_id: id of DP vdev handle
408  * @nbuf: skb
409  * @tx_exc_metadata: Handle that holds exception path meta data
410  *
411  * Drop all the incoming packets
412  *
413  * Return: nbuf
414  */
415 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
416 			  qdf_nbuf_t nbuf,
417 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
418 #endif
419 #ifdef WLAN_SUPPORT_PPEDS
420 qdf_nbuf_t
421 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
422 #else
423 static inline qdf_nbuf_t
424 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
425 {
426 	return NULL;
427 }
428 #endif
429 #ifndef QCA_HOST_MODE_WIFI_DISABLED
430 /**
431  * dp_tso_soc_attach() - TSO Attach handler
432  * @txrx_soc: Opaque Dp handle
433  *
434  * Reserve TSO descriptor buffers
435  *
436  * Return: QDF_STATUS_E_FAILURE on failure or
437  * QDF_STATUS_SUCCESS on success
438  */
439 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
440 
441 /**
442  * dp_tso_soc_detach() - TSO Detach handler
443  * @txrx_soc: Opaque Dp handle
444  *
445  * Deallocate TSO descriptor buffers
446  *
447  * Return: QDF_STATUS_E_FAILURE on failure or
448  * QDF_STATUS_SUCCESS on success
449  */
450 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
451 
452 /**
453  * dp_tx_send() - Transmit a frame on a given VAP
454  * @soc_hdl: DP soc handle
455  * @vdev_id: id of DP vdev handle
456  * @nbuf: skb
457  *
458  * Entry point for Core Tx layer (DP_TX) invoked from
459  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
460  * cases
461  *
462  * Return: NULL on success,
463  *         nbuf when it fails to send
464  */
465 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
466 		      qdf_nbuf_t nbuf);
467 
468 /**
469  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
470  *      case to avoid check in per-packet path.
471  * @soc_hdl: DP soc handle
472  * @vdev_id: id of DP vdev handle
473  * @nbuf: skb
474  *
475  * Entry point for Core Tx layer (DP_TX) invoked from
476  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
477  * with special condition to avoid per pkt check in dp_tx_send
478  *
479  * Return: NULL on success,
480  *         nbuf when it fails to send
481  */
482 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
483 				    uint8_t vdev_id, qdf_nbuf_t nbuf);
484 
485 /**
486  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
487  * @soc_hdl: DP soc handle
488  * @vdev_id: id of DP vdev handle
489  * @nbuf: skb
490  * @tx_exc_metadata: Handle that holds exception path meta data
491  *
492  * Entry point for Core Tx layer (DP_TX) invoked from
493  * hard_start_xmit in OSIF/HDD to transmit frames through fw
494  *
495  * Return: NULL on success,
496  *         nbuf when it fails to send
497  */
498 qdf_nbuf_t
499 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
500 		     qdf_nbuf_t nbuf,
501 		     struct cdp_tx_exception_metadata *tx_exc_metadata);
502 
503 /**
504  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
505  *      in exception path in special case to avoid regular exception path chk.
506  * @soc_hdl: DP soc handle
507  * @vdev_id: id of DP vdev handle
508  * @nbuf: skb
509  * @tx_exc_metadata: Handle that holds exception path meta data
510  *
511  * Entry point for Core Tx layer (DP_TX) invoked from
512  * hard_start_xmit in OSIF/HDD to transmit frames through fw
513  *
514  * Return: NULL on success,
515  *         nbuf when it fails to send
516  */
517 qdf_nbuf_t
518 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
519 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
520 				   struct cdp_tx_exception_metadata *tx_exc_metadata);
521 
522 /**
523  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
524  * @soc_hdl: DP soc handle
525  * @vdev_id: DP vdev handle
526  * @nbuf: skb
527  *
528  * Entry point for Core Tx layer (DP_TX) invoked from
529  * hard_start_xmit in OSIF/HDD
530  *
531  * Return: NULL on success,
532  *         nbuf when it fails to send
533  */
534 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
535 			   qdf_nbuf_t nbuf);
536 
537 /**
538  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
539  * @vdev: DP vdev handle
540  * @nbuf: skb
541  * @msdu_info: MSDU information
542  * @peer_id: peer_id of the peer in case of NAWDS frames
543  * @tx_exc_metadata: Handle that holds exception path metadata
544  *
545  * Return: NULL on success,
546  *         nbuf when it fails to send
547  */
548 qdf_nbuf_t
549 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
550 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
551 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
552 
553 /**
554  * dp_tx_mcast_enhance() - Multicast enhancement on TX
555  * @vdev: DP vdev handle
556  * @nbuf: network buffer to be transmitted
557  *
558  * Return: true on success
559  *         false on failure
560  */
561 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
562 
563 /**
564  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
565  * @vdev: DP vdev handle
566  * @nbuf: skb
567  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
568  *
569  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
570  *
571  * Return: NULL on success,
572  *         nbuf when it fails to send
573  */
574 #if QDF_LOCK_STATS
575 noinline qdf_nbuf_t
576 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
577 			 struct dp_tx_msdu_info_s *msdu_info);
578 #else
579 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
580 				    struct dp_tx_msdu_info_s *msdu_info);
581 #endif
582 #ifdef FEATURE_WLAN_TDLS
583 /**
584  * dp_tx_non_std() - Allow the control-path SW to send data frames
585  * @soc_hdl: Datapath soc handle
586  * @vdev_id: id of vdev
587  * @tx_spec: what non-standard handling to apply to the tx data frames
588  * @msdu_list: NULL-terminated list of tx MSDUs
589  *
590  * Return: NULL on success,
591  *         nbuf when it fails to send
592  */
593 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
594 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
595 #endif
596 
597 /**
598  * dp_tx_frame_is_drop() - checks if the packet is loopback
599  * @vdev: DP vdev handle
600  * @srcmac: source MAC address
601  * @dstmac: destination MAC address
602  *
603  * Return: 1 if frame needs to be dropped else 0
604  */
605 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
606 
607 /**
608  * dp_tx_comp_handler() - Tx completion handler
609  * @int_ctx: pointer to DP interrupt context
610  * @soc: core txrx main context
611  * @hal_srng: Opaque HAL SRNG pointer
612  * @ring_id: completion ring id
613  * @quota: No. of packets/descriptors that can be serviced in one loop
614  *
615  * This function will collect hardware release ring element contents and
616  * handle descriptor contents. Based on contents, free packet or handle error
617  * conditions
618  *
619  * Return: Number of TX completions processed
620  */
621 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
622 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
623 			    uint32_t quota);
624 
625 QDF_STATUS
626 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
627 
628 QDF_STATUS
629 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
630 
631 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
632 
#if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
/* Mcast-enhancement teardown compiles to a no-op when IQUE is unsupported
 * or host mode is disabled.
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
#endif
639 
/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);
	pdev->tx_descs_max = 0;
	/* Only with a per-pdev TX ring config does this pdev own (and hence
	 * initialize) its TCL data ring; otherwise rings are soc-level.
	 */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}
662 
/**
 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: DP soc handle
 * @hal_soc: HAL SOC handle
 * @num_avail_for_reap: descriptors available for reap
 * @hal_ring_hdl: ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: pointer to last prefetch SW desc
 *
 * Return: None
 */
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
	/* Warm the cache with the nbuf of the previously resolved SW desc
	 * (two prefetches: offset 0 and offset 64).
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		/* Resolve the SW descriptor for the prefetched HW desc */
		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
						       *last_prefetched_hw_desc,
						       last_prefetched_sw_desc);

		/* NOTE(review): the 0x3f mask appears to test 64-byte
		 * alignment to decide whether the next 32-byte desc lies in
		 * the already-cached chunk — confirm against HAL srng docs.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
/* Prefetch disabled: keep the same call signature as a no-op */
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s
				    **last_prefetched_sw_desc)
{
}
#endif
719 
#ifndef FEATURE_WDS
/* Without FEATURE_WDS there is no MEC processing; provide a no-op stub */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif
726 
#ifndef QCA_MULTIPASS_SUPPORT
/* Multipass (VLAN) support compiled out: every frame is allowed through */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

/* No per-vdev multipass state exists when the feature is compiled out */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
740 
741 #else
742 /**
743  * dp_tx_multipass_process() - Process vlan frames in tx path
744  * @soc: dp soc handle
745  * @vdev: DP vdev handle
746  * @nbuf: skb
747  * @msdu_info: msdu descriptor
748  *
749  * Return: status whether frame needs to be dropped or transmitted
750  */
751 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
752 			     qdf_nbuf_t nbuf,
753 			     struct dp_tx_msdu_info_s *msdu_info);
754 
755 /**
756  * dp_tx_vdev_multipass_deinit() - set vlan map for vdev
757  * @vdev: pointer to vdev
758  *
759  * return: void
760  */
761 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
762 
763 /**
764  * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag
765  * @vdev: DP vdev handle
766  * @nbuf: network buffer
767  *
768  * Return: void
769  */
770 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
771 
772 /**
773  * dp_tx_add_groupkey_metadata() - Add group key in metadata
774  * @vdev: DP vdev handle
775  * @msdu_info: MSDU info to be setup in MSDU descriptor
776  * @group_key: Group key index programmed in metadata
777  *
778  * Return: void
779  */
780 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
781 				 struct dp_tx_msdu_info_s *msdu_info,
782 				 uint16_t group_key);
783 #endif
784 
785 /**
786  * dp_tx_hw_to_qdf()- convert hw status to qdf status
787  * @status: hw status
788  *
789  * Return: qdf tx rx status
790  */
791 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
792 {
793 	switch (status) {
794 	case HAL_TX_TQM_RR_FRAME_ACKED:
795 		return QDF_TX_RX_STATUS_OK;
796 	case HAL_TX_TQM_RR_REM_CMD_TX:
797 		return QDF_TX_RX_STATUS_NO_ACK;
798 	case HAL_TX_TQM_RR_REM_CMD_REM:
799 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
800 	case HAL_TX_TQM_RR_REM_CMD_AGED:
801 		return QDF_TX_RX_STATUS_FW_DISCARD;
802 	default:
803 		return QDF_TX_RX_STATUS_DEFAULT;
804 	}
805 }
806 
807 #ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances, software descriptors id and dma ring id
 * Based on tx feature and hardware configuration queue id combination could be
 * different.
 * For example -
 * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
 * With no XPS,lock based resource protection, Descriptor pool ids are different
 * for each vdev, dma ring id will be same as single pdev id
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
#if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* Per-CPU ring selection (XPS); if the chosen ring is the one owned
	 * by IPA, steer to ring 0 instead.
	 * NOTE(review): assumes ring 0 is always host-owned — confirm.
	 */
	queue->ring_id = qdf_get_cpu();
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		if (queue->ring_id == IPA_TCL_DATA_RING_IDX)
			queue->ring_id = 0;

	queue->desc_pool_id = queue->ring_id;
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* Per-CPU ring and descriptor pool selection (XPS) */
	queue->ring_id = qdf_get_cpu();
	queue->desc_pool_id = queue->ring_id;
}
#endif
844 
845 /**
846  * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
847  * @soc: DP soc structure pointer
848  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
849  *
850  * Return: HAL ring handle
851  */
852 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
853 						       uint8_t ring_id)
854 {
855 	if (ring_id == soc->num_tcl_data_rings)
856 		return soc->tcl_cmd_credit_ring.hal_srng;
857 
858 	return soc->tcl_data_ring[ring_id].hal_srng;
859 }
860 
#else /* QCA_OL_TX_MULTIQ_SUPPORT */

#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	/* With IPA enabled, use the vdev's dedicated ring; otherwise spread
	 * by the nbuf's queue mapping across all TCL data rings.
	 */
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		queue->ring_id = DP_TX_GET_RING_ID(vdev);
	else
		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
					vdev->pdev->soc->num_tcl_data_rings);
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	/* Spread by the nbuf's queue mapping across all TCL data rings */
	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
				vdev->pdev->soc->num_tcl_data_rings);
}
#endif
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
#endif

/* Without multi-queue support there is no cmd/credit ring special case */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
#endif
902 
#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: result of hal_srng_access_start_unlocked()
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	/* No reap-specific end step in the lock-less variant */
}

#else
/* Locked variants of the ring-access wrappers above; presumably serialize
 * via the SRNG lock inside hal_srng_access_start/end — see HAL.
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif
963 
/*
 * DP_TX_TID_OVERRIDE() - override msdu_info->tid with the nbuf priority
 * when ATH_TX_PRI_OVERRIDE is compiled in; expands to nothing otherwise.
 */
#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif
970 
971 /* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	/* Stub: exception-path completion processing is not implemented yet */
}
976 /* TODO TX_FEATURE_NOT_YET */
977 
978 /**
979  * dp_tx_desc_flush() - release resources associated
980  *                      to TX Desc
981  *
982  * @pdev: Handle to DP pdev structure
983  * @vdev: virtual device instance
984  * NULL: no specific Vdev is required and check all allcated TX desc
985  * on this pdev.
986  * Non-NULL: only check the allocated TX Desc associated to this Vdev.
987  *
988  * @force_free:
989  * true: flush the TX desc.
990  * false: only reset the Vdev in each allocated TX desc
991  * that associated to current Vdev.
992  *
993  * This function will go through the TX desc pool to flush
994  * the outstanding TX data or reset Vdev to NULL in associated TX
995  * Desc.
996  */
997 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
998 		      bool force_free);
999 
1000 /**
1001  * dp_tx_vdev_attach() - attach vdev to dp tx
1002  * @vdev: virtual device instance
1003  *
1004  * Return: QDF_STATUS_SUCCESS: success
1005  *         QDF_STATUS_E_RESOURCES: Error return
1006  */
1007 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
1008 
1009 /**
1010  * dp_tx_vdev_detach() - detach vdev from dp tx
1011  * @vdev: virtual device instance
1012  *
1013  * Return: QDF_STATUS_SUCCESS: success
1014  *         QDF_STATUS_E_RESOURCES: Error return
1015  */
1016 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
1017 
1018 /**
1019  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
1020  * @vdev: virtual device instance
1021  *
1022  * Return: void
1023  *
1024  */
1025 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
1026 
1027 /**
1028  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
1029  * @soc: core txrx main context
1030  *
1031  * This function allocates memory for following descriptor pools
1032  * 1. regular sw tx descriptor pools (static pools)
1033  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1034  * 3. TSO descriptor pools
1035  *
1036  * Return: QDF_STATUS_SUCCESS: success
1037  *         QDF_STATUS_E_RESOURCES: Error return
1038  */
1039 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
1040 
1041 /**
1042  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
1043  * @soc: core txrx main context
1044  *
1045  * This function initializes the following TX descriptor pools
1046  * 1. regular sw tx descriptor pools (static pools)
1047  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1048  * 3. TSO descriptor pools
1049  *
1050  * Return: QDF_STATUS_SUCCESS: success
1051  *	   QDF_STATUS_E_RESOURCES: Error return
1052  */
1053 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
1054 
1055 /**
1056  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
1057  * @soc: core txrx main context
1058  *
1059  * This function frees all tx related descriptors as below
1060  * 1. Regular TX descriptors (static pools)
1061  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1062  * 3. TSO descriptors
1063  *
1064  */
1065 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
1066 
1067 /**
1068  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
1069  * @soc: core txrx main context
1070  *
1071  * This function de-initializes all tx related descriptors as below
1072  * 1. Regular TX descriptors (static pools)
1073  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1074  * 3. TSO descriptors
1075  *
1076  */
1077 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
1078 
1079 /**
1080  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
1081  * @soc: core DP main context
1082  * @hal_desc: hal descriptor
1083  * @buf_type: indicates if the buffer is of type link disc or msdu
1084  *
1085  * wbm_internal_error is seen in following scenarios :
1086  *
1087  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
1088  * 2.  Null pointers detected during delinking process
1089  *
1090  * Some null pointer cases:
1091  *
1092  * a. MSDU buffer pointer is NULL
1093  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
1094  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
1095  *
1096  * Return: None
1097  */
1098 void
1099 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
1100 			     uint32_t buf_type);
1101 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1102 
/*
 * QCA_HOST_MODE_WIFI_DISABLED: host TX descriptor management is not
 * compiled in, so the following are no-op stubs that report success.
 */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
1142 
1143 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1144 
1145 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
1146 	defined(QCA_TX_CAPTURE_SUPPORT) || \
1147 	defined(QCA_MCOPY_SUPPORT)
1148 #ifdef FEATURE_PERPKT_INFO
1149 
1150 /**
1151  * dp_get_completion_indication_for_stack() - send completion to stack
1152  * @soc : dp_soc handle
1153  * @pdev: dp_pdev handle
1154  * @txrx_peer: dp peer handle
1155  * @ts: transmit completion status structure
1156  * @netbuf: Buffer pointer for free
1157  * @time_latency:
1158  *
1159  * This function is used for indication whether buffer needs to be
1160  * sent to stack for freeing or not
1161  *
1162  * Return: QDF_STATUS
1163  */
1164 QDF_STATUS
1165 dp_get_completion_indication_for_stack(struct dp_soc *soc,
1166 				       struct dp_pdev *pdev,
1167 				       struct dp_txrx_peer *txrx_peer,
1168 				       struct hal_tx_completion_status *ts,
1169 				       qdf_nbuf_t netbuf,
1170 				       uint64_t time_latency);
1171 
1172 /**
1173  * dp_send_completion_to_stack() - send completion to stack
1174  * @soc :  dp_soc handle
1175  * @pdev:  dp_pdev handle
1176  * @peer_id: peer_id of the peer for which completion came
1177  * @ppdu_id: ppdu_id
1178  * @netbuf: Buffer pointer for free
1179  *
1180  * This function is used to send completion to stack
1181  * to free buffer
1182  *
1183  * Return: QDF_STATUS
1184  */
1185 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1186 			    uint16_t peer_id, uint32_t ppdu_id,
1187 			    qdf_nbuf_t netbuf);
1188 #endif
1189 #else
/* Completion-to-stack indication not supported in this configuration */
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
1207 #endif
1208 
1209 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
1210 /**
1211  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
1212  * @soc: dp_soc handle
1213  * @desc: Tx Descriptor
1214  * @ts: HAL Tx completion descriptor contents
1215  *
1216  * This function is used to send tx completion to packet capture
1217  */
1218 
1219 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
1220 				       struct dp_tx_desc_s *desc,
1221 				       struct hal_tx_completion_status *ts);
1222 #else
/* Packet-capture v2 disabled: tx completion notification is a no-op */
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
1229 #endif
1230 
1231 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1232 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1233 /**
1234  * dp_tx_update_stats() - Update soc level tx stats
1235  * @soc: DP soc handle
1236  * @tx_desc: TX descriptor reference
1237  * @ring_id: TCL ring id
1238  *
1239  * Return: none
1240  */
1241 void dp_tx_update_stats(struct dp_soc *soc,
1242 			struct dp_tx_desc_s *tx_desc,
1243 			uint8_t ring_id);
1244 
1245 /**
1246  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1247  * @soc: Datapath soc handle
1248  * @vdev: DP vdev handle
1249  * @tx_desc: tx packet descriptor
1250  * @tid: TID for pkt transmission
1251  * @msdu_info: MSDU info of tx packet
1252  * @ring_id: TCL ring id
1253  *
1254  * Return: 1, if coalescing is to be done
1255  *	    0, if coalescing is not to be done
1256  */
1257 int
1258 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1259 			 struct dp_tx_desc_s *tx_desc,
1260 			 uint8_t tid,
1261 			 struct dp_tx_msdu_info_s *msdu_info,
1262 			 uint8_t ring_id);
1263 
1264 /**
1265  * dp_tx_ring_access_end() - HAL ring access end for data transmission
1266  * @soc: Datapath soc handle
1267  * @hal_ring_hdl: HAL ring handle
1268  * @coalesce: Coalesce the current write or not
1269  *
1270  * Return: none
1271  */
1272 void
1273 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1274 		      int coalesce);
1275 #else
1276 /**
1277  * dp_tx_update_stats() - Update soc level tx stats
1278  * @soc: DP soc handle
1279  * @tx_desc: TX descriptor reference
1280  * @ring_id: TCL ring id
1281  *
1282  * Return: none
1283  */
/* SW latency manager disabled: per-ring stats update is a no-op */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id){ }

/* Without the latency manager, end ring access immediately (no coalescing) */
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

/* Coalescing decision: never coalesce when the latency manager is off */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
1304 
1305 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
1306 
1307 #ifdef FEATURE_RUNTIME_PM
1308 /**
1309  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
1310  * @soc_hdl: DP soc handle
1311  * @is_high_tput: flag to indicate whether throughput is high
1312  *
1313  * Return: none
1314  */
1315 static inline
1316 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
1317 					 bool is_high_tput)
1318 {
1319 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1320 
1321 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
1322 }
1323 
1324 /**
1325  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1326  * @soc: Datapath soc handle
1327  * @hal_ring_hdl: HAL ring handle
1328  * @coalesce: Coalesce the current write or not
1329  *
1330  * Feature-specific wrapper for HAL ring access end for data
1331  * transmission
1332  *
1333  * Return: none
1334  */
1335 void
1336 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1337 			      hal_ring_handle_t hal_ring_hdl,
1338 			      int coalesce);
1339 #else
1340 #ifdef DP_POWER_SAVE
1341 void
1342 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1343 			      hal_ring_handle_t hal_ring_hdl,
1344 			      int coalesce);
1345 #else
/* Neither runtime-PM nor DP_POWER_SAVE: plain ring access end */
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
#endif

/* Runtime-PM disabled: throughput policy hint is ignored */
static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{ }
1359 #endif
1360 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1361 
1362 #ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_tx_hw_desc_update_evt() - record a posted TCL descriptor in the
 *				TX HW descriptor history
 * @hal_tx_desc_cached: software copy of the descriptor just posted
 * @hal_ring_hdl: HAL ring handle the descriptor was posted to
 * @soc: DP soc handle
 * @ring_id: TCL ring id
 *
 * Copies the descriptor bytes, a timestamp, the ring id and the ring
 * HP/TP snapshot into the next history slot for debugging.
 */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	/* History buffer may not have been allocated in this config */
	if (!tx_hw_desc_history->allocated)
		return;

	/* Atomically claim the next (slot, index) entry in the history */
	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = ring_id;
	/* Snapshot the ring head/tail pointers at post time */
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
#else
/* DP_TX_HW_DESC_HISTORY disabled: recording is a no-op */
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc, uint8_t ring_id)
{
}
1396 #endif
1397 
1398 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
1399 /**
1400  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
1401  * @ts: Tx completion status
1402  * @delta_tsf: Difference between TSF clock and qtimer
1403  * @delay_us: Delay in microseconds
1404  *
1405  * Return: QDF_STATUS_SUCCESS   : Success
1406  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
1407  *                                delay_us is NULL
1408  *         QDF_STATUS_E_FAILURE : Error in delay calculation
1409  */
1410 QDF_STATUS
1411 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
1412 			  uint32_t delta_tsf,
1413 			  uint32_t *delay_us);
1414 
1415 /**
1416  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
1417  * @soc_hdl: cdp soc pointer
1418  * @vdev_id: vdev id
1419  * @delta_tsf: difference between TSF clock and qtimer
1420  *
1421  * Return: None
1422  */
1423 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1424 		      uint32_t delta_tsf);
1425 #endif
1426 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
1427 /**
1428  * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
1429  * @soc_hdl: cdp soc pointer
1430  * @vdev_id: vdev id
1431  * @enable: true to enable and false to disable
1432  *
1433  * Return: QDF_STATUS
1434  */
1435 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
1436 				      uint8_t vdev_id, bool enable);
1437 
1438 /**
1439  * dp_get_uplink_delay() - Get uplink delay value
1440  * @soc_hdl: cdp soc pointer
1441  * @vdev_id: vdev id
1442  * @val: pointer to save uplink delay value
1443  *
1444  * Return: QDF_STATUS
1445  */
1446 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1447 			       uint32_t *val);
1448 #endif /* WLAN_FEATURE_TSF_UPLINK_TSF */
1449 
1450 /**
1451  * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
1452  *
1453  * Return: True if any tx pkt tracepoint is enabled else false
1454  */
1455 static inline
1456 bool dp_tx_pkt_tracepoints_enabled(void)
1457 {
1458 	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
1459 		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
1460 		qdf_trace_dp_tx_comp_pkt_enabled());
1461 }
1462 
1463 #ifdef DP_TX_TRACKING
1464 /**
1465  * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
1466  * @tx_desc: tx descriptor
1467  *
1468  * Return: None
1469  */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	/* Record the enqueue time in system ticks */
	tx_desc->timestamp_tick = qdf_system_ticks();
}
1475 
1476 /**
1477  * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
1478  * @tx_desc: tx descriptor
1479  *
1480  * Check for corruption in tx descriptor, if magic pattern is not matching
1481  * trigger self recovery
1482  *
1483  * Return: none
1484  */
1485 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
1486 #else
/* DP_TX_TRACKING disabled: timestamping and corruption checks are no-ops */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
1496 #endif
1497 
1498 #ifndef CONFIG_SAWF
/* SAWF disabled: no nbuf ever carries a valid SAWF tag */
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
1503 #endif
1504 
1505 #ifdef HW_TX_DELAY_STATS_ENABLE
1506 /**
1507  * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
1508  * @vdev: DP vdev handle
1509  * @tx_desc: tx descriptor
1510  *
1511  * Return: true when descriptor is timestamped, false otherwise
1512  */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	/*
	 * Stamp only when some consumer of the timestamp is active:
	 * pdev delay stats, extended stats, tx tracepoints, peer stats
	 * or per-vdev HW tx delay stats.
	 */
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#else
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	/* Same consumers checked, minus the per-vdev HW tx delay stats
	 * (HW_TX_DELAY_STATS_ENABLE not compiled in).
	 */
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
1541 #endif
1542 
1543 #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
1544 /**
1545  * dp_pkt_add_timestamp() - add timestamp in data payload
1546  *
1547  * @vdev: dp vdev
1548  * @index: index to decide offset in payload
1549  * @time: timestamp to add in data payload
1550  * @nbuf: network buffer
1551  *
1552  * Return: none
1553  */
1554 void dp_pkt_add_timestamp(struct dp_vdev *vdev,
1555 			  enum qdf_pkt_timestamp_index index, uint64_t time,
1556 			  qdf_nbuf_t nbuf);
1557 /**
1558  * dp_pkt_get_timestamp() - get current system time
1559  *
1560  * @time: return current system time
1561  *
1562  * Return: none
1563  */
1564 void dp_pkt_get_timestamp(uint64_t *time);
1565 #else
1566 #define dp_pkt_add_timestamp(vdev, index, time, nbuf)
1567 
1568 static inline
1569 void dp_pkt_get_timestamp(uint64_t *time)
1570 {
1571 }
1572 #endif
1573 
1574 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1575 /**
1576  * dp_update_tx_desc_stats - Update the increase or decrease in
1577  * outstanding tx desc count
1578  * values on pdev and soc
1579  * @pdev: DP pdev handle
1580  *
1581  * Return: void
1582  */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	/* Track the high-water mark of outstanding tx descriptors.
	 * NOTE(review): read-then-update is not atomic, so the max may
	 * be slightly stale under concurrent callers — presumed
	 * acceptable for statistics; confirm if exactness matters.
	 */
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}

#else /* CONFIG_WLAN_SYSFS_MEM_STATS */

static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
1600 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1601 
1602 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1603 /**
1604  * dp_tx_get_global_desc_in_use() - read global descriptors in usage
1605  * @dp_global: Datapath global context
1606  *
1607  * Return: global descriptors in use
1608  */
static inline int32_t
dp_tx_get_global_desc_in_use(struct dp_global_context *dp_global)
{
	/* Atomic snapshot of the global in-use tx descriptor count */
	return qdf_atomic_read(&dp_global->global_descriptor_in_use);
}
1614 #endif
1615 
1616 #ifdef QCA_TX_LIMIT_CHECK
1617 static inline bool is_spl_packet(qdf_nbuf_t nbuf)
1618 {
1619 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1620 		return true;
1621 	return false;
1622 }
1623 
1624 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1625 /**
1626  * is_dp_spl_tx_limit_reached - Check if the packet is a special packet to allow
1627  * allocation if allocated tx descriptors are within the global max limit
1628  * and pdev max limit.
1629  * @vdev: DP vdev handle
1630  * @nbuf: network buffer
1631  *
1632  * Return: true if allocated tx descriptors reached max configured value, else
1633  * false
1634  */
1635 static inline bool
1636 is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1637 {
1638 	struct dp_pdev *pdev = vdev->pdev;
1639 	struct dp_soc *soc = pdev->soc;
1640 	struct dp_global_context *dp_global;
1641 	uint32_t global_tx_desc_allowed;
1642 
1643 	dp_global = wlan_objmgr_get_global_ctx();
1644 	global_tx_desc_allowed =
1645 		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);
1646 
1647 	if (is_spl_packet(nbuf)) {
1648 		if (dp_tx_get_global_desc_in_use(dp_global) >=
1649 				global_tx_desc_allowed)
1650 			return true;
1651 
1652 		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1653 			pdev->num_tx_allowed)
1654 			return true;
1655 
1656 		return false;
1657 	}
1658 
1659 	return true;
1660 }
1661 
1662 /**
1663  * dp_tx_limit_check - Check if allocated tx descriptors reached
1664  * global max reg limit and pdev max reg limit for regular packets. Also check
1665  * if the limit is reached for special packets.
1666  * @vdev: DP vdev handle
1667  * @nbuf: network buffer
1668  *
1669  * Return: true if allocated tx descriptors reached max limit for regular
1670  * packets and in case of special packets, if the limit is reached max
1671  * configured vale for the soc/pdev, else false
1672  */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_global_context *dp_global;
	uint32_t global_tx_desc_allowed;
	uint32_t global_tx_desc_reg_allowed;
	uint32_t global_tx_desc_spcl_allowed;

	dp_global = wlan_objmgr_get_global_ctx();
	global_tx_desc_allowed =
		wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);
	global_tx_desc_spcl_allowed =
		wlan_cfg_get_num_global_spcl_tx_desc(soc->wlan_cfg_ctx);
	/* Regular traffic may only use what remains after the special-
	 * packet reservation is carved out of the global pool.
	 */
	global_tx_desc_reg_allowed = global_tx_desc_allowed -
					global_tx_desc_spcl_allowed;

	if (dp_tx_get_global_desc_in_use(dp_global) >= global_tx_desc_reg_allowed) {
		/* Over the global regular limit: drop unless this is a
		 * special packet that still fits in the reserved headroom.
		 */
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		/* Same check against the per-pdev regular limit */
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
1711 #else
1712 /**
1713  * is_dp_spl_tx_limit_reached - Check if the packet is a special packet to allow
1714  * allocation if allocated tx descriptors are within the soc max limit
1715  * and pdev max limit.
1716  * @vdev: DP vdev handle
1717  * @nbuf: network buffer
1718  *
1719  * Return: true if allocated tx descriptors reached max configured value, else
1720  * false
1721  */
1722 static inline bool
1723 is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1724 {
1725 	struct dp_pdev *pdev = vdev->pdev;
1726 	struct dp_soc *soc = pdev->soc;
1727 
1728 	if (is_spl_packet(nbuf)) {
1729 		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
1730 				soc->num_tx_allowed)
1731 			return true;
1732 
1733 		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
1734 			pdev->num_tx_allowed)
1735 			return true;
1736 
1737 		return false;
1738 	}
1739 
1740 	return true;
1741 }
1742 
1743 /**
1744  * dp_tx_limit_check - Check if allocated tx descriptors reached
1745  * soc max reg limit and pdev max reg limit for regular packets. Also check if
1746  * the limit is reached for special packets.
1747  * @vdev: DP vdev handle
1748  * @nbuf: network buffer
1749  *
1750  * Return: true if allocated tx descriptors reached max limit for regular
1751  * packets and in case of special packets, if the limit is reached max
1752  * configured vale for the soc/pdev, else false
1753  */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_reg_tx_allowed) {
		/* Over the soc regular limit: drop unless this is a special
		 * packet that still fits within the absolute maxima.
		 */
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		/* Same check against the per-pdev regular limit */
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets are more than max tx, drop the frame");
			DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i.dropped.desc_na_exc_outstand.num, 1);
			return true;
		}
	}
	return false;
}
1781 #endif
1782 
1783 /**
1784  * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
1785  * reached soc max limit
1786  * @vdev: DP vdev handle
1787  *
1788  * Return: true if allocated tx descriptors reached max configured value, else
1789  * false
1790  */
1791 static inline bool
1792 dp_tx_exception_limit_check(struct dp_vdev *vdev)
1793 {
1794 	struct dp_pdev *pdev = vdev->pdev;
1795 	struct dp_soc *soc = pdev->soc;
1796 
1797 	if (qdf_atomic_read(&soc->num_tx_exception) >=
1798 			soc->num_msdu_exception_desc) {
1799 		dp_info("exc packets are more than max drop the exc pkt");
1800 		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
1801 		return true;
1802 	}
1803 
1804 	return false;
1805 }
1806 
1807 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1808 /**
1809  * dp_tx_outstanding_inc - Inc outstanding tx desc values on global and pdev
1810  * @pdev: DP pdev handle
1811  *
1812  * Return: void
1813  */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_global_context *dp_global;

	dp_global = wlan_objmgr_get_global_ctx();

	/* Account the new descriptor at both global and pdev scope */
	qdf_atomic_inc(&dp_global->global_descriptor_in_use);
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1825 
1826 /**
1827  * dp_tx_outstanding_dec - Dec outstanding tx desc values on global and pdev
1828  * @pdev: DP pdev handle
1829  *
1830  * Return: void
1831  */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_global_context *dp_global;

	dp_global = wlan_objmgr_get_global_ctx();

	/* Release the descriptor accounting at both global and pdev scope */
	qdf_atomic_dec(&dp_global->global_descriptor_in_use);
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1843 
1844 #else
1845 /**
1846  * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
1847  * @pdev: DP pdev handle
1848  *
1849  * Return: void
1850  */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Account the new descriptor at both pdev and soc scope */
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1860 
1861 /**
1862  * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
1863  * @pdev: DP pdev handle
1864  *
1865  * Return: void
1866  */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Release the descriptor accounting at both pdev and soc scope */
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1876 #endif /* QCA_SUPPORT_DP_GLOBAL_CTX */
1877 
1878 #else //QCA_TX_LIMIT_CHECK
/* QCA_TX_LIMIT_CHECK disabled: no descriptor limits are enforced */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

/* Only the per-pdev outstanding count is maintained in this config */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
1904 #endif //QCA_TX_LIMIT_CHECK
1905 
1906 /**
1907  * dp_tx_get_pkt_len() - Get the packet length of a msdu
1908  * @tx_desc: tx descriptor
1909  *
1910  * Return: Packet length of a msdu. If the packet is fragmented,
1911  * it will return the single fragment length.
1912  *
1913  * In TSO mode, the msdu from stack will be fragmented into small
1914  * fragments and each of these new fragments will be transmitted
1915  * as an individual msdu.
1916  *
1917  * Please note that the length of a msdu from stack may be smaller
1918  * than the length of the total length of the fragments it has been
1919  * fragmentted because each of the fragments has a nbuf header.
1920  */
1921 static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
1922 {
1923 	return tx_desc->frm_type == dp_tx_frm_tso ?
1924 		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
1925 		qdf_nbuf_len(tx_desc->nbuf);
1926 }
1927 #endif
1928