1 /*
2  * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * @file ol_txrx_ctrl_api.h
22  * @brief Define the host data API functions called by the host control SW.
23  */
24 #ifndef _OL_TXRX_CTRL_API__H_
25 #define _OL_TXRX_CTRL_API__H_
26 
27 #include <athdefs.h>            /* A_STATUS */
28 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
29 #include <qdf_types.h>          /* qdf_device_t */
30 #include <htc_api.h>            /* HTC_HANDLE */
31 
32 #include <ol_txrx_api.h>        /* ol_sec_type */
33 #include <wlan_defs.h>          /* MAX_SPATIAL_STREAM */
34 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle, ol_vdev_handle, etc */
35 #include <cdp_txrx_cfg.h>
36 #include <ol_defines.h>
37 #include <cdp_txrx_handle.h>
38 #define OL_ATH_TX_DRAIN_WAIT_DELAY 50
39 
40 /**
41  * @brief Set up the data SW subsystem.
42  * @details
43  *  As part of the WLAN device attach, the data SW subsystem has
44  *  to be attached as a component within the WLAN device.
45  *  This attach allocates and initializes the physical device object
46  *  used by the data SW.
47  *  The data SW subsystem attach needs to happen after the target has
 *  been started, and host / target parameter negotiation has completed,
49  *  since the host data SW uses some of these host/target negotiated
50  *  parameters (e.g. peer ID range) during the initializations within
51  *  its attach function.
52  *  However, the host data SW is not allowed to send HTC messages to the
53  *  target within this pdev_attach function call, since the HTC setup
 *  has not been completed at this stage of initialization.  Any messaging
55  *  to the target has to be done in the separate pdev_attach_target call
56  *  that is invoked after HTC setup is complete.
57  *
58  * @param soc - datapath soc handle
59  * @param pdev_id - physical device instance id
60  * @return 0 for success or error code
61  */
62 int
63 ol_txrx_pdev_post_attach(struct cdp_soc_t *soc, uint8_t pdev_id);
64 
65 /**
66  * @brief Parameter type to be input to ol_txrx_peer_update
67  * @details
 *  This type is a union, used to specify various kinds of information
 *  for updating the txrx peer object.
70  */
union ol_txrx_peer_update_param_t {
	uint8_t qos_capable;       /* used with ol_txrx_peer_update_qos_capable */
	uint8_t uapsd_mask;        /* used with ol_txrx_peer_update_uapsdMask */
	enum ol_sec_type sec_type; /* used with ol_txrx_peer_update_peer_security */
};
76 
77 /**
78  * @brief Parameter type to be input to ol_txrx_peer_update
79  * @details
80  *   This enum is used to specify what exact information in
81  *   ol_txrx_peer_update_param_t
82  *   is used to update the txrx peer object.
83  */
enum ol_txrx_peer_update_select_t {
	ol_txrx_peer_update_qos_capable = 1, /* update the qos_capable field */
	ol_txrx_peer_update_uapsdMask,       /* update the uapsd_mask field */
	ol_txrx_peer_update_peer_security,   /* update the sec_type field */
};
89 
90 /**
91  * @brief Update the data peer object as some information changed in node.
92  * @details
93  *  Only a single parameter can be changed for each call to this func.
94  *
 * @param data_vdev - the virtual device that the peer is associated with
 * @param peer_mac - MAC address identifying which peer to update
 * @param param - new parameter value to be applied to the peer object
 * @param select - specifies which parameter within 'param' is to be updated
98  */
99 void
100 ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
101 		    union ol_txrx_peer_update_param_t *param,
102 		    enum ol_txrx_peer_update_select_t select);
103 
104 #if defined(CONFIG_HL_SUPPORT)
105 /**
106  * @brief notify tx data SW that a peer-TID is ready to transmit to.
107  * @details
108  *  This function applies only to HL systems - in LL systems, tx flow control
109  *  is handled entirely within the target FW.
110  *  If a peer-TID has tx paused, then the tx datapath will end up queuing
111  *  any tx frames that arrive from the OS shim for that peer-TID.
112  *  In a HL system, the host tx data SW itself will classify the tx frame,
113  *  and determine that it needs to be queued rather than downloaded to the
114  *  target for transmission.
115  *  Once the peer-TID is ready to accept data, the host control SW will call
116  *  this function to notify the host data SW that the queued frames can be
117  *  enabled for transmission, or specifically to download the tx frames
118  *  to the target to transmit.
119  *  The TID parameter is an extended version of the QoS TID.  Values 0-15
120  *  indicate a regular QoS TID, and the value 16 indicates either non-QoS
121  *  data, multicast data, or broadcast data.
122  *
123  * @param data_peer - which peer is being unpaused
124  * @param tid - which TID within the peer is being unpaused, or -1 as a
125  *      wildcard to unpause all TIDs within the peer
126  */
127 void
128 ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid);
129 
130 
131 /**
132  * @brief Tell a paused peer to release a specified number of tx frames.
133  * @details
134  *  This function applies only to HL systems - in LL systems, tx flow control
135  *  is handled entirely within the target FW.
136  *  Download up to a specified maximum number of tx frames from the tx
137  *  queues of the specified TIDs within the specified paused peer, usually
138  *  in response to a U-APSD trigger from the peer.
139  *  It is up to the host data SW to determine how to choose frames from the
140  *  tx queues of the specified TIDs.  However, the host data SW does need to
141  *  provide long-term fairness across the U-APSD enabled TIDs.
142  *  The host data SW will notify the target data FW when it is done downloading
143  *  the batch of U-APSD triggered tx frames, so the target data FW can
144  *  differentiate between an in-progress download versus a case when there are
145  *  fewer tx frames available than the specified limit.
146  *  This function is relevant primarily to HL U-APSD, where the frames are
147  *  held in the host.
148  *
149  * @param peer - which peer sent the U-APSD trigger
150  * @param tid_mask - bitmask of U-APSD enabled TIDs from whose tx queues
151  *      tx frames can be released
152  * @param max_frms - limit on the number of tx frames to release from the
153  *      specified TID's queues within the specified peer
154  */
155 void ol_txrx_tx_release(ol_txrx_peer_handle peer,
156 			u_int32_t tid_mask,
157 			int max_frms);
158 
159 #else
static inline void
ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid)
{
	/* no-op: per-TID unpause applies only to HL (CONFIG_HL_SUPPORT) builds */
}
164 
static inline void
ol_txrx_tx_release(ol_txrx_peer_handle peer,
		   u_int32_t tid_mask,
		   int max_frms)
{
	/* no-op: U-APSD triggered tx release applies only to HL builds */
}
171 
172 #endif /* CONFIG_HL_SUPPORT */
173 
174 #ifdef QCA_SUPPORT_TX_THROTTLE
175 /**
176  * @brief Suspend all tx data per thermal event/timer for the
177  *  specified physical device
178  * @details
 *  This function applies only to HL systems, and it makes pause and
 *  unpause operations happen in pairs.
181  */
182 void
183 ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev);
184 
185 
186 /**
187  * @brief Resume all tx data per thermal event/timer for the
188  * specified physical device
189  * @details
 *  This function applies only to HL systems, and it makes pause and
 *  unpause operations happen in pairs.
192  */
193 void
194 ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
195 #else
196 
static inline void
ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev)
{
	/* no-op: thermal throttling requires QCA_SUPPORT_TX_THROTTLE */
}
201 
static inline void
ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev)
{
	/* no-op: thermal throttling requires QCA_SUPPORT_TX_THROTTLE */
}
206 #endif
207 
208 /**
209  * @brief notify tx data SW that a peer's transmissions are suspended.
210  * @details
211  *  This function applies only to HL systems - in LL systems, tx flow control
212  *  is handled entirely within the target FW.
213  *  The HL host tx data SW is doing tx classification and tx download
214  *  scheduling, and therefore also needs to actively participate in tx
215  *  flow control.  Specifically, the HL tx data SW needs to check whether a
216  *  given peer is available to transmit to, or is paused.
217  *  This function is used to tell the HL tx data SW when a peer is paused,
 *  so the host tx data SW can hold the tx frames for that peer.
219  *
220  * @param data_peer - which peer is being paused
221  */
static inline void ol_txrx_peer_pause(struct ol_txrx_peer_t *data_peer)
{
	/* no-op: peer pause is relevant only to HL tx flow control */
}
225 
226 /**
227  * @brief Suspend all tx data for the specified physical device.
228  * @details
229  *  This function applies only to HL systems - in LL systems, tx flow control
230  *  is handled entirely within the target FW.
231  *  In some systems it is necessary to be able to temporarily
232  *  suspend all WLAN traffic, e.g. to allow another device such as bluetooth
233  *  to temporarily have exclusive access to shared RF chain resources.
234  *  This function suspends tx traffic within the specified physical device.
235  *
236  * @param data_pdev - the physical device being paused
237  */
238 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
239 
240 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
241 #else
static inline
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
{
	/*
	 * no-op: pdev pause requires QCA_LL_TX_FLOW_CONTROL_V2 or
	 * CONFIG_HL_SUPPORT
	 */
}
246 #endif
247 
248 /**
249  * @brief Resume tx for the specified physical device.
250  * @details
251  *  This function applies only to HL systems - in LL systems, tx flow control
252  *  is handled entirely within the target FW.
253  *
254  * @param data_pdev - the physical device being unpaused
255  */
256 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
257 
258 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
259 #else
static inline
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	/*
	 * no-op: pdev unpause requires QCA_LL_TX_FLOW_CONTROL_V2 or
	 * CONFIG_HL_SUPPORT
	 */
}
264 #endif
265 
266 /**
267  * @brief Synchronize the data-path tx with a control-path target download
 * @details
269  * @param data_pdev - the data-path physical device object
270  * @param sync_cnt - after the host data-path SW downloads this sync request
271  *      to the target data-path FW, the target tx data-path will hold itself
272  *      in suspension until it is given an out-of-band sync counter value that
273  *      is equal to or greater than this counter value
274  */
275 void ol_txrx_tx_sync(ol_txrx_pdev_handle data_pdev, uint8_t sync_cnt);
276 
277 /**
278  * @brief Store a delivery notification callback for specific data frames.
279  * @details
280  *  Through a non-std tx function, the txrx SW can be given tx data frames
281  *  that are specially marked to not be unmapped and freed by the tx SW
282  *  when transmission completes.  Rather, these specially-marked frames
283  *  are provided to the callback registered with this function.
284  *
285  * @param soc - datapath soc handle
286  * @param vdev_id - id of which vdev the callback is being registered with
287  *      (Currently the callback is stored in the pdev rather than the vdev.)
288  * @param callback - the function to call when tx frames marked as "no free"
289  *      are done being transmitted
290  * @param ctxt - the context argument provided to the callback function
291  */
292 void
293 ol_txrx_data_tx_cb_set(struct cdp_soc_t *soc, uint8_t vdev_id,
294 		       ol_txrx_data_tx_cb callback, void *ctxt);
295 
296 /**
297  * @brief Discard all tx frames that are pending in txrx.
298  * @details
299  *  Mainly used in clean up path to make sure all pending tx packets
300  *  held by txrx are returned back to OS shim immediately.
301  *
302  * @param pdev - the data physical device object
303  * @return - void
304  */
305 void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev);
306 
307 void
308 ol_txrx_peer_keyinstalled_state_update(ol_txrx_peer_handle data_peer,
309 				       uint8_t val);
310 
311 #define ol_tx_addba_conf(data_peer, tid, status)        /* no-op */
312 
313 /**
314  * @brief Find a txrx peer handle from the peer's MAC address
315  * @details
316  *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain the peer's MAC address,
 *  this function allows the peer handle to be retrieved, based on the peer's
 *  MAC address.
321  *  In cases where there are multiple peer objects with the same MAC address,
322  *  it is undefined which such object is returned.
323  *  This function does not increment the peer's reference count.  Thus, it is
324  *  only suitable for use as long as the control SW has assurance that it has
325  *  not deleted the peer object, by calling ol_txrx_peer_detach.
326  *
327  * @param pdev - the data physical device object
328  * @param peer_mac_addr - MAC address of the peer in question
329  * @return handle to the txrx peer object
330  */
331 ol_txrx_peer_handle
332 ol_txrx_peer_find_by_addr(ol_txrx_pdev_handle pdev, uint8_t *peer_mac_addr);
333 
/* Snapshot of per-peer tx/rx counters; see ol_txrx_peer_stats_copy() */
struct ol_txrx_peer_stats_t {
	struct {
		/* number of tx frames, by delivery category */
		struct {
			uint32_t ucast; /* unicast */
			uint32_t mcast; /* multicast */
			uint32_t bcast; /* broadcast */
		} frms;
		/* number of tx bytes, by delivery category */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;
	} tx;
	struct {
		/* number of rx frames, by delivery category */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} frms;
		/* number of rx bytes, by delivery category */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;
	} rx;
};
360 
361 /**
362  * @brief Provide a snapshot of the txrx counters for the specified peer
363  * @details
364  *  The txrx layer optionally maintains per-peer stats counters.
365  *  This function provides the caller with a consistent snapshot of the
366  *  txrx stats counters for the specified peer.
367  *
368  * @param pdev - the data physical device object
369  * @param peer - which peer's stats counters are requested
370  * @param stats - buffer for holding the stats counters snapshot
371  * @return success / failure status
372  */
373 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
374 A_STATUS
375 ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
376 			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats);
377 #else
378 #define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR      /* failure */
379 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
380 
381 
382 #define OL_TXRX_RSSI_INVALID 0xffff
383 /**
384  * @brief Provide the current RSSI average from data frames sent by a peer.
385  * @details
386  *  If a peer has sent data frames, the data SW will optionally keep
387  *  a running average of the RSSI observed for those data frames.
388  *  This function returns that time-average RSSI if is it available,
389  *  or OL_TXRX_RSSI_INVALID if either RSSI tracking is disabled or if
390  *  no data frame indications with valid RSSI meta-data have been received.
391  *  The RSSI is in approximate dBm units, and is normalized with respect
392  *  to a 20 MHz channel.  For example, if a data frame is received on a
393  *  40 MHz channel, wherein both the primary 20 MHz channel and the
394  *  secondary 20 MHz channel have an RSSI of -77 dBm, the reported RSSI
395  *  will be -77 dBm, rather than the actual -74 dBm RSSI from the
396  *  combination of the primary + extension 20 MHz channels.
397  *  Alternatively, the RSSI may be evaluated only on the primary 20 MHz
398  *  channel.
399  *
400  * @param peer - which peer's RSSI is desired
401  * @return RSSI evaluated from frames sent by the specified peer
402  */
403 #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
404 int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
405 #else
406 #define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
407 #endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
408 
409 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
410 
411 /**
412  * ol_txrx_bad_peer_txctl_set_setting() - Configure the bad peer tx
413  *					  limit setting.
414  * @soc_hdl: soc handle
415  * @pdev_id: datapath pdev identifier
416  * @enable: enable/disable setting
417  * @period: balance period in ms
418  * @txq_limit: balance txq limit
 *
 * Return: none
 */
422 void
423 ol_txrx_bad_peer_txctl_set_setting(
424 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
425 	int enable,
426 	int period,
427 	int txq_limit);
428 
429 /**
430  * ol_txrx_bad_peer_txctl_update_threshold() - Configure the bad peer tx
431  *					       threshold limit
432  * @soc_hdl: soc handle
433  * @pdev_id: datapath pdev identifier
434  * @level: txctl level
 * @tput_thresh: throughput threshold
436  * @tx_limit: balance tx limit
 *
 * Return: none
 */
440 void
441 ol_txrx_bad_peer_txctl_update_threshold(
442 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
443 	int level,
444 	int tput_thresh,
445 	int tx_limit);
446 
447 #else
448 
static inline void
ol_txrx_bad_peer_txctl_set_setting(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	int enable,
	int period,
	int txq_limit)
{
	/* no-op: bad-peer tx flow control (QCA_BAD_PEER_TX_FLOW_CL) disabled */
}
457 
static inline void
ol_txrx_bad_peer_txctl_update_threshold(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	int level,
	int tput_thresh,
	int tx_limit)
{
	/* no-op: bad-peer tx flow control (QCA_BAD_PEER_TX_FLOW_CL) disabled */
}
466 #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
467 
468 
469 void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
470 			  struct ol_txrx_peer_t *peer);
471 
472 bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
473 			  struct ol_txrx_peer_t **peer);
474 
475 void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value);
476 uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void);
477 
478 #ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
479 void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
480 				    uint16_t flow_pool_size);
481 #else
static inline void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
						  uint16_t flow_pool_size)
{
	/* no-op: pool resize requires QCA_LL_TX_FLOW_CONTROL_RESIZE */
}
486 #endif
487 
488 /* TX FLOW Control related functions */
489 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
490 #define TX_FLOW_MGMT_POOL_ID	0xEF
491 
492 #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
493 #define TX_FLOW_MGMT_POOL_SIZE  32
494 #else
495 #define TX_FLOW_MGMT_POOL_SIZE  0
496 #endif
497 
498 void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
499 void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
500 void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
501 void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev);
502 void ol_tx_clear_flow_pool_stats(void);
503 void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
504 				 uint8_t flow_pool_id, uint16_t flow_pool_size);
505 void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
506 				   uint8_t flow_pool_id);
507 struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
508 						 uint16_t flow_pool_size);
509 
510 /**
511  * ol_tx_inc_pool_ref() - increment pool ref count
512  * @pool: flow pool pointer
513  *
514  * Increments pool's ref count, used to make sure that no one is using
515  * pool when it is being deleted.
516  * As this function is taking pool->flow_pool_lock inside it, it should
517  * always be called outside this spinlock.
518  *
519  * Return: QDF_STATUS_SUCCESS - in case of success
520  */
521 QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool);
522 
523 /**
524  * ol_tx_dec_pool_ref() - decrement pool ref count
525  * @pool: flow pool pointer
526  * @force: free pool forcefully
527  *
528  * Decrements pool's ref count and deletes the pool if ref count gets 0.
529  * As this function is taking pdev->tx_desc.flow_pool_list_lock and
530  * pool->flow_pool_lock inside it, it should always be called outside
531  * these two spinlocks.
532  *
533  * Return: QDF_STATUS_SUCCESS - in case of success
534  */
535 QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force);
536 
537 #else
538 
static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
	/* no-op: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) not compiled in */
}
static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	/* no-op: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) not compiled in */
}
545 
546 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
547 void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
548 void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev);
549 #else
static inline void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	/* no-op: no flow pools exist in this configuration */
}
553 
static inline
void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev)
{
	/* no-op: no flow pools exist in this configuration */
}
558 #endif
559 
static inline void ol_tx_clear_flow_pool_stats(void)
{
	/* no-op: no flow pool stats exist in this configuration */
}
static inline void ol_tx_flow_pool_map_handler(uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	/* no-op: flow pool map events are ignored without flow control */
}
static inline void ol_tx_flow_pool_unmap_handler(uint8_t flow_id,
	 uint8_t flow_type, uint8_t flow_pool_id)
{
	/* no-op: flow pool unmap events are ignored without flow control */
}
static inline struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(
		uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	/* flow pools cannot be created in this configuration */
	return NULL;
}
static inline QDF_STATUS
ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
{
	/* no ref counting needed without flow pools; report success */
	return QDF_STATUS_SUCCESS;
}
static inline QDF_STATUS
ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
{
	/* no ref counting needed without flow pools; report success */
	return QDF_STATUS_SUCCESS;
}
586 #endif
587 
588 #endif /* _OL_TXRX_CTRL_API__H_ */
589