1  /*
2   * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  /**
21   * DOC: qdf_defer.h
22   * This file abstracts deferred execution API's.
23   */
24  
25  #ifndef __QDF_DEFER_H
26  #define __QDF_DEFER_H
27  
28  #include <qdf_types.h>
29  #include <i_qdf_defer.h>
30  
/*
 * TODO: This implements work queues (worker threads, kernel threads, etc.).
 * Note that there is no cancel on a scheduled work. You cannot free a work
 * item if it is queued. You cannot know whether a work item is queued unless
 * it is running (in which case you know it is not queued).
 *
 * So if, say, a module is asked to unload itself, how exactly will it make
 * sure that the work is not queued, for OSes that don't provide such a
 * mechanism?
 */
41  
/*
 * Representation of a deferred work item and of a work queue.
 */
typedef __qdf_work_t     qdf_work_t;
typedef __qdf_workqueue_t     qdf_workqueue_t;

/*
 * Representation of a bottom half.
 */
typedef __qdf_bh_t       qdf_bh_t;
52  
53  #ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_create_bh() - creates the bottom half deferred handler
 * @bh: pointer to the bottom half handle
 * @func: deferred function to run at bottom half interrupt context
 * @arg: argument for the deferred function
 *
 * Return: none
 */
void
qdf_create_bh(qdf_bh_t  *bh, qdf_defer_fn_t  func, void  *arg);

/**
 * qdf_sched_bh() - schedule a bottom half (DPC)
 * @bh: pointer to the bottom half handle
 *
 * Return: none
 */
void qdf_sched_bh(qdf_bh_t *bh);

/**
 * qdf_destroy_bh() - destroy the bottom half (synchronous)
 * @bh: pointer to the bottom half handle
 *
 * Return: none
 */
void qdf_destroy_bh(qdf_bh_t *bh);

/**
 * qdf_create_workqueue() - create a workqueue; queued work runs in
 * non-interrupt context, so it can be preempted by H/W & S/W intr
 * @name: name of the workqueue
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_create_workqueue(char *name);

/**
 * qdf_create_singlethread_workqueue() - create a single threaded workqueue
 * @name: name of the workqueue
 *
 * This API creates a dedicated work queue with a single worker thread to avoid
 * wasting unnecessary resources when works which needs to be submitted in this
 * queue are not very critical and frequent.
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name);

/*
 * NOTE(review): unlike the non-ENHANCED_OS_ABSTRACTION branch below, this
 * branch declares no qdf_alloc_high_prior_ordered_workqueue() — confirm
 * whether that is intentional.
 */

/**
 * qdf_alloc_unbound_workqueue() - allocate an unbound workqueue
 * @name: name of the workqueue
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name);

/**
 * qdf_destroy_workqueue() - destroy the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
void qdf_destroy_workqueue(qdf_handle_t hdl, qdf_workqueue_t *wqueue);

/**
 * qdf_cancel_work() - cancel a work
 * @work: pointer to work
 *
 * Cancel work and wait for its execution to finish.
 * This function can be used even if the work re-queues
 * itself or migrates to another workqueue. On return
 * from this function, work is guaranteed to be not
 * pending or executing on any CPU. The caller must
 * ensure that the workqueue on which work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: true if work was pending, false otherwise
 */
bool qdf_cancel_work(qdf_work_t *work);

/**
 * qdf_disable_work() - disable the deferred task (synchronous)
 * @work: pointer to work
 *
 * Return: unsigned int
 */
uint32_t qdf_disable_work(qdf_work_t *work);

/**
 * qdf_flush_work() - flush a deferred task on non-interrupt context
 * @work: pointer to work
 *
 * Wait until work has finished execution. work is guaranteed to be
 * idle on return if it hasn't been requeued since flush started.
 *
 * Return: none
 */
void qdf_flush_work(qdf_work_t *work);

/**
 * qdf_create_work() - create a work/task queue; the work runs in
 * non-interrupt context, so it can be preempted by H/W & S/W intr
 * @hdl: OS handle
 * @work: pointer to work
 * @func: deferred function to run at bottom half non-interrupt context
 * @arg: argument for the deferred function
 *
 * Return: QDF status
 */
QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t  *work,
			   qdf_defer_fn_t  func, void  *arg);

/**
 * qdf_sched_work() - schedule a deferred task on non-interrupt context
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: false if work was already on a queue, true otherwise
 */
bool qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work);

/**
 * qdf_queue_work() - queue the work/task
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 * @work: pointer to work
 *
 * Return: false if work was already on a queue, true otherwise
 */
bool
qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work);

/**
 * qdf_flush_workqueue() - flush the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
void qdf_flush_workqueue(qdf_handle_t hdl, qdf_workqueue_t *wqueue);

/**
 * qdf_destroy_work() - destroy the deferred task (synchronous)
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: none
 */
void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work);

/**
 * qdf_local_bh_disable() - disables softirq and tasklet processing
 * on the local processor
 *
 * Return: none
 */
void qdf_local_bh_disable(void);

/**
 * qdf_local_bh_enable() - enables softirq and tasklet processing
 * on the local processor
 *
 * Return: none
 */
void qdf_local_bh_enable(void);
217  
218  #else
219  /**
220   * qdf_create_bh - creates the bottom half deferred handler
221   * @bh: pointer to bottom
222   * @func: deferred function to run at bottom half interrupt context.
223   * @arg: argument for the deferred function
224   * Return: none
225   */
226  static inline void
qdf_create_bh(qdf_bh_t * bh,qdf_defer_fn_t func,void * arg)227  qdf_create_bh(qdf_bh_t  *bh, qdf_defer_fn_t  func, void  *arg)
228  {
229  	__qdf_init_bh(bh, func, arg);
230  }
231  
232  /**
233   * qdf_sched_bh - schedule a bottom half (DPC)
234   * @bh: pointer to bottom
235   * Return: none
236   */
qdf_sched_bh(qdf_bh_t * bh)237  static inline void qdf_sched_bh(qdf_bh_t *bh)
238  {
239  	__qdf_sched_bh(bh);
240  }
241  
242  /**
243   * qdf_destroy_bh - destroy the bh (synchronous)
244   * @bh: pointer to bottom
245   * Return: none
246   */
qdf_destroy_bh(qdf_bh_t * bh)247  static inline void qdf_destroy_bh(qdf_bh_t *bh)
248  {
249  	__qdf_disable_bh(bh);
250  }
251  
/**
 * qdf_local_bh_disable() - disable softirq and tasklet processing
 * on the local processor
 *
 * Return: none
 */
static inline void qdf_local_bh_disable(void)
{
	__qdf_local_bh_disable();
}
262  
/**
 * qdf_local_bh_enable() - re-enable softirq and tasklet processing
 * on the local processor
 *
 * Return: none
 */
static inline void qdf_local_bh_enable(void)
{
	__qdf_local_bh_enable();
}
273  
/******************* Non-Interrupt Context deferred Execution ***************/
275  
276  /**
277   * qdf_create_work - create a work/task queue, This runs in non-interrupt
278   * context, so can be preempted by H/W & S/W intr
279   * @hdl: OS handle
280   * @work: pointer to work
281   * @func: deferred function to run at bottom half non-interrupt context.
282   * @arg: argument for the deferred function
283   *
284   * Return: QDF status
285   */
qdf_create_work(qdf_handle_t hdl,qdf_work_t * work,qdf_defer_fn_t func,void * arg)286  static inline QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t  *work,
287  				   qdf_defer_fn_t  func, void  *arg)
288  {
289  	return __qdf_init_work(work, func, arg);
290  }
291  
292  /**
293   * qdf_create_workqueue - create a workqueue, This runs in non-interrupt
294   * context, so can be preempted by H/W & S/W intr
295   * @name: string
296   * Return: pointer of type qdf_workqueue_t
297   */
qdf_create_workqueue(char * name)298  static inline qdf_workqueue_t *qdf_create_workqueue(char *name)
299  {
300  	return  __qdf_create_workqueue(name);
301  }
302  
303  /**
304   * qdf_create_singlethread_workqueue() - create a single threaded workqueue
305   * @name: string
306   *
307   * This API creates a dedicated work queue with a single worker thread to avoid
308   * wasting unnecessary resources when works which needs to be submitted in this
309   * queue are not very critical and frequent.
310   *
311   * Return: pointer of type qdf_workqueue_t
312   */
qdf_create_singlethread_workqueue(char * name)313  static inline qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name)
314  {
315  	return  __qdf_create_singlethread_workqueue(name);
316  }
317  
318  /**
319   * qdf_alloc_high_prior_ordered_workqueue - alloc high-prior ordered workqueue
320   * @name: string
321   *
322   * Return: pointer of type qdf_workqueue_t
323   */
324  static inline
qdf_alloc_high_prior_ordered_workqueue(char * name)325  qdf_workqueue_t *qdf_alloc_high_prior_ordered_workqueue(char *name)
326  {
327  	return __qdf_alloc_high_prior_ordered_workqueue(name);
328  }
329  
330  /**
331   * qdf_alloc_unbound_workqueue - allocate an unbound workqueue
332   * @name: string
333   *
334   * Return: pointer of type qdf_workqueue_t
335   */
qdf_alloc_unbound_workqueue(char * name)336  static inline qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name)
337  {
338  	return  __qdf_alloc_unbound_workqueue(name);
339  }
340  
341  /**
342   * qdf_queue_work - Queue the work/task
343   * @hdl: OS handle
344   * @wqueue: pointer to workqueue
345   * @work: pointer to work
346   * Return: false if work was already on a queue, true otherwise
347   */
348  static inline bool
qdf_queue_work(qdf_handle_t hdl,qdf_workqueue_t * wqueue,qdf_work_t * work)349  qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work)
350  {
351  	return __qdf_queue_work(wqueue, work);
352  }
353  
354  /**
355   * qdf_flush_workqueue - flush the workqueue
356   * @hdl: OS handle
357   * @wqueue: pointer to workqueue
358   * Return: none
359   */
qdf_flush_workqueue(qdf_handle_t hdl,qdf_workqueue_t * wqueue)360  static inline void qdf_flush_workqueue(qdf_handle_t hdl,
361  				       qdf_workqueue_t *wqueue)
362  {
363  	return  __qdf_flush_workqueue(wqueue);
364  }
365  
366  /**
367   * qdf_destroy_workqueue - Destroy the workqueue
368   * @hdl: OS handle
369   * @wqueue: pointer to workqueue
370   * Return: none
371   */
qdf_destroy_workqueue(qdf_handle_t hdl,qdf_workqueue_t * wqueue)372  static inline void qdf_destroy_workqueue(qdf_handle_t hdl,
373  					 qdf_workqueue_t *wqueue)
374  {
375  	return  __qdf_destroy_workqueue(wqueue);
376  }
377  
378  /**
379   * qdf_sched_work - Schedule a deferred task on non-interrupt context
380   * @hdl: OS handle
381   * @work: pointer to work
382   *
383   * Return: false if work was already on a queue, true otherwise
384   */
qdf_sched_work(qdf_handle_t hdl,qdf_work_t * work)385  static inline bool qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work)
386  {
387  	return __qdf_sched_work(work);
388  }
389  
390  /**
391   * qdf_cancel_work() - Cancel a work
392   * @work: pointer to work
393   *
394   * Cancel work and wait for its execution to finish.
395   * This function can be used even if the work re-queues
396   * itself or migrates to another workqueue. On return
397   * from this function, work is guaranteed to be not
398   * pending or executing on any CPU. The caller must
399   * ensure that the workqueue on which work was last
400   * queued can't be destroyed before this function returns.
401   *
402   * Return: true if work was pending, false otherwise
403   */
qdf_cancel_work(qdf_work_t * work)404  static inline bool qdf_cancel_work(qdf_work_t *work)
405  {
406  	return __qdf_cancel_work(work);
407  }
408  
409  /**
410   * qdf_flush_work - Flush a deferred task on non-interrupt context
411   * @work: pointer to work
412   *
413   * Wait until work has finished execution. work is guaranteed to be
414   * idle on return if it hasn't been requeued since flush started.
415   *
416   * Return: none
417   */
qdf_flush_work(qdf_work_t * work)418  static inline void qdf_flush_work(qdf_work_t *work)
419  {
420  	__qdf_flush_work(work);
421  }
422  
423  /**
424   * qdf_disable_work - disable the deferred task (synchronous)
425   * @work: pointer to work
426   * Return: unsigned int
427   */
qdf_disable_work(qdf_work_t * work)428  static inline uint32_t qdf_disable_work(qdf_work_t *work)
429  {
430  	return __qdf_disable_work(work);
431  }
432  
433  /**
434   * qdf_destroy_work - destroy the deferred task (synchronous)
435   * @hdl: OS handle
436   * @work: pointer to work
437   * Return: none
438   */
qdf_destroy_work(qdf_handle_t hdl,qdf_work_t * work)439  static inline void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work)
440  {
441  	__qdf_disable_work(work);
442  }
443  #endif
444  
#endif /* __QDF_DEFER_H */
446