/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_defer.h
 * This file abstracts the deferred execution APIs.
 */

#ifndef __QDF_DEFER_H
#define __QDF_DEFER_H

#include <qdf_types.h>
#include <i_qdf_defer.h>

/*
 * TODO: This implements work queues (worker threads, kernel threads, etc.).
 * Note that some OSes provide no way to cancel a scheduled work item: you
 * cannot free a work item while it is queued, and you cannot tell whether it
 * is queued unless it is currently running (in which case you know it is not
 * queued).
 *
 * So if, say, a module is asked to unload itself, how exactly can it make
 * sure its work is not queued on OSes that do not provide such a mechanism?
 */

/*
 * Representation of a work item and a work queue.
 */
typedef __qdf_work_t     qdf_work_t;
typedef __qdf_workqueue_t     qdf_workqueue_t;

/*
 * Representation of a bottom half.
 */
typedef __qdf_bh_t       qdf_bh_t;

#ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_create_bh() - create the bottom half deferred handler
 * @bh: pointer to the bottom half
 * @func: deferred function to run in bottom half (interrupt) context
 * @arg: argument for the deferred function
 *
 * Return: none
 */
void
qdf_create_bh(qdf_bh_t  *bh, qdf_defer_fn_t  func, void  *arg);

/**
 * qdf_sched_bh() - schedule a bottom half (DPC)
 * @bh: pointer to the bottom half
 *
 * Return: none
 */
void qdf_sched_bh(qdf_bh_t *bh);

/**
 * qdf_destroy_bh() - destroy the bottom half (synchronous)
 * @bh: pointer to the bottom half
 *
 * Return: none
 */
void qdf_destroy_bh(qdf_bh_t *bh);
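/*
 * Example (illustrative sketch only, not part of the API): a typical bottom
 * half lifecycle. The names my_softc and my_rx_bh are hypothetical, and the
 * deferred function is assumed to take a single void * argument, as the @arg
 * documentation above suggests.
 *
 *	struct my_softc {
 *		qdf_bh_t rx_bh;
 *	};
 *
 *	static void my_rx_bh(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *		// drain RX here, outside the hard interrupt handler
 *	}
 *
 *	// init path
 *	qdf_create_bh(&sc->rx_bh, my_rx_bh, sc);
 *	// interrupt handler: defer the heavy lifting
 *	qdf_sched_bh(&sc->rx_bh);
 *	// teardown (documented above as synchronous)
 *	qdf_destroy_bh(&sc->rx_bh);
 */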

/**
 * qdf_create_workqueue() - create a workqueue
 * @name: name of the workqueue
 *
 * Work scheduled on this queue runs in non-interrupt context, so it can be
 * preempted by hardware and software interrupts.
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_create_workqueue(char *name);

/**
 * qdf_create_singlethread_workqueue() - create a single-threaded workqueue
 * @name: name of the workqueue
 *
 * This API creates a dedicated workqueue with a single worker thread, to
 * avoid wasting resources when the work items submitted to this queue are
 * neither very critical nor very frequent.
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name);

/**
 * qdf_alloc_unbound_workqueue() - allocate an unbound workqueue
 * @name: name of the workqueue
 *
 * Return: pointer of type qdf_workqueue_t
 */
qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name);

/**
 * qdf_destroy_workqueue() - destroy the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
void qdf_destroy_workqueue(qdf_handle_t hdl, qdf_workqueue_t *wqueue);

/**
 * qdf_cancel_work() - cancel a work item
 * @work: pointer to work
 *
 * Cancel the work and wait for its execution to finish.
 * This function can be used even if the work re-queues
 * itself or migrates to another workqueue. On return
 * from this function, the work is guaranteed to be neither
 * pending nor executing on any CPU. The caller must
 * ensure that the workqueue on which the work was last
 * queued cannot be destroyed before this function returns.
 *
 * Return: true if the work was pending, false otherwise
 */
bool qdf_cancel_work(qdf_work_t *work);
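/*
 * Example (illustrative sketch only): using qdf_cancel_work() so that a work
 * item is neither pending nor running before its memory is released, e.g. on
 * module unload. The names my_ctx and work are hypothetical.
 *
 *	// teardown path: returns true if the work was still pending
 *	(void)qdf_cancel_work(&my_ctx->work);
 *	// the work is now guaranteed to be neither pending nor executing
 *	// on any CPU, so my_ctx can be freed safely
 */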

/**
 * qdf_disable_work() - disable the deferred task (synchronous)
 * @work: pointer to work
 *
 * Return: unsigned int
 */
uint32_t qdf_disable_work(qdf_work_t *work);

/**
 * qdf_flush_work() - flush a deferred task in non-interrupt context
 * @work: pointer to work
 *
 * Wait until the work has finished execution. The work is guaranteed to be
 * idle on return if it has not been requeued since the flush started.
 *
 * Return: none
 */
void qdf_flush_work(qdf_work_t *work);

/**
 * qdf_create_work() - create a work/task item
 * @hdl: OS handle
 * @work: pointer to work
 * @func: deferred function to run in non-interrupt (bottom half) context
 * @arg: argument for the deferred function
 *
 * The work runs in non-interrupt context, so it can be preempted by
 * hardware and software interrupts.
 *
 * Return: QDF status
 */
QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t  *work,
			   qdf_defer_fn_t  func, void  *arg);
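/*
 * Example (illustrative sketch only): create a work item once, then schedule
 * it from a hot path. The names my_ctx, my_defer_fn and ctx are hypothetical,
 * and the OS handle is shown as NULL on the assumption that the caller has no
 * meaningful handle to pass; verify what your platform expects.
 *
 *	static void my_defer_fn(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *		// heavier processing, running in non-interrupt context
 *	}
 *
 *	// init path
 *	if (qdf_create_work(NULL, &ctx->work, my_defer_fn, ctx) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	// later, e.g. from an ISR or a timer callback
 *	qdf_sched_work(NULL, &ctx->work);
 */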

/**
 * qdf_sched_work() - schedule a deferred task in non-interrupt context
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: none
 */
void qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work);

/**
 * qdf_queue_work() - queue the work/task
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 * @work: pointer to work
 *
 * Return: false if work was already on a queue, true otherwise
 */
bool
qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work);
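/*
 * Example (illustrative sketch only): pair a dedicated workqueue with a work
 * item, then drain and release both. Names are hypothetical, error handling
 * is abbreviated, and the NULL OS handle is an assumption as above.
 *
 *	qdf_workqueue_t *wq = qdf_create_singlethread_workqueue("my_wq");
 *
 *	if (!wq)
 *		return QDF_STATUS_E_NOMEM;
 *	qdf_create_work(NULL, &ctx->work, my_defer_fn, ctx);
 *
 *	// submit onto the dedicated queue instead of the default context
 *	qdf_queue_work(NULL, wq, &ctx->work);
 *
 *	// teardown: drain outstanding work, then release the queue
 *	qdf_flush_workqueue(NULL, wq);
 *	qdf_destroy_workqueue(NULL, wq);
 */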

/**
 * qdf_flush_workqueue() - flush the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
void qdf_flush_workqueue(qdf_handle_t hdl, qdf_workqueue_t *wqueue);

/**
 * qdf_destroy_work() - destroy the deferred task (synchronous)
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: none
 */
void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work);
#else
/**
 * qdf_create_bh() - create the bottom half deferred handler
 * @bh: pointer to the bottom half
 * @func: deferred function to run in bottom half (interrupt) context
 * @arg: argument for the deferred function
 *
 * Return: none
 */
static inline void
qdf_create_bh(qdf_bh_t  *bh, qdf_defer_fn_t  func, void  *arg)
{
	__qdf_init_bh(bh, func, arg);
}

/**
 * qdf_sched_bh() - schedule a bottom half (DPC)
 * @bh: pointer to the bottom half
 *
 * Return: none
 */
static inline void qdf_sched_bh(qdf_bh_t *bh)
{
	__qdf_sched_bh(bh);
}

/**
 * qdf_destroy_bh() - destroy the bottom half (synchronous)
 * @bh: pointer to the bottom half
 *
 * Return: none
 */
static inline void qdf_destroy_bh(qdf_bh_t *bh)
{
	__qdf_disable_bh(bh);
}

/********************* Non-Interrupt Context Deferred Execution *************/

/**
 * qdf_create_work() - create a work/task item
 * @hdl: OS handle
 * @work: pointer to work
 * @func: deferred function to run in non-interrupt (bottom half) context
 * @arg: argument for the deferred function
 *
 * The work runs in non-interrupt context, so it can be preempted by
 * hardware and software interrupts.
 *
 * Return: QDF status
 */
static inline QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t  *work,
				   qdf_defer_fn_t  func, void  *arg)
{
	return __qdf_init_work(work, func, arg);
}

/**
 * qdf_create_workqueue() - create a workqueue
 * @name: name of the workqueue
 *
 * Work scheduled on this queue runs in non-interrupt context, so it can be
 * preempted by hardware and software interrupts.
 *
 * Return: pointer of type qdf_workqueue_t
 */
static inline qdf_workqueue_t *qdf_create_workqueue(char *name)
{
	return __qdf_create_workqueue(name);
}

/**
 * qdf_create_singlethread_workqueue() - create a single-threaded workqueue
 * @name: name of the workqueue
 *
 * This API creates a dedicated workqueue with a single worker thread, to
 * avoid wasting resources when the work items submitted to this queue are
 * neither very critical nor very frequent.
 *
 * Return: pointer of type qdf_workqueue_t
 */
static inline qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name)
{
	return __qdf_create_singlethread_workqueue(name);
}

/**
 * qdf_alloc_high_prior_ordered_workqueue() - allocate a high-priority
 * ordered workqueue
 * @name: name of the workqueue
 *
 * Return: pointer of type qdf_workqueue_t
 */
static inline
qdf_workqueue_t *qdf_alloc_high_prior_ordered_workqueue(char *name)
{
	return __qdf_alloc_high_prior_ordered_workqueue(name);
}
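/*
 * Example (illustrative sketch only): the name suggests an ordered,
 * high-priority queue, i.e. items are assumed to execute one at a time in
 * the order they were queued; confirm this against the underlying
 * __qdf_alloc_high_prior_ordered_workqueue() implementation before relying
 * on it. The names cmd_wq, cmd_a and cmd_b are hypothetical.
 *
 *	qdf_workqueue_t *cmd_wq =
 *		qdf_alloc_high_prior_ordered_workqueue("cmd_wq");
 *
 *	// with an ordered queue, cmd_a is expected to finish before
 *	// cmd_b starts
 *	qdf_queue_work(NULL, cmd_wq, &ctx->cmd_a);
 *	qdf_queue_work(NULL, cmd_wq, &ctx->cmd_b);
 */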

/**
 * qdf_alloc_unbound_workqueue() - allocate an unbound workqueue
 * @name: name of the workqueue
 *
 * Return: pointer of type qdf_workqueue_t
 */
static inline qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name)
{
	return __qdf_alloc_unbound_workqueue(name);
}

/**
 * qdf_queue_work() - queue the work/task
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 * @work: pointer to work
 *
 * Return: false if work was already on a queue, true otherwise
 */
static inline bool
qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work)
{
	return __qdf_queue_work(wqueue, work);
}

/**
 * qdf_flush_workqueue() - flush the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
static inline void qdf_flush_workqueue(qdf_handle_t hdl,
				       qdf_workqueue_t *wqueue)
{
	__qdf_flush_workqueue(wqueue);
}

/**
 * qdf_destroy_workqueue() - destroy the workqueue
 * @hdl: OS handle
 * @wqueue: pointer to workqueue
 *
 * Return: none
 */
static inline void qdf_destroy_workqueue(qdf_handle_t hdl,
					 qdf_workqueue_t *wqueue)
{
	__qdf_destroy_workqueue(wqueue);
}

/**
 * qdf_sched_work() - schedule a deferred task in non-interrupt context
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: none
 */
static inline void qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work)
{
	__qdf_sched_work(work);
}

/**
 * qdf_cancel_work() - cancel a work item
 * @work: pointer to work
 *
 * Cancel the work and wait for its execution to finish.
 * This function can be used even if the work re-queues
 * itself or migrates to another workqueue. On return
 * from this function, the work is guaranteed to be neither
 * pending nor executing on any CPU. The caller must
 * ensure that the workqueue on which the work was last
 * queued cannot be destroyed before this function returns.
 *
 * Return: true if the work was pending, false otherwise
 */
static inline bool qdf_cancel_work(qdf_work_t *work)
{
	return __qdf_cancel_work(work);
}

/**
 * qdf_flush_work() - flush a deferred task in non-interrupt context
 * @work: pointer to work
 *
 * Wait until the work has finished execution. The work is guaranteed to be
 * idle on return if it has not been requeued since the flush started.
 *
 * Return: none
 */
static inline void qdf_flush_work(qdf_work_t *work)
{
	__qdf_flush_work(work);
}

/**
 * qdf_disable_work() - disable the deferred task (synchronous)
 * @work: pointer to work
 *
 * Return: unsigned int
 */
static inline uint32_t qdf_disable_work(qdf_work_t *work)
{
	return __qdf_disable_work(work);
}

/**
 * qdf_destroy_work() - destroy the deferred task (synchronous)
 * @hdl: OS handle
 * @work: pointer to work
 *
 * Return: none
 */
static inline void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work)
{
	__qdf_disable_work(work);
}
#endif /* ENHANCED_OS_ABSTRACTION */

#endif /* __QDF_DEFER_H */