/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_threads
 * QCA driver framework (QDF) thread APIs
 */

/* Include Files */
#include <qdf_threads.h>
#include <qdf_types.h>
#include <qdf_trace.h>
#include <linux/jiffies.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
#include <linux/sched.h>
#else
#include <linux/sched/signal.h>
#endif /* KERNEL_VERSION(4, 11, 0) */
/* Test against msm kernel version */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) && \
	IS_ENABLED(CONFIG_SCHED_WALT)
#include <linux/sched/walt.h>
#endif
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/stacktrace.h>
#include <qdf_defer.h>
#include <qdf_module.h>
#include <linux/cpumask.h>
/* Function declarations and documentation */

typedef int (*qdf_thread_os_func)(void *data);

/**
 * qdf_sleep() - sleep
 * @ms_interval: Number of milliseconds to suspend the current thread.
 * A value of 0 may or may not cause the current thread to yield.
 *
 * This function suspends the execution of the current thread
 * until the specified timeout interval elapses.
 *
 * Return: none
 */
void qdf_sleep(uint32_t ms_interval)
{
	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return;
	}
	msleep_interruptible(ms_interval);
}
qdf_export_symbol(qdf_sleep);

/**
 * qdf_sleep_us() - sleep
 * @us_interval: Number of microseconds to suspend the current thread.
 * A value of 0 may or may not cause the current thread to yield.
 *
 * This function suspends the execution of the current thread
 * until the specified timeout interval elapses.
 *
 * Return: none
 */
void qdf_sleep_us(uint32_t us_interval)
{
	unsigned long timeout = usecs_to_jiffies(us_interval) + 1;

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return;
	}

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
}
qdf_export_symbol(qdf_sleep_us);

/**
 * qdf_busy_wait() - busy wait
 * @us_interval: Number of microseconds to busy wait.
 *
 * This function places the current thread in busy wait until the specified
 * timeout interval elapses. If the interval is greater than 50us on WM, the
 * behaviour is undefined.
 *
 * Return: none
 */
void qdf_busy_wait(uint32_t us_interval)
{
	udelay(us_interval);
}
qdf_export_symbol(qdf_busy_wait);
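
/*
 * Usage sketch (hypothetical, not part of the original file): choosing
 * between the wait primitives above. qdf_sleep() and qdf_sleep_us() may
 * schedule the caller away and must not be used in interrupt context,
 * while qdf_busy_wait() spins in place and should be kept to very short
 * intervals. fw_ready() and its reg argument are made-up names.
 *
 *	static void wait_for_fw_ready(void *reg)
 *	{
 *		int spins = 10;
 *
 *		while (spins--) {
 *			if (fw_ready(reg))	// hypothetical helper
 *				return;
 *			qdf_busy_wait(20);	// short spin, under 50us
 *		}
 *
 *		while (!fw_ready(reg))
 *			qdf_sleep(10);		// slow path: yield the CPU
 *	}
 */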

#if defined(PF_WAKE_UP_IDLE) || IS_ENABLED(CONFIG_SCHED_WALT)
void qdf_set_wake_up_idle(bool idle)
{
	set_wake_up_idle(idle);
}
#else
void qdf_set_wake_up_idle(bool idle)
{
}
#endif /* PF_WAKE_UP_IDLE || CONFIG_SCHED_WALT */

qdf_export_symbol(qdf_set_wake_up_idle);

void qdf_set_user_nice(qdf_thread_t *thread, long nice)
{
	set_user_nice(thread, nice);
}
qdf_export_symbol(qdf_set_user_nice);

qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data,
				const char thread_name[])
{
	struct task_struct *task;

	task = kthread_create(thread_handler, data, thread_name);

	if (IS_ERR(task))
		return NULL;

	return task;
}
qdf_export_symbol(qdf_create_thread);

static uint16_t qdf_thread_id;

qdf_thread_t *qdf_thread_run(qdf_thread_func callback, void *context)
{
	struct task_struct *thread;

	thread = kthread_create((qdf_thread_os_func)callback, context,
				"qdf %u", qdf_thread_id++);
	if (IS_ERR(thread))
		return NULL;

	get_task_struct(thread);
	wake_up_process(thread);

	return thread;
}
qdf_export_symbol(qdf_thread_run);

QDF_STATUS qdf_thread_join(qdf_thread_t *thread)
{
	QDF_STATUS status;

	QDF_BUG(thread);

	status = (QDF_STATUS)kthread_stop(thread);
	put_task_struct(thread);

	return status;
}
qdf_export_symbol(qdf_thread_join);
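
/*
 * Usage sketch (hypothetical): the qdf_thread_run()/qdf_thread_join()
 * lifecycle. qdf_thread_join() invokes kthread_stop() underneath, so the
 * callback should poll qdf_thread_should_stop() to exit cleanly. The
 * names my_worker, do_work, and ctx are assumptions.
 *
 *	static QDF_STATUS my_worker(void *arg)
 *	{
 *		while (!qdf_thread_should_stop())
 *			do_work(arg);	// hypothetical work item
 *
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	qdf_thread_t *worker = qdf_thread_run(my_worker, ctx);
 *
 *	if (worker)
 *		qdf_thread_join(worker);	// stops thread, drops the ref
 */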

bool qdf_thread_should_stop(void)
{
	return kthread_should_stop();
}
qdf_export_symbol(qdf_thread_should_stop);

int qdf_wake_up_process(qdf_thread_t *thread)
{
	return wake_up_process(thread);
}
qdf_export_symbol(qdf_wake_up_process);

/* save_stack_trace_tsk() is exported for:
 * 1) non-arm architectures
 * 2) arm architectures in kernel versions >=4.14
 * 3) backported kernels defining BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM
 */
#if ((defined(WLAN_HOST_ARCH_ARM) && !WLAN_HOST_ARCH_ARM) || \
	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \
	defined(BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM)) && \
	defined(CONFIG_STACKTRACE)
#define QDF_PRINT_TRACE_COUNT 32

#ifdef CONFIG_ARCH_STACKWALK
void qdf_print_thread_trace(qdf_thread_t *thread)
{
	const int spaces = 4;
	struct task_struct *task = thread;
	unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0};
	unsigned int nr_entries = 0;
	unsigned int max_entries = QDF_PRINT_TRACE_COUNT;
	int skip = 0;

	nr_entries = stack_trace_save_tsk(task, entries, max_entries, skip);
	stack_trace_print(entries, nr_entries, spaces);
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
void qdf_print_thread_trace(qdf_thread_t *thread)
{
	const int spaces = 4;
	struct task_struct *task = thread;
	unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0};
	struct stack_trace trace = {
		.nr_entries = 0,
		.skip = 0,
		.entries = &entries[0],
		.max_entries = QDF_PRINT_TRACE_COUNT,
	};

	save_stack_trace_tsk(task, &trace);
	stack_trace_print(entries, trace.nr_entries, spaces);
}
#else
void qdf_print_thread_trace(qdf_thread_t *thread)
{
	const int spaces = 4;
	struct task_struct *task = thread;
	unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0};
	struct stack_trace trace = {
		.nr_entries = 0,
		.skip = 0,
		.entries = &entries[0],
		.max_entries = QDF_PRINT_TRACE_COUNT,
	};

	save_stack_trace_tsk(task, &trace);
	print_stack_trace(&trace, spaces);
}
#endif

#else
void qdf_print_thread_trace(qdf_thread_t *thread) { }
#endif /* KERNEL_VERSION(4, 14, 0) */
qdf_export_symbol(qdf_print_thread_trace);
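
/*
 * Usage sketch (hypothetical): dump the kernel stack of a thread that
 * looks wedged, e.g. from a watchdog or recovery path. suspect is an
 * assumed qdf_thread_t pointer obtained elsewhere:
 *
 *	qdf_print_thread_trace(suspect);
 */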

qdf_thread_t *qdf_get_current_task(void)
{
	return current;
}
qdf_export_symbol(qdf_get_current_task);

int qdf_get_current_pid(void)
{
	return current->pid;
}
qdf_export_symbol(qdf_get_current_pid);

const char *qdf_get_current_comm(void)
{
	return current->comm;
}
qdf_export_symbol(qdf_get_current_comm);
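
/*
 * Usage sketch (hypothetical): the current-task helpers above are handy
 * for tagging diagnostics with the executing context, e.g.:
 *
 *	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
 *		  "serviced by %s (pid %d)",
 *		  qdf_get_current_comm(), qdf_get_current_pid());
 */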

void
qdf_thread_set_cpus_allowed_mask(qdf_thread_t *thread, qdf_cpu_mask *new_mask)
{
	set_cpus_allowed_ptr(thread, new_mask);
}

qdf_export_symbol(qdf_thread_set_cpus_allowed_mask);

void qdf_cpumask_clear(qdf_cpu_mask *dstp)
{
	cpumask_clear(dstp);
}

qdf_export_symbol(qdf_cpumask_clear);

void qdf_cpumask_set_cpu(unsigned int cpu, qdf_cpu_mask *dstp)
{
	cpumask_set_cpu(cpu, dstp);
}
qdf_export_symbol(qdf_cpumask_set_cpu);

void qdf_cpumask_clear_cpu(unsigned int cpu, qdf_cpu_mask *dstp)
{
	cpumask_clear_cpu(cpu, dstp);
}

qdf_export_symbol(qdf_cpumask_clear_cpu);

void qdf_cpumask_setall(qdf_cpu_mask *dstp)
{
	cpumask_setall(dstp);
}

qdf_export_symbol(qdf_cpumask_setall);

bool qdf_cpumask_empty(const qdf_cpu_mask *srcp)
{
	return cpumask_empty(srcp);
}

qdf_export_symbol(qdf_cpumask_empty);

void qdf_cpumask_copy(qdf_cpu_mask *dstp,
		      const qdf_cpu_mask *srcp)
{
	cpumask_copy(dstp, srcp);
}

qdf_export_symbol(qdf_cpumask_copy);

void qdf_cpumask_or(qdf_cpu_mask *dstp, qdf_cpu_mask *src1p,
		    qdf_cpu_mask *src2p)
{
	cpumask_or(dstp, src1p, src2p);
}

qdf_export_symbol(qdf_cpumask_or);

void
qdf_thread_cpumap_print_to_pagebuf(bool list, char *new_mask_str,
				   qdf_cpu_mask *new_mask)
{
	cpumap_print_to_pagebuf(list, new_mask_str, new_mask);
}

qdf_export_symbol(qdf_thread_cpumap_print_to_pagebuf);

bool
qdf_cpumask_and(qdf_cpu_mask *dstp, const qdf_cpu_mask *src1p,
		const qdf_cpu_mask *src2p)
{
	return cpumask_and(dstp, src1p, src2p);
}

qdf_export_symbol(qdf_cpumask_and);

bool
qdf_cpumask_andnot(qdf_cpu_mask *dstp, const qdf_cpu_mask *src1p,
		   const qdf_cpu_mask *src2p)
{
	return cpumask_andnot(dstp, src1p, src2p);
}

qdf_export_symbol(qdf_cpumask_andnot);

bool
qdf_cpumask_equal(const qdf_cpu_mask *src1p, const qdf_cpu_mask *src2p)
{
	return cpumask_equal(src1p, src2p);
}

qdf_export_symbol(qdf_cpumask_equal);
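
/*
 * Usage sketch (hypothetical): composing a mask and pinning a thread with
 * the wrappers above. worker is an assumed qdf_thread_t pointer; the CPU
 * ids and buffer size are arbitrary.
 *
 *	qdf_cpu_mask mask;
 *	char buf[64];
 *
 *	qdf_cpumask_clear(&mask);
 *	qdf_cpumask_set_cpu(0, &mask);
 *	qdf_cpumask_set_cpu(1, &mask);
 *	qdf_thread_set_cpus_allowed_mask(worker, &mask);
 *
 *	qdf_thread_cpumap_print_to_pagebuf(true, buf, &mask);	// "0-1"
 */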

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0))
void
qdf_cpumask_complement(qdf_cpu_mask *dstp, const qdf_cpu_mask *srcp)
{
	cpumask_andnot(dstp, cpu_possible_mask, srcp);
}
#else
void
qdf_cpumask_complement(qdf_cpu_mask *dstp, const qdf_cpu_mask *srcp)
{
	cpumask_complement(dstp, srcp);
}
#endif

qdf_export_symbol(qdf_cpumask_complement);

#if defined(WALT_GET_CPU_TAKEN_SUPPORT) && IS_ENABLED(CONFIG_SCHED_WALT)
qdf_cpu_mask qdf_walt_get_cpus_taken(void)
{
	return walt_get_cpus_taken();
}

qdf_export_symbol(qdf_walt_get_cpus_taken);
#endif