/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>

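/*
 * The scheduler context is a process-wide singleton: g_sched_ctx provides
 * the storage, and gp_sched_ctx is the published pointer that
 * scheduler_create_ctx()/scheduler_destroy_ctx() toggle and
 * scheduler_get_context() hands out.
 */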
static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

QDF_STATUS scheduler_create_ctx(void)
{
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	return gp_sched_ctx;
}

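/**
 * scheduler_all_queues_init() - initialize the free queue and all message
 *	queues
 * @sched_ctx: the global scheduler context
 *
 * Also resets every qid to qidx mapping to the invalid index
 * SCHEDULER_NUMBER_OF_MSG_QUEUE.
 *
 * Return: QDF_STATUS of operation
 */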
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	sched_debug("free msg queue init complete");

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_IS_STATUS_ERROR(status))
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

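/**
 * scheduler_all_queues_deinit() - de-initialize the free queue and all
 *	message queues
 * @sched_ctx: the global scheduler context
 *
 * Also resets every qid to qidx mapping back to the invalid index
 * SCHEDULER_NUMBER_OF_MSG_QUEUE.
 *
 * Return: QDF_STATUS of operation
 */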
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);

	sched_debug("free msg queue deinit complete");

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

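/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: the message queue to initialize
 *
 * Creates the queue's spinlock and its backing list, sized to hold
 * SCHEDULER_CORE_MAX_MESSAGES nodes.
 *
 * Return: QDF_STATUS of operation
 */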
QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	if (!msg_q) {
		sched_err("msg_q is null");
		return QDF_STATUS_E_FAILURE;
	}

	/* Initialize the queue lock */
	qdf_spinlock_create(&msg_q->mq_lock);

	/* Initialize the backing list */
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

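/**
 * scheduler_mq_deinit() - de-initialize a scheduler message queue
 * @msg_q: the message queue to de-initialize
 *
 * Currently only validates @msg_q; no explicit teardown of the lock or
 * list is performed here.
 */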
void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	if (!msg_q)
		sched_err("msg_q is null");
}

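/**
 * scheduler_mq_put() - enqueue a message wrapper at the tail of a queue
 * @msg_q: the message queue to append to
 * @msg_wrapper: the message wrapper to enqueue
 *
 * The insertion is performed under @msg_q's irqsave spinlock.
 */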
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!msg_q) {
		sched_err("msg_q is null");
		return;
	}

	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

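/**
 * scheduler_mq_put_front() - enqueue a message wrapper at the head of a
 *	queue
 * @msg_q: the message queue to prepend to
 * @msg_wrapper: the message wrapper to enqueue
 *
 * Identical to scheduler_mq_put(), except the wrapper jumps ahead of any
 * messages already queued.
 */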
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!msg_q) {
		sched_err("msg_q is null");
		return;
	}

	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

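/**
 * scheduler_mq_get() - dequeue the message wrapper at the head of a queue
 * @msg_q: the message queue to pop from
 *
 * A minimal usage sketch (a hypothetical caller; msg_type and qidx are
 * assumptions, not defined in this file):
 *
 *	struct scheduler_msg_wrapper *wrapper;
 *
 *	wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
 *	if (wrapper) {
 *		wrapper->msg_buf->type = msg_type;
 *		scheduler_mq_put(&sched_ctx->queue_ctx.sch_msg_q[qidx],
 *				 wrapper);
 *	}
 *
 * Return: the head wrapper, or NULL if @msg_q is NULL or empty
 */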
struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	qdf_list_node_t *listptr;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!msg_q) {
		sched_err("msg_q is null");
		return NULL;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	if (qdf_list_empty(&msg_q->mq_list)) {
		sched_warn("Scheduler Message Queue is empty");
	} else {
		listptr = msg_q->mq_list.anchor.next;
		msg_wrapper = (struct scheduler_msg_wrapper *)
					qdf_container_of(listptr,
						struct scheduler_msg_wrapper,
						msg_node);
		qdf_list_remove_node(&msg_q->mq_list, listptr);
	}
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return msg_wrapper;
}

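/**
 * scheduler_is_mq_empty() - check if a message queue is empty
 * @msg_q: the message queue to check
 *
 * Return: true if @msg_q is NULL or holds no messages
 */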
bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
{
	bool is_empty;

	if (!msg_q) {
		sched_err("msg_q is null");
		return true;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	is_empty = qdf_list_empty(&msg_q->mq_list);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return is_empty;
}

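/**
 * scheduler_queues_deinit() - de-initialize the scheduler queue contexts
 * @sched_ctx: the global scheduler context
 *
 * Return: QDF_STATUS of operation
 */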
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

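/**
 * scheduler_queues_init() - initialize the scheduler queue contexts
 * @sched_ctx: the global scheduler context
 *
 * Initializes all of the message queues, then binds each preallocated
 * message wrapper to its message buffer and seeds the free message queue
 * with every wrapper.
 *
 * Return: QDF_STATUS of operation
 */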
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	/* Bind each wrapper to its buffer and seed the free message queue */
	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
		sched_ctx->queue_ctx.msg_wrappers[i].msg_buf =
			&sched_ctx->queue_ctx.msg_buffers[i];
		qdf_init_list_head(
			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
				 &sched_ctx->queue_ctx.msg_wrappers[i]);
	}

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

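/**
 * scheduler_core_return_msg() - recycle a processed message wrapper
 * @sch_ctx: the global scheduler context
 * @msg_wrapper: the wrapper to return to the free message queue
 */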
static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
				      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	QDF_ASSERT(msg_wrapper);
	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	/* Return the message to the free message queue */
	qdf_init_list_head(&msg_wrapper->msg_node);
	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
}

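/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: the global scheduler context
 * @shutdown: set to true if a shutdown request was seen while draining
 *
 * Services the queues in strict priority order: after every message, the
 * scan restarts at index 0 (the timer queue), so higher priority queues
 * are always emptied before lower priority ones make progress. Each
 * message is processed under the watchdog timer, and pending suspend
 * requests are honored before returning.
 */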
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg_wrapper *msg_wrapper;

	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	/* Start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if the scheduler thread needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_info("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any suspend indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
			/* Check the next queue */
			i++;
			continue;
		}

		msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg_wrapper) {
			sched_err("msg_wrapper is null");
			QDF_ASSERT(0);
			return;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			struct scheduler_msg *msg = msg_wrapper->msg_buf;

			/* Process the message under the watchdog timer */
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			/* Return the message to the core */
			scheduler_core_return_msg(sch_ctx, msg_wrapper);
		}

		/* Start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any suspend indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* Controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for the resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	/* Nothing left to process; return and wait on the wait queue */
}

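/**
 * scheduler_thread() - the scheduler thread's main loop
 * @arg: the global scheduler context, passed at thread creation time
 *
 * Signals sch_start_event once running, then loops: sleep on the wait
 * queue until a post or suspend event is flagged, drain the queues, and
 * exit once a shutdown is signaled.
 *
 * A sketch of how the thread is expected to be started (the exact call
 * site is outside this file and is an assumption here):
 *
 *	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
 *						  "scheduler_thread");
 *
 * Return: 0, as required by the thread-function signature
 */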
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		sched_err("arg is null");
		QDF_DEBUG_PANIC();
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS) {
			sched_err("wait_event_interruptible returned -ERESTARTSYS");
			QDF_DEBUG_PANIC();
		}

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here, the scheduler thread must exit */
	sched_info("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

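/**
 * scheduler_cleanup_queues() - flush all messages from one scheduler queue
 * @sch_ctx: the global scheduler context
 * @idx: index of the message queue to flush
 *
 * MC timer messages are skipped, since their owning modules free them.
 * Every other message is released via its flush_callback when one is
 * registered, otherwise its bodyptr is freed directly, and the wrapper is
 * returned to the free message queue.
 */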
void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
{
	struct scheduler_msg_wrapper *msg_wrapper;
	QDF_STATUS (*scheduler_flush_callback) (struct scheduler_msg *);

	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	while ((msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
		if (msg_wrapper->msg_buf) {
			if ((QDF_MODULE_ID_SYS ==
				sch_ctx->queue_ctx.sch_msg_q[idx].qid) &&
			    (SYS_MSG_ID_MC_TIMER ==
				msg_wrapper->msg_buf->type)) {
				sched_debug("Timer is freed by each module, not here");
				continue;
			}
			sched_info("Freeing MC MSG message type %d, module id:%d",
				   msg_wrapper->msg_buf->type,
				   sch_ctx->queue_ctx.sch_msg_q[idx].qid);
			if (msg_wrapper->msg_buf->flush_callback) {
				sched_debug("Flush callback called for type-%x",
					    msg_wrapper->msg_buf->type);
				scheduler_flush_callback =
					msg_wrapper->msg_buf->flush_callback;
				scheduler_flush_callback(msg_wrapper->msg_buf);
			} else if (msg_wrapper->msg_buf->bodyptr) {
				sched_debug("noflush cb given for type-%x",
					    msg_wrapper->msg_buf->type);
				qdf_mem_free(msg_wrapper->msg_buf->bodyptr);
			}

			msg_wrapper->msg_buf->bodyptr = NULL;
			msg_wrapper->msg_buf->bodyval = 0;
			msg_wrapper->msg_buf->type = 0;
		}

		scheduler_core_return_msg(sch_ctx, msg_wrapper);
	}
}
457