/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

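/**
 * scheduler_create_ctx() - assign the global scheduler context pointer
 *
 * Points the global scheduler context pointer at the statically allocated
 * scheduler context.
 *
 * Return: QDF_STATUS_SUCCESS (always)
 */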
QDF_STATUS scheduler_create_ctx(void)
{
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

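/**
 * scheduler_destroy_ctx() - clear the global scheduler context pointer
 *
 * Return: QDF_STATUS_SUCCESS (always)
 */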
QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}

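/**
 * scheduler_get_context() - get the global scheduler context
 *
 * Return: pointer to the global scheduler context, or NULL if the context
 *	has not been created
 */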
struct scheduler_ctx *scheduler_get_context(void)
{
	return gp_sched_ctx;
}

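/**
 * scheduler_all_queues_init() - initialize all of the scheduler queues
 * @sched_ctx: pointer to the global scheduler context
 *
 * Initializes the free message queue and each of the scheduler message
 * queues, and resets the qid to qidx map to invalid values.
 *
 * Return: QDF_STATUS_SUCCESS on success, an appropriate error status on
 *	failure
 */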
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
	if (QDF_STATUS_SUCCESS != status)
		return status;

	sched_debug("free msg queue init complete");

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

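/**
 * scheduler_all_queues_deinit() - de-initialize all of the scheduler queues
 * @sched_ctx: pointer to the global scheduler context
 *
 * De-initializes the free message queue and each of the scheduler message
 * queues, and resets the qid to qidx map to invalid values.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if
 *	@sched_ctx is null
 */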
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);

	sched_debug("free msg queue deinit complete");

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

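/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: pointer to the message queue to initialize
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if @msg_q
 *	is null
 */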
QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	if (!msg_q) {
		sched_err("msg_q is null");
		return QDF_STATUS_E_FAILURE;
	}

	/* initialize the queue lock */
	qdf_spinlock_create(&msg_q->mq_lock);

	/* initialize the queue's list data structure */
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

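/**
 * scheduler_mq_deinit() - de-initialize a scheduler message queue
 * @msg_q: pointer to the message queue to de-initialize
 *
 * Return: none
 */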
void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	if (!msg_q)
		sched_err("msg_q is null");
}

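/**
 * scheduler_mq_put() - add a message wrapper to the tail of a message queue
 * @msg_q: pointer to the message queue
 * @msg_wrapper: pointer to the message wrapper to enqueue
 *
 * Return: none
 */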
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!msg_q) {
		sched_err("msg_q is null");
		return;
	}

	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

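/**
 * scheduler_mq_put_front() - add a message wrapper to the head of a
 *	message queue
 * @msg_q: pointer to the message queue
 * @msg_wrapper: pointer to the message wrapper to enqueue
 *
 * Return: none
 */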
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!msg_q) {
		sched_err("msg_q is null");
		return;
	}

	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

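/**
 * scheduler_mq_get() - remove a message wrapper from the head of a
 *	message queue
 * @msg_q: pointer to the message queue
 *
 * Return: pointer to the dequeued message wrapper, or NULL if the queue
 *	is empty or @msg_q is null
 */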
struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	qdf_list_node_t *listptr;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!msg_q) {
		sched_err("msg_q is null");
		return NULL;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	if (qdf_list_empty(&msg_q->mq_list)) {
		sched_warn("Scheduler Message Queue is empty");
	} else {
		listptr = msg_q->mq_list.anchor.next;
		msg_wrapper = (struct scheduler_msg_wrapper *)
					qdf_container_of(listptr,
						struct scheduler_msg_wrapper,
						msg_node);
		qdf_list_remove_node(&msg_q->mq_list, listptr);
	}
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return msg_wrapper;
}

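/**
 * scheduler_is_mq_empty() - check if a message queue is empty
 * @msg_q: pointer to the message queue to check
 *
 * Return: true if the queue is empty (or @msg_q is null), false otherwise
 */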
bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
{
	bool is_empty;

	if (!msg_q) {
		sched_err("msg_q is null");
		return true;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	is_empty = qdf_list_empty(&msg_q->mq_list);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return is_empty;
}

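/**
 * scheduler_queues_deinit() - de-initialize the scheduler queues
 * @sched_ctx: pointer to the global scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success, an appropriate error status on
 *	failure
 */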
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

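/**
 * scheduler_queues_init() - initialize the scheduler queues
 * @sched_ctx: pointer to the global scheduler context
 *
 * Initializes all of the scheduler message queues, then populates the free
 * message queue with the statically allocated message wrappers.
 *
 * Return: QDF_STATUS_SUCCESS on success, an appropriate error status on
 *	failure
 */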
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_STATUS_SUCCESS != status) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	/* attach each statically allocated message buffer to its wrapper
	 * and push the wrapper onto the free message queue
	 */
	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
		sched_ctx->queue_ctx.msg_wrappers[i].msg_buf =
			&sched_ctx->queue_ctx.msg_buffers[i];
		qdf_init_list_head(
			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
				 &sched_ctx->queue_ctx.msg_wrappers[i]);
	}

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

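/**
 * scheduler_core_return_msg() - return a message wrapper to the free queue
 * @sch_ctx: pointer to the global scheduler context
 * @msg_wrapper: pointer to the message wrapper to return
 *
 * Return: none
 */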
static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
				      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	QDF_ASSERT(msg_wrapper);
	if (!msg_wrapper) {
		sched_err("msg_wrapper is null");
		return;
	}

	/* return the message to the free message queue */
	qdf_init_list_head(&msg_wrapper->msg_node);
	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
}

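/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: pointer to the global scheduler context
 * @shutdown: set to true if the scheduler thread was signaled to shut down
 *
 * Services the message queues in strict priority order: after each message
 * is processed, scanning restarts from the highest priority queue (index 0).
 * Shutdown and suspend indications are honored between messages.
 *
 * Return: none
 */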
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg_wrapper *msg_wrapper;

	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_info("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any suspend indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
			/* check the next queue */
			i++;
			continue;
		}

		msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg_wrapper) {
			sched_err("msg_wrapper is null");
			QDF_ASSERT(0);
			return;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			struct scheduler_msg *msg = msg_wrapper->msg_buf;

			/* arm the watchdog while the message is processed */
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			/* return the message to the Core */
			scheduler_core_return_msg(sch_ctx, msg_wrapper);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any suspend indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* wait for the resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	/* nothing left to process; the thread will block on its wait queue */
}

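/**
 * scheduler_thread() - scheduler thread entry point
 * @arg: pointer to the global scheduler context
 *
 * Sleeps on the scheduler wait queue until a post or suspend event is
 * flagged, then services the message queues until signaled to shut down.
 *
 * Return: 0
 */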
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int ret_wait_status = 0;
	bool shutdown = false;

	if (!arg) {
		sched_err("arg is null");
		QDF_DEBUG_PANIC();
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * was created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		ret_wait_status = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (ret_wait_status == -ERESTARTSYS) {
			sched_err("wait_event_interruptible returned -ERESTARTSYS");
			QDF_DEBUG_PANIC();
		}

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* if we get here, the scheduler thread must exit */
	sched_info("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

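/**
 * scheduler_cleanup_queues() - flush all messages from the given queue
 * @sch_ctx: pointer to the global scheduler context
 * @idx: index of the scheduler message queue to flush
 *
 * Drains the queue, invoking each message's flush callback if one is
 * registered, otherwise freeing the message body if one is attached, and
 * returns each message wrapper to the free queue.
 *
 * Return: none
 */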
void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
{
	struct scheduler_msg_wrapper *msg_wrapper;
	QDF_STATUS (*scheduler_flush_callback) (struct scheduler_msg *);

	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	while ((msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
		if (msg_wrapper->msg_buf) {
			sched_info("Freeing MC WMA message type %d",
				   msg_wrapper->msg_buf->type);

			if (msg_wrapper->msg_buf->flush_callback) {
				sched_debug("Flush callback called for type-%x",
					    msg_wrapper->msg_buf->type);
				scheduler_flush_callback =
					msg_wrapper->msg_buf->flush_callback;
				scheduler_flush_callback(msg_wrapper->msg_buf);
			} else if (msg_wrapper->msg_buf->bodyptr) {
				sched_debug("No flush callback for type-%x; freeing bodyptr",
					    msg_wrapper->msg_buf->type);
				qdf_mem_free(msg_wrapper->msg_buf->bodyptr);
			}

			msg_wrapper->msg_buf->bodyptr = NULL;
			msg_wrapper->msg_buf->bodyval = 0;
			msg_wrapper->msg_buf->type = 0;
		}

		scheduler_core_return_msg(sch_ctx, msg_wrapper);
	}
}
459