/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <scheduler_core.h>

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

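/**
 * scheduler_create_ctx() - assign the global scheduler context pointer
 *
 * Points the module-private context pointer at the statically allocated
 * scheduler context.
 *
 * Return: QDF_STATUS_SUCCESS
 */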
QDF_STATUS scheduler_create_ctx(void)
{
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

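/**
 * scheduler_destroy_ctx() - clear the global scheduler context pointer
 *
 * Return: QDF_STATUS_SUCCESS
 */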
QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}

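/**
 * scheduler_get_context() - get the global scheduler context
 *
 * Return: pointer to the scheduler context, or NULL if it has not been
 *	   created yet
 *
 * Example (sketch; callers are expected to NULL-check the result):
 *
 *	struct scheduler_ctx *sched_ctx = scheduler_get_context();
 *
 *	if (!sched_ctx)
 *		return QDF_STATUS_E_FAILURE;
 */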
struct scheduler_ctx *scheduler_get_context(void)
{
	return gp_sched_ctx;
}

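/**
 * scheduler_all_queues_init() - initialize the free message queue and all
 *	scheduler message queues
 * @sched_ctx: pointer to the scheduler context
 *
 * Also resets every qid-to-qidx mapping to an invalid value so that only
 * explicitly registered modules map to a real queue index.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */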
static QDF_STATUS scheduler_all_queues_init(
			struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
	if (QDF_STATUS_SUCCESS != status)
		return status;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
		QDF_TRACE_LEVEL_ERROR, FL("free msg queue init complete"));

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(
				&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));

	return status;
}

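/**
 * scheduler_all_queues_deinit() - de-initialize the free message queue and
 *	all scheduler message queues
 * @sched_ctx: pointer to the scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */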
static QDF_STATUS scheduler_all_queues_deinit(
		struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
		  QDF_TRACE_LEVEL_ERROR, FL("free msg queue deinit complete"));

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));

	return status;
}

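/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: pointer to the message queue to initialize
 *
 * Creates the queue's spinlock and its backing list.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if @msg_q
 *	   is NULL
 */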
QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	/* Now initialize the lock */
	qdf_spinlock_create(&msg_q->mq_lock);
	/* Now initialize the List data structure */
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));

	return QDF_STATUS_SUCCESS;
}

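/**
 * scheduler_mq_deinit() - de-initialize a scheduler message queue
 * @msg_q: pointer to the message queue to de-initialize
 *
 * Currently only validates @msg_q; the embedded list and lock need no
 * explicit teardown here.
 *
 * Return: none
 */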
void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				"%s: NULL pointer passed", __func__);
		return;
	}
}

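/**
 * scheduler_mq_put() - insert a message wrapper at the tail of a queue
 * @msg_q: pointer to the message queue
 * @msg_wrapper: message wrapper to enqueue
 *
 * The insertion is protected by the queue's spinlock.
 *
 * Example (sketch; qidx stands for some valid queue index obtained from
 * the qid-to-qidx map):
 *
 *	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
 *	if (msg_wrapper)
 *		scheduler_mq_put(&sched_ctx->queue_ctx.sch_msg_q[qidx],
 *				 msg_wrapper);
 *
 * Return: none
 */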
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
			struct scheduler_msg_wrapper *msg_wrapper)
{
	if (msg_q == NULL || msg_wrapper == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				"%s: NULL pointer passed", __func__);
		return;
	}
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

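/**
 * scheduler_mq_put_front() - insert a message wrapper at the head of a queue
 * @msg_q: pointer to the message queue
 * @msg_wrapper: message wrapper to enqueue
 *
 * The wrapper is inserted at the head of the list, so it will be the next
 * message returned by scheduler_mq_get().
 *
 * Return: none
 */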
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			struct scheduler_msg_wrapper *msg_wrapper)
{
	if ((msg_q == NULL) || (msg_wrapper == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				"%s: NULL pointer passed", __func__);
		return;
	}
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

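/**
 * scheduler_mq_get() - remove and return the message wrapper at the head
 *	of a queue
 * @msg_q: pointer to the message queue
 *
 * Return: the dequeued message wrapper, or NULL if @msg_q is NULL or the
 *	   queue is empty
 */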
struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	qdf_list_node_t *listptr;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				"%s: NULL pointer passed", __func__);
		return NULL;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	if (qdf_list_empty(&msg_q->mq_list)) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_WARN,
			  "%s: Scheduler Message Queue is empty", __func__);
	} else {
		listptr = msg_q->mq_list.anchor.next;
		msg_wrapper = qdf_container_of(listptr,
					       struct scheduler_msg_wrapper,
					       msg_node);
		qdf_list_remove_node(&msg_q->mq_list, listptr);
	}
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return msg_wrapper;
}

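/**
 * scheduler_is_mq_empty() - check whether a message queue is empty
 * @msg_q: pointer to the message queue
 *
 * Return: true if the queue is empty or @msg_q is NULL, false otherwise
 */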
bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
{
	bool is_empty;

	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				"%s: NULL pointer passed", __func__);
		/* a NULL queue is treated as empty */
		return true;
	}
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	is_empty = qdf_list_empty(&msg_q->mq_list);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return is_empty;
}

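/**
 * scheduler_queues_deinit() - de-initialize all scheduler queues
 * @sched_ctx: pointer to the scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */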
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

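/**
 * scheduler_queues_init() - initialize all scheduler queues and populate
 *	the free message queue
 * @sched_ctx: pointer to the scheduler context
 *
 * After the queues are initialized, every statically allocated message
 * wrapper is bound to its message buffer and placed on the free message
 * queue.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */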
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_STATUS_SUCCESS != status) {
		scheduler_all_queues_deinit(sched_ctx);
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
				FL("Failed to initialize the msg queues"));
		return status;
	}
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
		QDF_TRACE_LEVEL_ERROR, FL("Queue init complete"));

	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
		sched_ctx->queue_ctx.msg_wrappers[i].msg_buf =
			&sched_ctx->queue_ctx.msg_buffers[i];
		qdf_init_list_head(
			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
				 &sched_ctx->queue_ctx.msg_wrappers[i]);
	}
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));

	return status;
}

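/**
 * scheduler_core_return_msg() - return a message wrapper to the free
 *	message queue
 * @sch_ctx: pointer to the scheduler context
 * @msg_wrapper: message wrapper being returned
 *
 * Re-initializes the wrapper's list node and places it back on the free
 * message queue for reuse.
 *
 * Return: none
 */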
static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
			struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			"%s: NULL sched_ctx passed", __func__);
		return;
	}

	QDF_ASSERT(NULL != msg_wrapper);

	if (msg_wrapper == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			FL("msg_wrapper is NULL"));
		return;
	}

	/*
	 * Return the message on the free message queue
	 */
	qdf_init_list_head(&msg_wrapper->msg_node);
	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
}

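/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: pointer to the scheduler context
 * @shutdown: set to true if the thread has been signaled to shut down
 *
 * Services the queues strictly in priority order: after every processed
 * message the scan restarts from index 0, so a higher-priority queue is
 * always drained before a lower-priority one. Shutdown and suspend
 * requests signaled through sch_event_flag are honored along the way.
 *
 * Return: none
 */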
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				FL("sch_ctx null"));
		return;
	}

	/* start with highest priority queue : timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				QDF_TRACE_LEVEL_ERROR,
				"%s: scheduler thread signaled to shutdown",
				__func__);
			*shutdown = true;
			/* Check for any Suspend Indication */
			if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}
			break;
		}
		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
			/* check next queue */
			i++;
			continue;
		}
		msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (msg_wrapper == NULL) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				QDF_TRACE_LEVEL_ERROR,
				"%s: msg_wrapper is NULL", __func__);
			QDF_ASSERT(0);
			return;
		}
		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			struct scheduler_msg *msg = msg_wrapper->msg_buf;

			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status)) {
				QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
					QDF_TRACE_LEVEL_ERROR,
					FL("Failed processing Qid[%d] message"),
					sch_ctx->queue_ctx.sch_msg_q[i].qid);
			}
			/* return message to the Core */
			scheduler_core_return_msg(sch_ctx, msg_wrapper);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	/* nothing left to process; the thread resumes waiting on its
	 * wait queue
	 */
}

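/**
 * scheduler_thread() - scheduler (MC) thread entry point
 * @arg: pointer to the scheduler context
 *
 * Signals sch_start_event to the creating context, then loops: wait on
 * sch_wait_queue for a post or suspend event, clear the post event and
 * drain the message queues, until scheduler_thread_process_queues()
 * reports shutdown. On exit, sch_shutdown is signaled.
 *
 * Return: 0
 */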
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int ret = 0;
	bool shutdown = false;

	if (arg == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL arg passed", __func__);
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: scheduler_thread %d (%s) starting up", __func__,
		  current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		ret = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (ret == -ERESTARTSYS) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
					QDF_TRACE_LEVEL_ERROR,
					"%s: wait_event_interruptible returned -ERESTARTSYS",
					__func__);
			QDF_BUG(0);
		}
		qdf_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the MC thread must exit */
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: Scheduler thread exiting", __func__);
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

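/**
 * scheduler_cleanup_queues() - flush all pending messages from a queue
 * @sch_ctx: pointer to the scheduler context
 * @idx: index of the scheduler message queue to flush
 *
 * Dequeues every pending message wrapper, frees the message body if one
 * was allocated, and returns the wrapper to the free message queue.
 *
 * Return: none
 */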
void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
{
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL sched_ctx passed", __func__);
		return;
	}

	while ((msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
		if (msg_wrapper->msg_buf != NULL) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
				"%s: Freeing MC WMA MSG message type %d",
				__func__, msg_wrapper->msg_buf->type);
			if (msg_wrapper->msg_buf->bodyptr)
				qdf_mem_free(msg_wrapper->msg_buf->bodyptr);
			msg_wrapper->msg_buf->bodyptr = NULL;
			msg_wrapper->msg_buf->bodyval = 0;
			msg_wrapper->msg_buf->type = 0;
		}
		scheduler_core_return_msg(sch_ctx, msg_wrapper);
	}
}