xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision cd39549564686e1d60a410c477b7c6e9e19791fd)
1 /*
2  * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include <scheduler_core.h>
29 
30 static struct scheduler_ctx *gp_sched_ctx;
31 
32 QDF_STATUS scheduler_create_ctx(void)
33 {
34 	if (gp_sched_ctx) {
35 		QDF_ASSERT(0);
36 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
37 			  FL("there is a already gp_sched_ctx mem allocated"));
38 		return QDF_STATUS_E_FAILURE;
39 	}
40 
41 	gp_sched_ctx = qdf_mem_malloc(sizeof(struct scheduler_ctx));
42 	if (!gp_sched_ctx) {
43 		QDF_ASSERT(0);
44 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
45 			  FL("gp_sched_ctx can't alloc mememory"));
46 		return QDF_STATUS_E_FAILURE;
47 	}
48 	return QDF_STATUS_SUCCESS;
49 }
50 
51 QDF_STATUS scheduler_destroy_ctx(void)
52 {
53 	if (gp_sched_ctx)
54 		qdf_mem_free(gp_sched_ctx);
55 	gp_sched_ctx = NULL;
56 	return QDF_STATUS_SUCCESS;
57 }
58 
/**
 * scheduler_get_context() - get the global scheduler context
 *
 * Return: pointer to the context allocated by scheduler_create_ctx(),
 *         or NULL if it has not been created yet
 */
struct scheduler_ctx *scheduler_get_context(void)
{
	return gp_sched_ctx;
}
63 
64 
65 static QDF_STATUS scheduler_all_queues_init(
66 			struct scheduler_ctx *sched_ctx)
67 {
68 	QDF_STATUS status = QDF_STATUS_SUCCESS;
69 	int i;
70 
71 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
72 	if (!sched_ctx) {
73 		QDF_ASSERT(0);
74 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
75 			  "%s: Null params being passed", __func__);
76 		return QDF_STATUS_E_FAILURE;
77 	}
78 
79 	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
80 	if (QDF_STATUS_SUCCESS != status)
81 		return status;
82 
83 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
84 		QDF_TRACE_LEVEL_ERROR, FL("free msg queue init complete"));
85 
86 	/* Initialize all message queues */
87 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
88 		status = scheduler_mq_init(
89 				&sched_ctx->queue_ctx.sch_msg_q[i]);
90 		if (QDF_STATUS_SUCCESS != status)
91 			return status;
92 	}
93 
94 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));
95 
96 	return status;
97 }
98 
99 
100 static QDF_STATUS scheduler_all_queues_deinit(
101 		struct scheduler_ctx *sched_ctx)
102 {
103 	QDF_STATUS status = QDF_STATUS_SUCCESS;
104 	int i;
105 
106 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
107 	if (!sched_ctx) {
108 		QDF_ASSERT(0);
109 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
110 			  "%s: Null params being passed", __func__);
111 		return QDF_STATUS_E_FAILURE;
112 	}
113 
114 	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);
115 
116 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
117 		  QDF_TRACE_LEVEL_ERROR, FL("free msg queue inited"));
118 
119 	/* De-Initialize all message queues */
120 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
121 		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);
122 
123 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));
124 	return status;
125 }
126 
127 QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
128 {
129 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
130 	if (msg_q == NULL) {
131 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
132 			  "%s: NULL pointer passed", __func__);
133 		return QDF_STATUS_E_FAILURE;
134 	}
135 	/* Now initialize the lock */
136 	qdf_spinlock_create(&msg_q->mq_lock);
137 	/* Now initialize the List data structure */
138 	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
139 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));
140 
141 	return QDF_STATUS_SUCCESS;
142 }
143 
144 void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
145 {
146 	if (msg_q == NULL) {
147 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
148 				"%s: NULL pointer passed", __func__);
149 		return;
150 	}
151 }
152 
153 void scheduler_mq_put(struct scheduler_mq_type *msg_q,
154 			struct scheduler_msg_wrapper *msg_wrapper)
155 {
156 	if (msg_q == NULL || msg_wrapper == NULL) {
157 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
158 				"%s: NULL pointer passed", __func__);
159 		return;
160 	}
161 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
162 	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
163 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
164 
165 }
166 
167 void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
168 			struct scheduler_msg_wrapper *msg_wrapper)
169 {
170 	if ((msg_q == NULL) || (msg_wrapper == NULL)) {
171 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
172 				"%s: NULL pointer passed", __func__);
173 		return;
174 	}
175 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
176 	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
177 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
178 }
179 
180 struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
181 {
182 	qdf_list_node_t *listptr;
183 	struct scheduler_msg_wrapper *msg_wrapper = NULL;
184 
185 	if (msg_q == NULL) {
186 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
187 				"%s: NULL pointer passed", __func__);
188 		return NULL;
189 	}
190 
191 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
192 	if (qdf_list_empty(&msg_q->mq_list)) {
193 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_WARN,
194 			  "%s: Scheduler Message Queue is empty", __func__);
195 	} else {
196 		listptr = msg_q->mq_list.anchor.next;
197 		msg_wrapper = (struct scheduler_msg_wrapper *)
198 					qdf_container_of(listptr,
199 						struct scheduler_msg_wrapper,
200 						msg_node);
201 		qdf_list_remove_node(&msg_q->mq_list, listptr);
202 	}
203 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
204 	return msg_wrapper;
205 
206 }
207 
208 bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
209 {
210 	bool is_empty = false;
211 
212 	if (msg_q == NULL) {
213 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
214 				"%s: NULL pointer passed", __func__);
215 		return QDF_STATUS_E_FAILURE;
216 	}
217 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
218 	is_empty = qdf_list_empty(&msg_q->mq_list) ? true : false;
219 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
220 	return is_empty;
221 }
222 
/**
 * scheduler_queues_deinit() - de-initialize all scheduler message queues
 * @sched_ctx: scheduler context whose queues are torn down
 *
 * Return: status from scheduler_all_queues_deinit()
 */
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}
227 
228 QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
229 {
230 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
231 	int i;
232 
233 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
234 	if (!sched_ctx) {
235 		QDF_ASSERT(0);
236 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
237 			  "%s: Null params being passed", __func__);
238 		return QDF_STATUS_E_FAILURE;
239 	}
240 	status = scheduler_all_queues_init(sched_ctx);
241 	if (QDF_STATUS_SUCCESS != status) {
242 		scheduler_all_queues_deinit(sched_ctx);
243 		QDF_ASSERT(0);
244 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
245 				FL("Failed to initialize the msg queues"));
246 		return status;
247 	}
248 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
249 		QDF_TRACE_LEVEL_ERROR, FL("Queue init passed"));
250 
251 	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
252 		(sched_ctx->queue_ctx.msg_wrappers[i]).msg_buf =
253 			&(sched_ctx->queue_ctx.msg_buffers[i]);
254 		qdf_init_list_head(
255 			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
256 		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
257 			   &(sched_ctx->queue_ctx.msg_wrappers[i]));
258 	}
259 	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));
260 	return status;
261 }
262 
263 static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
264 			struct scheduler_msg_wrapper *msg_wrapper)
265 {
266 	if (!sch_ctx) {
267 		QDF_ASSERT(0);
268 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
269 			"%s: gp_cds_context != p_cds_context", __func__);
270 		return;
271 	}
272 
273 	QDF_ASSERT(NULL != msg_wrapper);
274 
275 	if (msg_wrapper == NULL) {
276 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
277 			FL("msg_wrapper == NULL in function"));
278 		return;
279 	}
280 
281 	/*
282 	 * Return the message on the free message queue
283 	 */
284 	qdf_init_list_head(&msg_wrapper->msg_node);
285 	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
286 }
287 
/**
 * scheduler_thread_process_queues() - drain and dispatch pending messages
 * @sch_ctx: scheduler context whose queues are serviced
 * @shutdown: set to true when the thread has been signaled to shut down
 *
 * Scans the scheduler message queues in priority order (index 0 is the
 * highest priority).  After each message is handed to its registered
 * handler, scanning restarts from index 0 so higher-priority queues are
 * always drained first.  Shutdown and suspend indications in
 * sch_event_flag are honored between messages.
 */
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
						bool *shutdown)
{
	int i;
	QDF_STATUS vStatus = QDF_STATUS_E_FAILURE;
	struct scheduler_msg_wrapper *pMsgWrapper = NULL;

	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
				FL("sch_ctx null"));
		return;
	}

	/* start with highest priority queue : timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				QDF_TRACE_LEVEL_ERROR,
				"%s: scheduler thread signaled to shutdown",
				__func__);
			*shutdown = true;
			/* Check for any Suspend Indication */
			if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}
			break;
		}
		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
			/* check next queue */
			i++;
			continue;
		}
		pMsgWrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (pMsgWrapper == NULL) {
			/* queue was non-empty a moment ago; losing the
			 * message here indicates a logic error
			 */
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				QDF_TRACE_LEVEL_ERROR,
				"%s: pMsgWrapper is NULL", __func__);
			QDF_ASSERT(0);
			return;
		}
		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			/* dispatch to the handler registered for queue i */
			vStatus = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](
							pMsgWrapper->msg_buf);
			if (QDF_IS_STATUS_ERROR(vStatus)) {
				QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
					QDF_TRACE_LEVEL_ERROR,
					FL("Failed processing Qid[%d] message"),
					sch_ctx->queue_ctx.sch_msg_q[i].qid);
			}
			/* return message to the Core */
			scheduler_core_return_msg(sch_ctx, pMsgWrapper);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
		continue;
	}
	/* Check for any Suspend Indication */
	if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing to process wait on wait queue */
}
369 
/**
 * scheduler_thread() - main loop of the scheduler (MC) thread
 * @arg: opaque pointer to the struct scheduler_ctx for this thread
 *
 * Signals sch_start_event once running, then sleeps on sch_wait_queue
 * until a post or suspend event bit is set, processing queued messages
 * each time it wakes.  Exits when scheduler_thread_process_queues()
 * reports shutdown, signaling sch_shutdown on the way out.
 *
 * Return: 0 in all cases (thread exit code)
 */
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (arg == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: Bad Args passed", __func__);
		return 0;
	}
	/* raise priority slightly; message dispatch is latency sensitive */
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: scheduler_thread %d (%s) starting up", __func__, current->pid,
		  current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS) {
			/* interrupted by a signal; not expected for a
			 * kernel thread, so treat as a bug
			 */
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
					"%s: wait_event_interruptible returned -ERESTARTSYS",
					__func__);
			QDF_BUG(0);
		}
		/* clear the post bit before draining so new posts re-wake us */
		qdf_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}
	/* If we get here the MC thread must exit */
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: Scheduler thread exiting!!!!", __func__);
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}
418 
419 void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
420 {
421 	struct scheduler_msg_wrapper *msg_wrapper = NULL;
422 
423 	if (!sch_ctx) {
424 		QDF_ASSERT(0);
425 		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
426 			  "%s: Null params being passed", __func__);
427 		return;
428 	}
429 
430 	while ((msg_wrapper =
431 			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
432 		if (msg_wrapper->msg_buf != NULL) {
433 			QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
434 				"%s: Freeing MC WMA MSG message type %d",
435 				__func__, msg_wrapper->msg_buf->type);
436 			if (msg_wrapper->msg_buf->bodyptr)
437 				qdf_mem_free(
438 					(void *)msg_wrapper->msg_buf->bodyptr);
439 			msg_wrapper->msg_buf->bodyptr = NULL;
440 			msg_wrapper->msg_buf->bodyval = 0;
441 			msg_wrapper->msg_buf->type = 0;
442 		}
443 		scheduler_core_return_msg(sch_ctx, msg_wrapper);
444 	}
445 }
446