xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

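/*
 * Pre-allocated message pool used by scheduler_core_msg_dup() and
 * scheduler_core_msg_free() below to copy caller-provided messages before
 * they are queued; WLAN_SCHED_REDUCTION_LIMIT tunes how aggressively the
 * pool is trimmed (see qdf_flex_mem.h for the exact semantics).
 */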
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

#define SCHEDULER_HISTORY_HEADER "|Callback                               "\
				 "|Message Type"			   \
				 "|Queue Duration(us)|Queue Depth"	   \
				 "|Run Duration(us)|"

#define SCHEDULER_HISTORY_LINE "--------------------------------------" \
			       "--------------------------------------" \
			       "--------------------------------------"

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: ID of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

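/*
 * sched_history[] is a circular buffer indexed by sched_history_index.
 * sched_history_queue() stamps a message with queue metadata when it is
 * enqueued, sched_history_start() snapshots those metrics when the message
 * begins executing, and sched_history_stop() records the run duration and
 * advances the index, wrapping at WLAN_SCHED_HISTORY_SIZE.
 */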
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}

static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

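/*
 * Dump the history ring at fatal log level. A local copy of the ring is
 * taken first, and entries are emitted oldest first, starting from the
 * current ring index; slots with a NULL callback have never been written
 * and are skipped.
 */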
void sched_history_print(void)
{
	struct sched_history_item *history, *item;
	uint32_t history_idx;
	uint32_t idx, index;

	history = qdf_mem_malloc(sizeof(*history) * WLAN_SCHED_HISTORY_SIZE);

	if (!history) {
		sched_err("Mem alloc failed");
		return;
	}

	qdf_mem_copy(history, &sched_history,
		     (sizeof(*history) * WLAN_SCHED_HISTORY_SIZE));
	history_idx = sched_history_index;

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
	sched_nofl_fatal(SCHEDULER_HISTORY_HEADER);
	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	for (idx = 0; idx < WLAN_SCHED_HISTORY_SIZE; idx++) {
		index = (history_idx + idx) % WLAN_SCHED_HISTORY_SIZE;
		item = history + index;

		if (!item->callback)
			continue;

		sched_nofl_fatal("%40pF|%12d|%18d|%11d|%16d|",
				 item->callback, item->type_id,
				 item->queue_duration_us,
				 item->queue_depth,
				 item->run_duration_us);
	}

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	qdf_mem_free(history);
}
#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }
void sched_history_print(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

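/*
 * Context management: scheduler_create_ctx() readies the message pool and
 * publishes the singleton context pointer; scheduler_destroy_ctx() reverses
 * that. Callers (in the public scheduler API layer, not shown in this file)
 * are expected to create the context before initializing the queues or
 * starting the scheduler thread, and to tear it down last.
 */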
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

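/*
 * Per-queue setup/teardown: each struct scheduler_mq_type pairs a spinlock
 * with a qdf list created with a maximum size of SCHEDULER_CORE_MAX_MESSAGES.
 */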
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

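/*
 * __sched_queue_depth tracks how many duplicated messages are currently
 * outstanding across all queues; __sched_dup_fail_count counts consecutive
 * duplication failures caused by that depth hitting
 * SCHEDULER_CORE_MAX_MESSAGES, and triggers a debug panic once it exceeds
 * SCHEDULER_WRAPPER_MAX_FAIL_COUNT.
 */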
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid-to-qidx mappings to an invalid value */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid-to-qidx mappings to an invalid value */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

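/*
 * Enqueue/dequeue primitives. Producers are expected to duplicate a message
 * into pool memory before queuing it, along the lines of (illustrative
 * sketch only; the real post path lives in the scheduler API layer, which is
 * not part of this file):
 *
 *	struct scheduler_msg *dup = scheduler_core_msg_dup(msg);
 *
 *	if (dup)
 *		scheduler_mq_put(&sched_ctx->queue_ctx.sch_msg_q[qidx], dup);
 *
 * All three helpers take mq_lock, and the put variants also stamp the
 * message with history metadata via sched_history_queue().
 */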
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

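/*
 * Duplicate a caller-owned message into pool memory. The global queue depth
 * is incremented up front; if it would exceed SCHEDULER_CORE_MAX_MESSAGES the
 * duplication fails, and after SCHEDULER_WRAPPER_MAX_FAIL_COUNT consecutive
 * failures a debug panic is raised. A successful duplication resets the
 * failure counter; scheduler_core_msg_free() releases the copy and drops the
 * depth back down.
 */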
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

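/*
 * Drain the message queues in strict priority order: queue index 0 (the
 * timer queue) is always tried first, and after every handled message the
 * scan restarts from index 0, so lower-priority queues only make progress
 * when all higher-priority queues are empty. Shutdown and suspend requests
 * are honored between messages.
 */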
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest-priority queue: the timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any suspend indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check the next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest-priority queue at index 0 */
		i = 0;
	}

	/* Check for any suspend indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for the resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing left to process; wait on the wait queue */
}

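/*
 * Main entry point for the scheduler (MC) thread: signal the creator that
 * the thread is up, then loop waiting on sch_wait_queue for either a posted
 * message or a suspend request, clearing the post flag and draining the
 * queues each time, until scheduler_thread_process_queues() reports
 * shutdown.
 */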
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context that created the main controller thread */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

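/*
 * Flush helpers used during teardown: every pending message is popped and
 * either handed to its flush_callback (which owns any cleanup) or, failing
 * that, has its bodyptr freed directly, before the pool copy itself is
 * released via scheduler_core_msg_free().
 */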
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}
}