xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

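/*
 * Message pool used by scheduler_core_msg_dup()/scheduler_core_msg_free()
 * to copy posted messages before they are queued. WLAN_SCHED_REDUCTION_LIMIT
 * (0 unless overridden at build time) is handed to the flex mem pool as its
 * reduction limit.
 */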
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: ID of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

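/*
 * Circular log of the most recently executed scheduler messages;
 * sched_history_index wraps modulo WLAN_SCHED_HISTORY_SIZE, so the oldest
 * entries are overwritten first.
 */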
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

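/*
 * Stamp the message with the queue it is entering, the queue's current
 * depth, and the enqueue time, so the history can later record how long
 * the message sat on the queue before it ran.
 */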
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_id = msg->queue_id,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}

static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

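/*
 * __sched_queue_depth tracks the total number of messages currently held
 * across all scheduler queues; __sched_dup_fail_count counts consecutive
 * scheduler_core_msg_dup() failures caused by a full scheduler.
 */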
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

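/**
 * scheduler_mq_put() - add a message to the tail of a scheduler queue
 * @msg_q: the queue to add the message to
 * @msg: the message to enqueue
 *
 * The queue's spinlock is held while the message is stamped for the
 * scheduler history (when enabled) and linked onto the tail of the list.
 */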
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

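/**
 * scheduler_mq_put_front() - add a message to the head of a scheduler queue
 * @msg_q: the queue to add the message to
 * @msg: the message to enqueue
 *
 * Identical to scheduler_mq_put(), except the message is inserted at the
 * front of the list so it is serviced before already queued messages.
 */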
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

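/**
 * scheduler_mq_get() - remove the message at the head of a scheduler queue
 * @msg_q: the queue to remove the message from
 *
 * Return: pointer to the dequeued message, or NULL if the queue is empty
 */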
struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

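/**
 * scheduler_core_msg_dup() - duplicate a message out of the scheduler pool
 * @msg: the message to duplicate (typically the caller's stack-allocated
 *	 copy handed to the scheduler post APIs)
 *
 * Increments the global queue depth and rejects the message if the
 * scheduler already holds SCHEDULER_CORE_MAX_MESSAGES messages; repeated
 * back-to-back rejections trigger a debug panic.
 *
 * Return: pointer to the duplicated message, or NULL on failure
 */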
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

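/**
 * scheduler_core_msg_free() - return a duplicated message to the pool
 * @msg: the message to free
 *
 * Also drops the global queue depth taken in scheduler_core_msg_dup().
 */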
void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

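/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: the global scheduler context
 * @shutdown: set to true if a shutdown was signalled while draining
 *
 * Queues are scanned in priority order, index 0 (the timer queue) being the
 * highest; after each serviced message the scan restarts from index 0, so
 * higher priority queues are always emptied first. Each message callback
 * runs under the watchdog timer. Suspend requests are acknowledged via the
 * hdd_callback, after which the thread blocks until the resume event fires.
 */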
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: the timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing left to process; caller waits on the wait queue */
}

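/**
 * scheduler_thread() - main loop of the scheduler (MC) thread
 * @arg: the global scheduler context, passed at thread creation
 *
 * Signals sch_start_event back to the creator, then sleeps on
 * sch_wait_queue until a message post or suspend event is flagged,
 * processes the queues, and repeats until a shutdown is signalled.
 *
 * Return: 0 (thread exit code)
 */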
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * was created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

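/**
 * scheduler_flush_single_queue() - empty one scheduler message queue
 * @mq: the queue to flush
 *
 * Each pending message is handed to its flush_callback if one was
 * registered; otherwise any attached bodyptr is freed. The message itself
 * is then returned to the scheduler pool.
 */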
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

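/**
 * scheduler_queues_flush() - flush all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * Flushes every queue in turn and then calls qdf_flex_mem_release() on the
 * scheduler message pool.
 */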
void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}