xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

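/*
 * Flex memory pool backing scheduler message allocation. Messages posted to
 * the scheduler are duplicated out of this pool (see scheduler_core_msg_dup())
 * and returned to it via scheduler_core_msg_free().
 */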
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: Id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

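/**
 * sched_history_queue() - record queue-time metrics for a message
 * @queue: the message queue @msg is being added to
 * @msg: the message being queued
 *
 * Stamps @msg with its destination queue id, the current queue depth, and the
 * time at which it was queued, for later use by sched_history_start().
 *
 * Return: None
 */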
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

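/**
 * sched_history_start() - open a history entry as a message starts running
 * @msg: the message about to be processed
 *
 * Captures the message's queue metrics and its run start timestamp in the
 * current slot of the history ring buffer.
 *
 * Return: None
 */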
static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}

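/**
 * sched_history_stop() - close the current history entry
 *
 * Records the run duration of the message just processed and advances the
 * history ring buffer index.
 *
 * Return: None
 */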
static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

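/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: the message queue to initialize
 *
 * Creates the queue's spinlock and its backing message list.
 *
 * Return: QDF_STATUS_SUCCESS
 */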
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

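/**
 * scheduler_mq_deinit() - de-initialize a scheduler message queue
 * @msg_q: the message queue to de-initialize
 *
 * Destroys the queue's backing message list and its spinlock.
 *
 * Return: None
 */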
static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

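/*
 * __sched_queue_depth tracks the number of duplicated messages currently
 * outstanding across all scheduler queues; __sched_dup_fail_count tracks
 * consecutive buffer-full duplication failures so scheduler_core_msg_dup()
 * can flag a persistently full scheduler.
 */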
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

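/**
 * scheduler_all_queues_init() - initialize all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * Resets the global queue depth, initializes each message queue, and marks
 * every qid-to-qidx mapping as invalid.
 *
 * Return: QDF_STATUS of operation
 */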
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

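/**
 * scheduler_all_queues_deinit() - de-initialize all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * De-initializes each message queue and marks every qid-to-qidx mapping as
 * invalid.
 *
 * Return: QDF_STATUS of operation
 */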
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

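/**
 * scheduler_thread_process_queues() - drain and dispatch queued messages
 * @sch_ctx: the global scheduler context
 * @shutdown: set to true if a shutdown indication has been received
 *
 * Services the message queues in priority order (index 0 first), restarting
 * from the highest priority queue after each message is processed. Shutdown
 * and suspend indications signaled via @sch_ctx's event flags are honored
 * between messages.
 *
 * Return: None
 */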
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing to process; wait on the wait queue */
}

int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

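/**
 * scheduler_flush_single_queue() - flush all messages from a message queue
 * @mq: the message queue to flush
 *
 * Each flushed message is handed to its flush callback if one is registered;
 * otherwise its bodyptr, if any, is freed. The message itself is then
 * returned to the scheduler's message pool.
 *
 * Return: None
 */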
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}
462