/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

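/*
 * Pool of scheduler_msg buffers: scheduler_core_msg_dup() copies
 * caller-provided messages into entries from this pool before they are
 * queued, and scheduler_core_msg_free() returns them to the pool.
 */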
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: ID of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

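/*
 * Circular history buffer holding metrics for the last
 * WLAN_SCHED_HISTORY_SIZE processed messages; sched_history_index wraps
 * around in sched_history_stop().
 */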
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}

static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

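/*
 * __sched_queue_depth tracks the total number of messages currently
 * duplicated and queued across all scheduler queues;
 * __sched_dup_fail_count counts consecutive duplication failures caused
 * by the queues being full.
 */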
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

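/*
 * Lock-protected helpers for the per-queue message lists: put/put_front
 * append or prepend a message under mq_lock, and get removes and returns
 * the message at the head of the list, or NULL if the queue is empty.
 */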
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

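/*
 * Duplicate a caller-provided message into a buffer from sched_pool so it
 * can outlive the caller's stack frame. The copy is refused once the total
 * queue depth exceeds SCHEDULER_CORE_MAX_MESSAGES, and a debug panic is
 * triggered after SCHEDULER_WRAPPER_MAX_FAIL_COUNT consecutive refusals.
 */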
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

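/*
 * Drain the message queues in strict priority order: queue index 0 is the
 * highest priority, and after every processed message the scan restarts
 * from index 0. Shutdown and suspend events are honored between messages.
 */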
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing left to process; wait on the wait queue again */
}

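/*
 * Entry point for the scheduler (MC) thread: signal the creator that the
 * thread is running, then sleep on the wait queue until a post or suspend
 * event arrives and process the queues, repeating until shutdown is
 * requested.
 */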
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

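/*
 * Drain a single queue: hand each pending message to its flush callback
 * if one was registered, otherwise free its bodyptr, then return the
 * message buffer to the pool.
 */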
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}
}
456