xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

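/*
 * Pool of scheduler_msg buffers; scheduler_core_msg_dup() allocates from it
 * and scheduler_core_msg_free() returns buffers to it.
 */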
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

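/**
 * struct sched_history_item - profiling entry for one processed message
 * @callback: message callback that was invoked
 * @type_id: message type
 * @start_us: timestamp (in microseconds) when processing started
 * @duration_us: time (in microseconds) spent processing the message
 */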
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	uint64_t start_us;
	uint64_t duration_us;
};

static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

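/**
 * sched_history_start() - record the start of processing for @msg
 * @msg: the message about to be processed
 */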
static void sched_history_start(struct scheduler_msg *msg)
{
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.start_us = qdf_get_log_timestamp_usecs(),
	};

	sched_history[sched_history_index] = hist;
}

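/**
 * sched_history_stop() - record completion of the current history entry
 *
 * Computes the processing duration and advances the circular history index.
 */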
static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];

	hist->duration_us = qdf_get_log_timestamp_usecs() - hist->start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

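/**
 * scheduler_mq_init() - initialize a single scheduler message queue
 * @msg_q: the queue to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 */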
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

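/**
 * scheduler_mq_deinit() - de-initialize a single scheduler message queue
 * @msg_q: the queue to de-initialize
 */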
static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

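/*
 * __sched_queue_depth counts messages duplicated via scheduler_core_msg_dup()
 * that have not yet been freed; __sched_dup_fail_count counts consecutive
 * duplication failures caused by the queue depth limit being reached.
 */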
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

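/**
 * scheduler_all_queues_init() - initialize all scheduler message queues
 * @sched_ctx: pointer to the scheduler context
 *
 * Also resets the qid to qidx map so that every module id initially maps
 * to an invalid queue index.
 *
 * Return: QDF status of the operation
 */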
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

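/**
 * scheduler_all_queues_deinit() - de-initialize all scheduler message queues
 * @sched_ctx: pointer to the scheduler context
 *
 * Return: QDF status of the operation
 */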
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

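/*
 * Duplicate the caller's message into a buffer from sched_pool. The queue
 * depth counter is incremented up front and rolled back on failure; repeated
 * failures while the queues are full eventually trigger a debug panic.
 */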
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

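/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: pointer to the scheduler context
 * @shutdown: set to true if a shutdown has been signaled
 *
 * Messages are taken from the highest priority non-empty queue first; after
 * each message is processed, scanning restarts from queue index 0. Shutdown
 * and suspend indications are honored between messages.
 */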
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any suspend indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any suspend indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing left to process; wait on the wait queue */
}

int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Signal the context that created the main controller thread that
	 * the thread has started
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* Block until a message is posted or a suspend is requested */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here, the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

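/**
 * scheduler_flush_single_queue() - flush all messages from a single queue
 * @mq: the queue to flush
 *
 * Each pending message is handed to its flush callback if one is set;
 * otherwise its bodyptr is freed. The message buffer itself is then
 * returned to the pool.
 */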
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}
430