/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

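/* Initialize the message pool and publish the global scheduler context */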
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

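/* Set up a single message queue: its spinlock and its bounded message list */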
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

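/* Count of messages currently duplicated into the scheduler queues */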
static qdf_atomic_t __sched_queue_depth;

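/*
 * Initialize every scheduler message queue and reset the qid-to-qidx
 * map so that all entries start out invalid.
 */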
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_IS_STATUS_ERROR(status))
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

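/* Tear down every message queue and invalidate the qid-to-qidx map */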
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

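/* Enqueue a message at the tail of a message queue under its lock */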
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

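/* Enqueue a message at the head of a message queue under its lock */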
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

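/* Dequeue the message at the head of a queue; returns NULL if the queue is empty */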
struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

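/*
 * Initialize all scheduler queues for the given context; on failure the
 * partially initialized queues are torn down before returning the error.
 */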
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

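/*
 * Copy a caller-provided message into a pool-backed message so it can be
 * queued; fails (and panics on overflow) once SCHEDULER_CORE_MAX_MESSAGES
 * messages are already outstanding.
 */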
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	return dup;

buffer_full:
	sched_err("Scheduler buffer is full");
	QDF_DEBUG_PANIC();

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

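/* Return a duplicated message to the pool and drop the outstanding count */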
void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

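/*
 * Drain the message queues in priority order (index 0 first), restarting
 * from the highest priority queue after every processed message. Also
 * honors shutdown and suspend requests signaled through sch_event_flag.
 */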
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		sched_err("sch_ctx is null");
		QDF_DEBUG_PANIC();
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_info("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	/* nothing left to process; go back and wait on the wait queue */
}

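/*
 * Main entry point of the scheduler thread: sleeps on sch_wait_queue until
 * work or a suspend request arrives, then processes the queues until a
 * shutdown is signaled.
 */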
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		sched_err("arg is null");
		QDF_DEBUG_PANIC();
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context that created this scheduler thread */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* block until a message is posted or a suspend is requested */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS) {
			sched_err("wait_event_interruptible returned -ERESTARTSYS");
			QDF_DEBUG_PANIC();
		}

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_info("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

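/*
 * Drop every message still sitting in one queue, preferring the message's
 * flush_callback and otherwise freeing its bodyptr.
 */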
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_info("Calling flush callback; type: %x",
				   msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_info("Freeing scheduler msg bodyptr; type: %x",
				   msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

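/* Flush all scheduler message queues and release the message pool memory */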
void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_info("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}