xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

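/*
 * Statically defined flex-mem pool backing all scheduler messages.
 * scheduler_core_msg_dup() carves message copies out of this pool and
 * scheduler_core_msg_free() returns them. WLAN_SCHED_REDUCTION_LIMIT
 * (0 by default) is passed through as the pool's reduction limit, which
 * appears to control how much dynamically grown memory the pool keeps
 * around once it is no longer in use.
 */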
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

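/*
 * The scheduler uses a single, statically allocated context.
 * scheduler_create_ctx() publishes it via gp_sched_ctx and prepares the
 * message pool; scheduler_destroy_ctx() reverses both steps.
 */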
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

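/*
 * Per-queue init/deinit: each scheduler message queue is a qdf list
 * protected by its own spinlock, bounded at SCHEDULER_CORE_MAX_MESSAGES
 * entries.
 */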
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

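/*
 * Total number of messages currently queued (or in the middle of being
 * duplicated) across all scheduler queues; used to enforce the
 * SCHEDULER_CORE_MAX_MESSAGES cap globally in scheduler_core_msg_dup().
 */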
static qdf_atomic_t __sched_queue_depth;

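/*
 * Initialize every message queue and reset the qid-to-queue-index map.
 * Mapping entries are set to SCHEDULER_NUMBER_OF_MSG_QUEUE, which is one
 * past the last valid index and therefore serves as the "invalid" marker.
 */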
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid-to-qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid-to-qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

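/*
 * Lock-protected enqueue/dequeue primitives. scheduler_mq_put() appends to
 * the tail, scheduler_mq_put_front() prepends so a caller can place a
 * message ahead of anything already pending, and scheduler_mq_get() pops
 * from the head or returns NULL when the queue is empty.
 */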
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

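/*
 * Public init entry point: set up all queues, tearing everything back down
 * if any individual queue fails to initialize.
 */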
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

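/*
 * Duplicate a caller-owned message into a pool-backed copy that the
 * scheduler thread can own and later free. The global queue depth is
 * incremented before the allocation so the SCHEDULER_CORE_MAX_MESSAGES cap
 * covers the copy from the moment it exists; overflow is treated as fatal.
 *
 * Rough producer-side flow (a sketch only; the real post path is handled
 * by the higher-level scheduler API, and "qidx" below is just an
 * illustrative queue index):
 *
 *	struct scheduler_msg *dup = scheduler_core_msg_dup(msg);
 *
 *	if (dup)
 *		scheduler_mq_put(&sched_ctx->queue_ctx.sch_msg_q[qidx], dup);
 */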
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	return dup;

buffer_full:
	QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

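/*
 * Drain the message queues in priority order (index 0 first). After each
 * dequeued and dispatched message the scan restarts at index 0, so
 * higher-priority queues are always serviced before lower ones. A shutdown
 * request is checked before every dequeue; a pending suspend request is
 * serviced once the queues have been drained.
 */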
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: the timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check the next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for the resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing left to process; wait on the wait queue */
}

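/*
 * Main entry point of the scheduler kernel thread. It signals
 * sch_start_event once it is running, then sleeps on sch_wait_queue until
 * either a post or a suspend event is flagged, processes the queues, and
 * repeats until scheduler_thread_process_queues() reports shutdown.
 */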
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * was created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

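/*
 * Drop every message still sitting in a queue: give the owner a chance to
 * clean up via its flush callback, otherwise free any attached bodyptr,
 * then return the message itself to the pool.
 */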
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

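/*
 * Flush all queues and release any dynamically allocated memory still held
 * by the message pool.
 */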
void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}