xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

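/*
 * scheduler_create_ctx() and scheduler_destroy_ctx() manage the lifetime of
 * the single global scheduler context: create initializes the message pool
 * and publishes the static context via gp_sched_ctx; destroy reverses both.
 */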
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

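/* Return the global scheduler context; QDF_BUG() fires if it was never created */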
struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

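/*
 * scheduler_mq_init() and scheduler_mq_deinit() set up and tear down a single
 * message queue: the spinlock protecting it and its backing qdf list.
 */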
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

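/*
 * Total number of messages currently queued across all scheduler queues;
 * incremented in scheduler_core_msg_dup() and decremented in
 * scheduler_core_msg_free().
 */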
static qdf_atomic_t __sched_queue_depth;

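/*
 * Initialize every scheduler message queue and reset all qid-to-qidx mappings
 * to the invalid index (SCHEDULER_NUMBER_OF_MSG_QUEUE).
 */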
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	if (!sched_ctx) {
		QDF_DEBUG_PANIC("sched_ctx is null");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	if (!sched_ctx) {
		QDF_DEBUG_PANIC("sched_ctx is null");
		return QDF_STATUS_E_FAILURE;
	}

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

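/*
 * Enqueue a message at the tail (scheduler_mq_put) or head
 * (scheduler_mq_put_front) of a message queue, under the queue lock.
 */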
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

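/* Remove and return the message at the head of a queue; NULL if the queue is empty */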
struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	if (!sched_ctx) {
		QDF_DEBUG_PANIC("sched_ctx is null");
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

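/*
 * Duplicate a caller-owned message into a pool-backed copy owned by the
 * scheduler. Triggers a debug panic if the global queue depth would exceed
 * SCHEDULER_CORE_MAX_MESSAGES; returns NULL on pool allocation failure.
 */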
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	return dup;

buffer_full:
	QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

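/*
 * Service the message queues in strict priority order (index 0 first),
 * restarting from the highest priority queue after each processed message.
 * Honors shutdown and suspend events signaled through sch_event_flag.
 */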
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_info("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}
	return;  /* Nothing left to process; wait on the wait queue */
}

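/*
 * Main loop of the scheduler thread: block until a post or suspend event is
 * flagged, drain the queues, and exit once the shutdown flag is observed.
 */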
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_info("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

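/*
 * Purge one queue: invoke each pending message's flush callback if present,
 * otherwise free its bodyptr, then return the message to the pool.
 */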
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_info("Calling flush callback; type: %x",
				   msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_info("Freeing scheduler msg bodyptr; type: %x",
				   msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_info("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}