xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <hif_exec.h>
20 #include <ce_main.h>
21 #include <hif_irq_affinity.h>
22 #include "qdf_module.h"
23 #include "qdf_net_if.h"
24 
/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1,scaler -1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc
 *
 * The forward map takes a NAPI budget to the top of its internal bucket
 * ((n << s) - 1); the inverse maps an internal work count back to a
 * NAPI-scale value (((n + 1) >> s)).  's' is the group's scale_bin_shift.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
33 
34 static struct hif_exec_context *hif_exec_tasklet_create(void);
35 
36 /**
37  * hif_print_napi_stats() - print NAPI stats
38  * @hif_ctx: hif context
39  *
40  * return: void
41  */
42 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
43 {
44 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
45 	struct hif_exec_context *hif_ext_group;
46 	struct qca_napi_stat *napi_stats;
47 	int i, j;
48 
49 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
50 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
51 
52 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
53 		if (hif_state->hif_ext_group[i]) {
54 			hif_ext_group = hif_state->hif_ext_group[i];
55 			for (j = 0; j < num_possible_cpus(); j++) {
56 				napi_stats = &(hif_ext_group->stats[j]);
57 				if (napi_stats->napi_schedules != 0)
58 					QDF_TRACE(QDF_MODULE_ID_HIF,
59 						QDF_TRACE_LEVEL_FATAL,
60 						"NAPI[%2d]CPU[%d]: "
61 						"%7d %7d %7d %7d ",
62 						i, j,
63 						napi_stats->napi_schedules,
64 						napi_stats->napi_polls,
65 						napi_stats->napi_completes,
66 						napi_stats->napi_workdone);
67 			}
68 		}
69 	}
70 }
71 qdf_export_symbol(hif_print_napi_stats);
72 
73 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
74 {
75 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
76 
77 	tasklet_schedule(&t_ctx->tasklet);
78 }
79 
80 /**
81  * hif_exec_tasklet() - grp tasklet
82  * data: context
83  *
84  * return: void
85  */
86 static void hif_exec_tasklet_fn(unsigned long data)
87 {
88 	struct hif_exec_context *hif_ext_group =
89 			(struct hif_exec_context *)data;
90 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
91 	unsigned int work_done;
92 
93 	work_done =
94 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
95 
96 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
97 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
98 		hif_ext_group->irq_enable(hif_ext_group);
99 	} else {
100 		hif_exec_tasklet_schedule(hif_ext_group);
101 	}
102 }
103 
104 /**
105  * hif_exec_poll() - grp tasklet
106  * data: context
107  *
108  * return: void
109  */
110 static int hif_exec_poll(struct napi_struct *napi, int budget)
111 {
112 	struct hif_napi_exec_context *exec_ctx =
113 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
114 	struct hif_exec_context *hif_ext_group = &exec_ctx->exec_ctx;
115 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
116 	int work_done;
117 	int normalized_budget = 0;
118 	int shift = hif_ext_group->scale_bin_shift;
119 	int cpu = smp_processor_id();
120 
121 	if (budget)
122 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
123 	work_done = hif_ext_group->handler(hif_ext_group->context,
124 							normalized_budget);
125 
126 	if (work_done < normalized_budget) {
127 		napi_complete(napi);
128 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
129 		hif_ext_group->irq_enable(hif_ext_group);
130 		hif_ext_group->stats[cpu].napi_completes++;
131 	} else {
132 		/* if the ext_group supports time based yield, claim full work
133 		 * done anyways */
134 		work_done = normalized_budget;
135 	}
136 
137 	hif_ext_group->stats[cpu].napi_polls++;
138 	hif_ext_group->stats[cpu].napi_workdone += work_done;
139 
140 	/* map internal budget to NAPI budget */
141 	if (work_done)
142 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
143 
144 	return work_done;
145 }
146 
147 /**
148  * hif_exec_napi_schedule() - schedule the napi exec instance
149  * @ctx: a hif_exec_context known to be of napi type
150  */
151 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
152 {
153 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
154 	ctx->stats[smp_processor_id()].napi_schedules++;
155 
156 	napi_schedule(&n_ctx->napi);
157 }
158 
159 /**
160  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
161  * @ctx: a hif_exec_context known to be of napi type
162  */
163 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
164 {
165 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
166 	int irq_ind;
167 
168 	if (ctx->inited) {
169 		napi_disable(&n_ctx->napi);
170 		ctx->inited = 0;
171 	}
172 
173 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
174 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
175 
176 	netif_napi_del(&(n_ctx->napi));
177 }
178 
179 struct hif_execution_ops napi_sched_ops = {
180 	.schedule = &hif_exec_napi_schedule,
181 	.kill = &hif_exec_napi_kill,
182 };
183 
184 #ifdef FEATURE_NAPI
185 /**
186  * hif_exec_napi_create() - allocate and initialize a napi exec context
187  * @scale: a binary shift factor to map NAPI budget from\to internal
188  *         budget
189  */
190 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
191 {
192 	struct hif_napi_exec_context *ctx;
193 
194 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
195 	if (ctx == NULL)
196 		return NULL;
197 
198 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
199 	ctx->exec_ctx.inited = true;
200 	ctx->exec_ctx.scale_bin_shift = scale;
201 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
202 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
203 		       QCA_NAPI_BUDGET);
204 	napi_enable(&ctx->napi);
205 
206 	return &ctx->exec_ctx;
207 }
208 #else
209 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
210 {
211 	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet");
212 	return hif_exec_tasklet_create();
213 }
214 #endif
215 
216 
217 /**
218  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
219  * @ctx: a hif_exec_context known to be of tasklet type
220  */
221 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
222 {
223 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
224 	int irq_ind;
225 
226 	if (ctx->inited) {
227 		tasklet_disable(&t_ctx->tasklet);
228 		tasklet_kill(&t_ctx->tasklet);
229 	}
230 	ctx->inited = false;
231 
232 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
233 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
234 }
235 
236 struct hif_execution_ops tasklet_sched_ops = {
237 	.schedule = &hif_exec_tasklet_schedule,
238 	.kill = &hif_exec_tasklet_kill,
239 };
240 
241 /**
242  * hif_exec_tasklet_schedule() -  allocate and initialize a tasklet exec context
243  */
244 static struct hif_exec_context *hif_exec_tasklet_create(void)
245 {
246 	struct hif_tasklet_exec_context *ctx;
247 
248 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
249 	if (ctx == NULL)
250 		return NULL;
251 
252 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
253 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
254 		     (unsigned long)ctx);
255 
256 	ctx->exec_ctx.inited = true;
257 
258 	return &ctx->exec_ctx;
259 }
260 
261 /**
262  * hif_exec_get_ctx() - retrieve an exec context based on an id
263  * @softc: the hif context owning the exec context
264  * @id: the id of the exec context
265  *
266  * mostly added to make it easier to rename or move the context array
267  */
268 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
269 					  uint8_t id)
270 {
271 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
272 
273 	if (id < hif_state->hif_num_extgroup)
274 		return hif_state->hif_ext_group[id];
275 
276 	return NULL;
277 }
278 
279 /**
280  * hif_configure_ext_group_interrupts() - API to configure external group
281  * interrpts
282  * @hif_ctx : HIF Context
283  *
284  * Return: status
285  */
286 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
287 {
288 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
289 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
290 	struct hif_exec_context *hif_ext_group;
291 	int i, status;
292 
293 	if (scn->ext_grp_irq_configured) {
294 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
295 		return QDF_STATUS_E_FAILURE;
296 	}
297 
298 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
299 		hif_ext_group = hif_state->hif_ext_group[i];
300 		status = 0;
301 		qdf_spinlock_create(&hif_ext_group->irq_lock);
302 		if (hif_ext_group->configured &&
303 		    hif_ext_group->irq_requested == false) {
304 			hif_ext_group->irq_enabled = true;
305 			status = hif_grp_irq_configure(scn, hif_ext_group);
306 		}
307 		if (status != 0) {
308 			HIF_ERROR("%s: failed for group %d", __func__, i);
309 			hif_ext_group->irq_enabled = false;
310 		}
311 	}
312 
313 	scn->ext_grp_irq_configured = true;
314 
315 	return QDF_STATUS_SUCCESS;
316 }
317 qdf_export_symbol(hif_configure_ext_group_interrupts);
318 
#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if unit-test command was used
 *				       to trigger fake-suspend command, if yes
 *				       then issue resume procedure.
 * @scn: opaque HIF software context
 *
 * This API checks if unit-test command was used to trigger fake-suspend command
 * and if answer is yes then it would trigger resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
/* no-op stub when suspend/resume unit testing is compiled out */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif
344 
345 /**
346  * hif_ext_group_interrupt_handler() - handler for related interrupts
347  * @irq: irq number of the interrupt
348  * @context: the associated hif_exec_group context
349  *
350  * This callback function takes care of dissabling the associated interrupts
351  * and scheduling the expected bottom half for the exec_context.
352  * This callback function also helps keep track of the count running contexts.
353  */
354 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
355 {
356 	struct hif_exec_context *hif_ext_group = context;
357 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
358 
359 	if (hif_ext_group->irq_requested) {
360 		hif_ext_group->irq_disable(hif_ext_group);
361 		/*
362 		 * if private ioctl has issued fake suspend command to put
363 		 * FW in D0-WOW state then here is our chance to bring FW out
364 		 * of WOW mode.
365 		 *
366 		 * The reason why you need to explicitly wake-up the FW is here:
367 		 * APSS should have been in fully awake through-out when
368 		 * fake APSS suspend command was issued (to put FW in WOW mode)
369 		 * hence organic way of waking-up the FW
370 		 * (as part-of APSS-host wake-up) won't happen because
371 		 * in reality APSS didn't really suspend.
372 		 */
373 		hif_check_and_trigger_ut_resume(scn);
374 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
375 
376 		hif_ext_group->sched_ops->schedule(hif_ext_group);
377 	}
378 
379 	return IRQ_HANDLED;
380 }
381 
382 /**
383  * hif_exec_kill() - grp tasklet kill
384  * scn: hif_softc
385  *
386  * return: void
387  */
388 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
389 {
390 	int i;
391 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
392 
393 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
394 		hif_state->hif_ext_group[i]->sched_ops->kill(
395 			hif_state->hif_ext_group[i]);
396 
397 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
398 }
399 
400 /**
401  * hif_register_ext_group() - API to register external group
402  * interrupt handler.
403  * @hif_ctx : HIF Context
404  * @numirq: number of irq's in the group
405  * @irq: array of irq values
406  * @handler: callback interrupt handler function
407  * @cb_ctx: context to passed in callback
408  * @type: napi vs tasklet
409  *
410  * Return: status
411  */
412 uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
413 		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
414 		void *cb_ctx, const char *context_name,
415 		enum hif_exec_type type, uint32_t scale)
416 {
417 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
418 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
419 	struct hif_exec_context *hif_ext_group;
420 
421 	if (scn->ext_grp_irq_configured) {
422 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
423 		return QDF_STATUS_E_FAILURE;
424 	}
425 
426 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
427 		HIF_ERROR("%s Max groups reached\n", __func__);
428 		return QDF_STATUS_E_FAILURE;
429 	}
430 
431 	if (numirq >= HIF_MAX_GRP_IRQ) {
432 		HIF_ERROR("%s invalid numirq\n", __func__);
433 		return QDF_STATUS_E_FAILURE;
434 	}
435 
436 	hif_ext_group = hif_exec_create(type, scale);
437 	if (hif_ext_group == NULL)
438 		return QDF_STATUS_E_FAILURE;
439 
440 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
441 		hif_ext_group;
442 
443 	hif_ext_group->numirq = numirq;
444 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
445 	hif_ext_group->context = cb_ctx;
446 	hif_ext_group->handler = handler;
447 	hif_ext_group->configured = true;
448 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
449 	hif_ext_group->hif = hif_ctx;
450 	hif_ext_group->context_name = context_name;
451 
452 	hif_state->hif_num_extgroup++;
453 	return QDF_STATUS_SUCCESS;
454 }
455 qdf_export_symbol(hif_register_ext_group);
456 
457 /**
458  * hif_exec_create() - create an execution context
459  * @type: the type of execution context to create
460  */
461 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
462 						uint32_t scale)
463 {
464 	HIF_INFO("%s: create exec_type %d budget %d\n",
465 			__func__, type, QCA_NAPI_BUDGET * scale);
466 
467 	switch (type) {
468 	case HIF_EXEC_NAPI_TYPE:
469 		return hif_exec_napi_create(scale);
470 
471 	case HIF_EXEC_TASKLET_TYPE:
472 		return hif_exec_tasklet_create();
473 	default:
474 		return NULL;
475 	}
476 }
477 
478 /**
479  * hif_exec_destroy() - free the hif_exec context
480  * @ctx: context to free
481  *
482  * please kill the context before freeing it to avoid a use after free.
483  */
484 void hif_exec_destroy(struct hif_exec_context *ctx)
485 {
486 	qdf_spinlock_destroy(&ctx->irq_lock);
487 	qdf_mem_free(ctx);
488 }
489 
490 /**
491  * hif_deregister_exec_group() - API to free the exec contexts
492  * @hif_ctx: HIF context
493  * @context_name: name of the module whose contexts need to be deregistered
494  *
495  * This function deregisters the contexts of the requestor identified
496  * based on the context_name & frees the memory.
497  *
498  * Return: void
499  */
500 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
501 				const char *context_name)
502 {
503 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
504 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
505 	struct hif_exec_context *hif_ext_group;
506 	int i;
507 
508 	for (i = 0; i < HIF_MAX_GROUP; i++) {
509 		hif_ext_group = hif_state->hif_ext_group[i];
510 
511 		if (!hif_ext_group)
512 			continue;
513 
514 		HIF_INFO("%s: Deregistering grp id %d name %s\n",
515 				__func__,
516 				hif_ext_group->grp_id,
517 				hif_ext_group->context_name);
518 
519 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
520 			hif_ext_group->sched_ops->kill(hif_ext_group);
521 			hif_state->hif_ext_group[i] = NULL;
522 			hif_exec_destroy(hif_ext_group);
523 			hif_state->hif_num_extgroup--;
524 		}
525 
526 	}
527 }
528 qdf_export_symbol(hif_deregister_exec_group);
529