xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <hif_exec.h>
20 #include <ce_main.h>
21 #include <hif_irq_affinity.h>
22 #include "qdf_module.h"
23 
/* Scaling between the budget NAPI grants and the larger internal budget
 * handed to the group handler (scaler == 1 << scale_bin_shift):
 *   NAPI budget 0 maps to internal budget 0
 *   NAPI budget 1 maps to internal budget [1, scaler - 1]
 *   NAPI budget 2 maps to internal budget [scaler, 2 * scaler - 1], etc
 * The two macros below are inverses of each other under this binning.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))

/* forward declaration: needed by the !FEATURE_NAPI fallback below */
static struct hif_exec_context *hif_exec_tasklet_create(void);
34 
35 /**
36  * hif_print_napi_stats() - print NAPI stats
37  * @hif_ctx: hif context
38  *
39  * return: void
40  */
41 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
42 {
43 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
44 	struct hif_exec_context *hif_ext_group;
45 	struct qca_napi_stat *napi_stats;
46 	int i, j;
47 
48 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
49 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
50 
51 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
52 		if (hif_state->hif_ext_group[i]) {
53 			hif_ext_group = hif_state->hif_ext_group[i];
54 			for (j = 0; j < num_possible_cpus(); j++) {
55 				napi_stats = &(hif_ext_group->stats[j]);
56 				if (napi_stats->napi_schedules != 0)
57 					QDF_TRACE(QDF_MODULE_ID_HIF,
58 						QDF_TRACE_LEVEL_FATAL,
59 						"NAPI[%2d]CPU[%d]: "
60 						"%7d %7d %7d %7d ",
61 						i, j,
62 						napi_stats->napi_schedules,
63 						napi_stats->napi_polls,
64 						napi_stats->napi_completes,
65 						napi_stats->napi_workdone);
66 			}
67 		}
68 	}
69 }
70 qdf_export_symbol(hif_print_napi_stats);
71 
72 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
73 {
74 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
75 
76 	tasklet_schedule(&t_ctx->tasklet);
77 }
78 
/**
 * hif_exec_tasklet_fn() - bottom half run as a group tasklet
 * @data: the hif_exec_context, cast to unsigned long at tasklet_init() time
 *
 * Runs the group handler with the maximum budget. If the handler reports
 * that all work is complete, re-enables the group interrupts and drops
 * the active tasklet count; otherwise reschedules itself.
 *
 * return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		/* balances the inc done in hif_ext_group_interrupt_handler() */
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		/* work remains: run again on the next tasklet pass */
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}
102 
/**
 * hif_exec_poll() - napi poll callback for a hif exec group
 * @napi: napi_struct embedded in a hif_napi_exec_context
 * @budget: budget granted by the NAPI core
 *
 * Scales the NAPI budget up to the group's internal budget using
 * scale_bin_shift, runs the group handler, and on under-budget
 * completion finishes NAPI and re-enables the group interrupts.
 *
 * Return: work done, converted back to NAPI budget units
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();

	/* NAPI budget 0 stays 0; anything else is scaled up by the shift */
	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
	work_done = hif_ext_group->handler(hif_ext_group->context,
							normalized_budget);

	if (work_done < normalized_budget) {
		/* under budget: pending work is drained, stop polling */
		napi_complete(napi);
		/* balances the inc done in hif_ext_group_interrupt_handler() */
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full work
		 * done anyways */
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += work_done;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	return work_done;
}
145 
146 /**
147  * hif_exec_napi_schedule() - schedule the napi exec instance
148  * @ctx: a hif_exec_context known to be of napi type
149  */
150 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
151 {
152 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
153 	ctx->stats[smp_processor_id()].napi_schedules++;
154 
155 	napi_schedule(&n_ctx->napi);
156 }
157 
158 /**
159  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
160  * @ctx: a hif_exec_context known to be of napi type
161  */
162 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
163 {
164 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
165 	int irq_ind;
166 
167 	if (ctx->inited) {
168 		napi_disable(&n_ctx->napi);
169 		ctx->inited = 0;
170 	}
171 
172 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
173 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
174 
175 	netif_napi_del(&(n_ctx->napi));
176 }
177 
/* scheduling ops installed on exec contexts of napi type */
struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};
182 
183 #ifdef FEATURE_NAPI
184 /**
185  * hif_exec_napi_create() - allocate and initialize a napi exec context
186  * @scale: a binary shift factor to map NAPI budget from\to internal
187  *         budget
188  */
189 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
190 {
191 	struct hif_napi_exec_context *ctx;
192 
193 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
194 	if (ctx == NULL)
195 		return NULL;
196 
197 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
198 	ctx->exec_ctx.inited = true;
199 	ctx->exec_ctx.scale_bin_shift = scale;
200 	init_dummy_netdev(&(ctx->netdev));
201 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
202 		       QCA_NAPI_BUDGET);
203 	napi_enable(&ctx->napi);
204 
205 	return &ctx->exec_ctx;
206 }
207 #else
208 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
209 {
210 	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet");
211 	return hif_exec_tasklet_create();
212 }
213 #endif
214 
215 
216 /**
217  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
218  * @ctx: a hif_exec_context known to be of tasklet type
219  */
220 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
221 {
222 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
223 	int irq_ind;
224 
225 	if (ctx->inited) {
226 		tasklet_disable(&t_ctx->tasklet);
227 		tasklet_kill(&t_ctx->tasklet);
228 	}
229 	ctx->inited = false;
230 
231 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
232 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
233 }
234 
/* scheduling ops installed on exec contexts of tasklet type */
struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};
239 
/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 *
 * Return: the new exec context, or NULL on allocation failure
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (ctx == NULL)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	/* the context pointer itself is the tasklet's data argument */
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}
259 
260 /**
261  * hif_exec_get_ctx() - retrieve an exec context based on an id
262  * @softc: the hif context owning the exec context
263  * @id: the id of the exec context
264  *
265  * mostly added to make it easier to rename or move the context array
266  */
267 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
268 					  uint8_t id)
269 {
270 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
271 
272 	if (id < hif_state->hif_num_extgroup)
273 		return hif_state->hif_ext_group[id];
274 
275 	return NULL;
276 }
277 
278 /**
279  * hif_configure_ext_group_interrupts() - API to configure external group
280  * interrpts
281  * @hif_ctx : HIF Context
282  *
283  * Return: status
284  */
285 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
286 {
287 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
288 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
289 	struct hif_exec_context *hif_ext_group;
290 	int i, status;
291 
292 	if (scn->ext_grp_irq_configured) {
293 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
294 		return QDF_STATUS_E_FAILURE;
295 	}
296 
297 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
298 		hif_ext_group = hif_state->hif_ext_group[i];
299 		status = 0;
300 		qdf_spinlock_create(&hif_ext_group->irq_lock);
301 		if (hif_ext_group->configured &&
302 		    hif_ext_group->irq_requested == false) {
303 			hif_ext_group->irq_enabled = true;
304 			status = hif_grp_irq_configure(scn, hif_ext_group);
305 		}
306 		if (status != 0) {
307 			HIF_ERROR("%s: failed for group %d", __func__, i);
308 			hif_ext_group->irq_enabled = false;
309 		}
310 	}
311 
312 	scn->ext_grp_irq_configured = true;
313 
314 	return QDF_STATUS_SUCCESS;
315 }
316 qdf_export_symbol(hif_configure_ext_group_interrupts);
317 
318 /**
319  * hif_ext_group_interrupt_handler() - handler for related interrupts
320  * @irq: irq number of the interrupt
321  * @context: the associated hif_exec_group context
322  *
323  * This callback function takes care of dissabling the associated interrupts
324  * and scheduling the expected bottom half for the exec_context.
325  * This callback function also helps keep track of the count running contexts.
326  */
327 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
328 {
329 	struct hif_exec_context *hif_ext_group = context;
330 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
331 
332 	if (hif_ext_group->irq_requested) {
333 		hif_ext_group->irq_disable(hif_ext_group);
334 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
335 
336 		hif_ext_group->sched_ops->schedule(hif_ext_group);
337 	}
338 
339 	return IRQ_HANDLED;
340 }
341 
342 /**
343  * hif_exec_kill() - grp tasklet kill
344  * scn: hif_softc
345  *
346  * return: void
347  */
348 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
349 {
350 	int i;
351 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
352 
353 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
354 		hif_state->hif_ext_group[i]->sched_ops->kill(
355 			hif_state->hif_ext_group[i]);
356 
357 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
358 }
359 
/**
 * hif_register_ext_group() - API to register external group
 * interrupt handler.
 * @hif_ctx : HIF Context
 * @numirq: number of irq's in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to passed in callback
 * @context_name: requestor name; later used as the deregistration key
 * @type: napi vs tasklet
 * @scale: binary shift mapping NAPI budget to internal budget (napi only)
 *
 * Return: status
 */
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	/* registration is only legal before irqs are configured */
	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		HIF_ERROR("%s Max groups reached\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		HIF_ERROR("%s invalid numirq\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* allocates a napi- or tasklet-backed context based on @type */
	hif_ext_group = hif_exec_create(type, scale);
	if (hif_ext_group == NULL)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
415 qdf_export_symbol(hif_register_ext_group);
416 
417 /**
418  * hif_exec_create() - create an execution context
419  * @type: the type of execution context to create
420  */
421 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
422 						uint32_t scale)
423 {
424 	HIF_INFO("%s: create exec_type %d budget %d\n",
425 			__func__, type, QCA_NAPI_BUDGET * scale);
426 
427 	switch (type) {
428 	case HIF_EXEC_NAPI_TYPE:
429 		return hif_exec_napi_create(scale);
430 
431 	case HIF_EXEC_TASKLET_TYPE:
432 		return hif_exec_tasklet_create();
433 	default:
434 		return NULL;
435 	}
436 }
437 
/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * please kill the context before freeing it to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	/* lock was created in hif_configure_ext_group_interrupts() */
	qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}
449 
450 /**
451  * hif_deregister_exec_group() - API to free the exec contexts
452  * @hif_ctx: HIF context
453  * @context_name: name of the module whose contexts need to be deregistered
454  *
455  * This function deregisters the contexts of the requestor identified
456  * based on the context_name & frees the memory.
457  *
458  * Return: void
459  */
460 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
461 				const char *context_name)
462 {
463 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
464 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
465 	struct hif_exec_context *hif_ext_group;
466 	int i;
467 
468 	for (i = 0; i < HIF_MAX_GROUP; i++) {
469 		hif_ext_group = hif_state->hif_ext_group[i];
470 
471 		if (!hif_ext_group)
472 			continue;
473 
474 		HIF_INFO("%s: Deregistering grp id %d name %s\n",
475 				__func__,
476 				hif_ext_group->grp_id,
477 				hif_ext_group->context_name);
478 
479 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
480 			hif_ext_group->sched_ops->kill(hif_ext_group);
481 			hif_state->hif_ext_group[i] = NULL;
482 			hif_exec_destroy(hif_ext_group);
483 			hif_state->hif_num_extgroup--;
484 		}
485 
486 	}
487 }
488 qdf_export_symbol(hif_deregister_exec_group);
489