/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include <hif_irq_affinity.h>
#include "qdf_module.h"
#include "qdf_net_if.h"
/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1, scaler - 1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
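/*
 * For example (illustrative values): with a scale shift of 2 (scaler = 4),
 * a NAPI budget of 64 maps to an internal budget of (64 << 2) - 1 = 255,
 * and an internal work_done of 255 maps back to (255 + 1) >> 2 = 64 for
 * the NAPI core.
 */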

static struct hif_exec_context *hif_exec_tasklet_create(void);

/**
 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
 * @hif_state: hif context
 *
 * Return: void
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
		"0-2   ms",
		"3-10  ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "Current timestamp: %lld", cur_tstamp);

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Interrupts in the HIF Group");

			for (j = 0; j < hif_ext_group->numirq; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "  %s",
					  hif_ext_group->irq_name
					  (hif_ext_group->irq[j]));
			}

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Last serviced timestamp: %lld",
				  hif_ext_group->tstamp);

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Latency Bucket     | Time elapsed");

			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "%s     |    %lld", time_str[j],
					  hif_ext_group->
					  sched_latency_stats[j]);
			}
		}
	}
}
#else
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
#endif

/**
 * hif_clear_napi_stats() - reset NAPI stats
 * @hif_ctx: hif context
 *
 * Return: void
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	size_t i;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			return;

		qdf_mem_set(hif_ext_group->sched_latency_stats,
			    sizeof(hif_ext_group->sched_latency_stats),
			    0x0);
	}
}

qdf_export_symbol(hif_clear_napi_stats);

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}

/**
 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &hif_ext_group->stats[cpu_id];
	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;

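	/*
	 * Bucket the poll time in 500 us steps: 0-499 us lands in bucket 0,
	 * 500-999 us in bucket 1, and anything at or beyond
	 * (QCA_NAPI_NUM_BUCKETS - 1) * 500 us is clamped into the last bucket.
	 */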
	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;
	++napi_stat->poll_time_buckets[bucket];
}

/**
 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * Return: true if NAPI needs to yield, else false
 */
static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
{
	bool time_limit_reached = false;
	unsigned long long poll_time_ns;
	int cpu_id = qdf_get_cpu();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	struct hif_config_info *cfg = &scn->hif_config;

	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
	time_limit_reached =
		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns;

	if (time_limit_reached) {
		hif_ext_group->stats[cpu_id].time_limit_reached++;
		hif_ext_group->force_break = true;
	}

	return time_limit_reached;
}

bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	bool ret_val = false;

	if (grp_id >= hif_state->hif_num_extgroup || grp_id >= HIF_MAX_GROUP)
		return false;

	hif_ext_group = hif_state->hif_ext_group[grp_id];

	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
		ret_val = hif_exec_poll_should_yield(hif_ext_group);

	return ret_val;
}

/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = sched_clock();
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	/*
	 * The max value of a uint32_t poll_time_bucket is 4294967295, so each
	 * bucket value needs 10 chars + 1 separator = 11 chars, plus 1 char
	 * for the '\0' terminator.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");

	for (i = 0;
	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
	     i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < num_possible_cpus(); j++) {
			napi_stats = &hif_ext_group->stats[j];
			if (!napi_stats->napi_schedules)
				continue;

			hif_get_poll_times_hist_str(napi_stats,
						    hist_str,
						    sizeof(hist_str));
			QDF_TRACE(QDF_MODULE_ID_HIF,
				  QDF_TRACE_LEVEL_ERROR,
				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
				  i, j,
				  napi_stats->napi_schedules,
				  napi_stats->napi_polls,
				  napi_stats->napi_completes,
				  napi_stats->napi_workdone,
				  napi_stats->time_limit_reached,
				  qdf_do_div(napi_stats->napi_max_poll_time,
					     1000),
				  hist_str);
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);

#else

static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: %7d %7d %7d %7d ",
						  i, j,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}

/**
 * hif_exec_tasklet_fn() - grp tasklet bottom half
 * @data: context, cast from the hif_exec_context pointer
 *
 * Return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}

/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

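	/*
	 * Guard against the millisecond timestamp moving backwards (wrap
	 * around) since the interrupt was scheduled; in that case approximate
	 * the elapsed time across the wrap instead of using a negative delta.
	 */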
	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif

/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif

/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Return: work done, mapped from the internal budget back to the NAPI budget
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	if (budget)
		normalized_budget =
			NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget);

	actual_dones = work_done;

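	/*
	 * If the handler stayed within the internal budget and no time-based
	 * break was forced, this poll is done: complete NAPI, drop the active
	 * tasklet count and let the group re-enable its interrupts. Otherwise
	 * claim the full budget so the NAPI core schedules another poll.
	 */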
	if (!hif_ext_group->force_break && work_done < normalized_budget) {
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/*
		 * If the ext_group supports time based yield, claim full
		 * work done anyway.
		 */
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}

/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;

	napi_schedule(&n_ctx->napi);
}

/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	netif_napi_del(&(n_ctx->napi));
}

struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};

#ifdef FEATURE_NAPI
/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget from/to internal
 *         budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
		       QCA_NAPI_BUDGET);
	napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__);
	return hif_exec_tasklet_create();
}
#endif

/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}

struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};

/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}

/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * Mostly added to make it easier to rename or move the context array.
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}

int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id]->os_irq[0];
	return -EINVAL;
}

qdf_export_symbol(hif_get_int_ctx_irq_num);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    !hif_ext_group->irq_requested) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			HIF_ERROR("%s: failed for group %d", __func__, i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_configure_ext_group_interrupts);

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
 *				       to trigger the fake-suspend command and,
 *				       if so, issue the resume procedure.
 * @scn: opaque HIF software context
 *
 * This API checks whether a unit-test command was used to trigger the
 * fake-suspend command; if so, it triggers the resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif

/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * It also keeps track of the count of running contexts.
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_ext_group->irq_disable(hif_ext_group);
		/*
		 * If a private ioctl has issued a fake suspend command to put
		 * the FW in D0-WOW state, this is our chance to bring the FW
		 * out of WOW mode.
		 *
		 * The FW has to be woken explicitly here: the APSS stayed
		 * fully awake the whole time the fake APSS suspend command was
		 * in effect (it only put the FW in WOW mode), so the organic
		 * wake-up path (as part of an APSS-host wake-up) never runs,
		 * because the APSS never actually suspended.
		 */
		hif_check_and_trigger_ut_resume(scn);
		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}

/**
 * hif_exec_kill() - kill all registered exec contexts
 * @hif_ctx: opaque HIF context
 *
 * Return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}

/**
 * hif_register_ext_group() - API to register external group
 * interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of irq's in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: name of the module registering the context (used later for
 *                deregistration)
 * @type: napi vs tasklet
 * @scale: binary shift factor to map NAPI budget to the internal budget
 *
 * Return: status
 */
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		HIF_ERROR("%s Max groups reached\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		HIF_ERROR("%s invalid numirq\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
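
/*
 * Illustrative usage sketch (hypothetical values; the IRQ numbers, handler
 * and callback context below are examples only and are not taken from any
 * real target configuration):
 *
 *	uint32_t irqs[2] = { 65, 66 };
 *
 *	hif_register_ext_group(hif_ctx, 2, irqs, my_grp_handler, my_cb_ctx,
 *			       "my_module", HIF_EXEC_NAPI_TYPE, 0);
 *
 * A scale of 0 keeps the NAPI-to-internal budget mapping close to 1:1.
 */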

/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: used only by the napi type; binary shift factor to map NAPI budget
 *         to the internal budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale)
{
	HIF_INFO("%s: create exec_type %d budget %d\n",
			__func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);

	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}

/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * The context must be killed before it is destroyed to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}

/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name & frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		HIF_INFO("%s: Deregistering grp id %d name %s\n",
				__func__,
				hif_ext_group->grp_id,
				hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);