xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <hif_exec.h>
20 #include <ce_main.h>
21 #include <hif_irq_affinity.h>
22 #include "qdf_module.h"
23 #include "qdf_net_if.h"
24 /* Mapping of NAPI budget to internal budget: NAPI budget 0 maps to
25  * internal budget 0, NAPI budget 1 to internal budget [1, scaler - 1],
26  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
27  */
28 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
29 	(((n) << (s)) - 1)
30 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
31 	(((n) + 1) >> (s))
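
/*
 * Worked example (illustrative, not from the original sources): with a
 * scale shift of s = 2 (i.e. scaler = 1 << 2 = 4), a NAPI budget of 64
 * maps to an internal budget of (64 << 2) - 1 = 255, and an internal
 * work_done of 255 maps back to (255 + 1) >> 2 = 64 NAPI units.
 */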
32 
33 static struct hif_exec_context *hif_exec_tasklet_create(void);
34 
35 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
36 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
37 
38 static inline
39 int hif_get_next_record_index(qdf_atomic_t *table_index,
40 			      int array_size)
41 {
42 	int record_index = qdf_atomic_inc_return(table_index);
43 
44 	return record_index & (array_size - 1);
45 }
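
/*
 * Note (added for clarity): the "& (array_size - 1)" wrap above assumes
 * array_size is a power of two. For example, with a hypothetical
 * array_size of 1024, atomic counter values 1023, 1024 and 1025 map to
 * slots 1023, 0 and 1 respectively.
 */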
46 
47 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
48 			   struct hif_event_record *event, uint8_t intr_grp_id)
49 {
50 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
51 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
52 	struct hif_exec_context *hif_ext_group;
53 	struct hif_event_history *hist_ev;
54 	struct hif_event_record *record;
55 	int record_index;
56 
57 	if (!hif_state->hif_num_extgroup)
58 		return;
59 
60 	if (scn->event_disable_mask & BIT(event->type))
61 		return;
62 
63 	if (intr_grp_id >= HIF_NUM_INT_CONTEXTS) {
64 		hif_err("Invalid interrupt group id %d", intr_grp_id);
65 		return;
66 	}
67 
68 	hif_ext_group = hif_state->hif_ext_group[intr_grp_id];
69 	hist_ev = hif_ext_group->evt_hist;
70 
71 	record_index = hif_get_next_record_index(
72 			&hist_ev->index, HIF_EVENT_HIST_MAX);
73 
74 	record = &hist_ev->event[record_index];
75 
76 	record->hal_ring_id = event->hal_ring_id;
77 	record->hp = event->hp;
78 	record->tp = event->tp;
79 	record->cpu_id = qdf_get_cpu();
80 	record->timestamp = qdf_get_log_timestamp();
81 	record->type = event->type;
82 }
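
/*
 * Cross-reference note (added for clarity): callers later in this file
 * (hif_exec_poll() and hif_ext_group_interrupt_handler()) record events
 * through the hif_record_event() wrapper, presumably built on top of
 * this function, passing 0 for the ring id, head and tail pointers when
 * logging pure scheduling events such as HIF_EVENT_BH_SCHED and
 * HIF_EVENT_IRQ_TRIGGER.
 */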
83 
84 static void hif_event_history_init(struct hif_exec_context *hif_ext_grp)
85 {
86 	hif_ext_grp->evt_hist = &hif_event_desc_history[hif_ext_grp->grp_id];
87 	qdf_atomic_set(&hif_ext_grp->evt_hist->index, -1);
88 }
89 #else
90 static inline void hif_event_history_init(struct hif_exec_context *hif_ext_grp)
91 {
92 }
93 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
94 
95 /**
96  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
97  * @hif_state: hif context
98  *
99  * return: void
100  */
101 #ifdef HIF_LATENCY_PROFILE_ENABLE
102 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
103 {
104 	struct hif_exec_context *hif_ext_group;
105 	int i, j;
106 	int64_t cur_tstamp;
107 
108 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
109 		"0-2   ms",
110 		"3-10  ms",
111 		"11-20 ms",
112 		"21-50 ms",
113 		"51-100 ms",
114 		"101-250 ms",
115 		"251-500 ms",
116 		"> 500 ms"
117 	};
118 
119 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
120 
121 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
122 		  "Current timestamp: %lld", cur_tstamp);
123 
124 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
125 		if (hif_state->hif_ext_group[i]) {
126 			hif_ext_group = hif_state->hif_ext_group[i];
127 
128 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
129 				  "Interrupts in the HIF Group");
130 
131 			for (j = 0; j < hif_ext_group->numirq; j++) {
132 				QDF_TRACE(QDF_MODULE_ID_HIF,
133 					  QDF_TRACE_LEVEL_FATAL,
134 					  "  %s",
135 					  hif_ext_group->irq_name
136 					  (hif_ext_group->irq[j]));
137 			}
138 
139 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
140 				  "Last serviced timestamp: %lld",
141 				  hif_ext_group->tstamp);
142 
143 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
144 				  "Latency Bucket     | Time elapsed");
145 
146 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
147 				QDF_TRACE(QDF_MODULE_ID_HIF,
148 					  QDF_TRACE_LEVEL_FATAL,
149 					  "%s     |    %lld", time_str[j],
150 					  hif_ext_group->
151 					  sched_latency_stats[j]);
152 			}
153 		}
154 	}
155 }
156 #else
157 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
158 {
159 }
160 #endif
161 
162 /**
163  * hif_clear_napi_stats() - reset NAPI stats
164  * @hif_ctx: hif context
165  *
166  * return: void
167  */
168 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
169 {
170 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
171 	struct hif_exec_context *hif_ext_group;
172 	size_t i;
173 
174 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
175 		hif_ext_group = hif_state->hif_ext_group[i];
176 
177 		if (!hif_ext_group)
178 			return;
179 
180 		qdf_mem_set(hif_ext_group->sched_latency_stats,
181 			    sizeof(hif_ext_group->sched_latency_stats),
182 			    0x0);
183 	}
184 }
185 
186 qdf_export_symbol(hif_clear_napi_stats);
187 
188 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
189 /**
190  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
191  * @stats: NAPI stats to get poll time buckets
192  * @buf: buffer to fill histogram string
193  * @buf_len: length of the buffer
194  *
195  * Return: void
196  */
197 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
198 					uint8_t buf_len)
199 {
200 	int i;
201 	int str_index = 0;
202 
203 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
204 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
205 					   "%u|", stats->poll_time_buckets[i]);
206 }
207 
208 /**
209  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
210  * @hif_ext_group: hif_ext_group of type NAPI
211  *
212  * The function is called at the end of a NAPI poll to calculate poll time
213  * buckets.
214  *
215  * Return: void
216  */
217 static
218 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
219 {
220 	struct qca_napi_stat *napi_stat;
221 	unsigned long long poll_time_ns;
222 	uint32_t poll_time_us;
223 	uint32_t bucket_size_us = 500;
224 	uint32_t bucket;
225 	uint32_t cpu_id = qdf_get_cpu();
226 
227 	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
228 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
229 
230 	napi_stat = &hif_ext_group->stats[cpu_id];
231 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
232 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
233 
234 	bucket = poll_time_us / bucket_size_us;
235 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
236 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
237 	++napi_stat->poll_time_buckets[bucket];
238 }
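
/*
 * Worked example (illustrative): with bucket_size_us = 500, a poll that
 * took 1.2 ms (1200 us) lands in bucket 1200 / 500 = 2, i.e. the
 * 1000-1499 us bucket; any poll at or beyond the last bucket boundary is
 * clamped into bucket QCA_NAPI_NUM_BUCKETS - 1.
 */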
239 
240 /**
241  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
242  * @hif_ext_group: hif_ext_group of type NAPI
243  *
244  * Return: true if NAPI needs to yield, else false
245  */
246 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
247 {
248 	bool time_limit_reached = false;
249 	unsigned long long poll_time_ns;
250 	int cpu_id = qdf_get_cpu();
251 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
252 	struct hif_config_info *cfg = &scn->hif_config;
253 
254 	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
255 	time_limit_reached =
256 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
257 
258 	if (time_limit_reached) {
259 		hif_ext_group->stats[cpu_id].time_limit_reached++;
260 		hif_ext_group->force_break = true;
261 	}
262 
263 	return time_limit_reached;
264 }
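
/*
 * Note (added for clarity): when the time limit is hit, force_break is
 * set so that hif_exec_poll() below treats the poll as incomplete,
 * claims the full normalized budget and lets NAPI reschedule the
 * context instead of re-enabling the group interrupts.
 */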
265 
266 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
267 {
268 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
269 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
270 	struct hif_exec_context *hif_ext_group;
271 	bool ret_val = false;
272 
273 	if (!(grp_id < hif_state->hif_num_extgroup) ||
274 	    !(grp_id < HIF_MAX_GROUP))
275 		return false;
276 
277 	hif_ext_group = hif_state->hif_ext_group[grp_id];
278 
279 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
280 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
281 
282 	return ret_val;
283 }
284 
285 /**
286  * hif_exec_update_service_start_time() - Update NAPI poll start time
287  * @hif_ext_group: hif_ext_group of type NAPI
288  *
289  * The function is called at the beginning of a NAPI poll to record the poll
290  * start time.
291  *
292  * Return: None
293  */
294 static inline
295 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
296 {
297 	hif_ext_group->poll_start_time = sched_clock();
298 }
299 
300 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
301 {
302 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
303 	struct hif_exec_context *hif_ext_group;
304 	struct qca_napi_stat *napi_stats;
305 	int i, j;
306 
307 	/*
308 	 * Max value of uint32_t (poll_time_bucket) = 4294967295.
309 	 * Thus we need 10 chars + 1 separator = 11 chars for each bucket value,
310 	 * plus 1 char for the '\0' terminator.
311 	 */
312 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
313 
314 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
315 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
316 
317 	for (i = 0;
318 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
319 	     i++) {
320 		hif_ext_group = hif_state->hif_ext_group[i];
321 		for (j = 0; j < num_possible_cpus(); j++) {
322 			napi_stats = &hif_ext_group->stats[j];
323 			if (!napi_stats->napi_schedules)
324 				continue;
325 
326 			hif_get_poll_times_hist_str(napi_stats,
327 						    hist_str,
328 						    sizeof(hist_str));
329 			QDF_TRACE(QDF_MODULE_ID_HIF,
330 				  QDF_TRACE_LEVEL_ERROR,
331 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
332 				  i, j,
333 				  napi_stats->napi_schedules,
334 				  napi_stats->napi_polls,
335 				  napi_stats->napi_completes,
336 				  napi_stats->napi_workdone,
337 				  napi_stats->time_limit_reached,
338 				  qdf_do_div(napi_stats->napi_max_poll_time,
339 					     1000),
340 				  hist_str);
341 		}
342 	}
343 
344 	hif_print_napi_latency_stats(hif_state);
345 }
346 
347 qdf_export_symbol(hif_print_napi_stats);
348 
349 #else
350 
351 static inline
352 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
353 				 uint8_t buf_len)
354 {
355 }
356 
357 static inline
358 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
359 {
360 }
361 
362 static inline
363 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
364 {
365 }
366 
367 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
368 {
369 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
370 	struct hif_exec_context *hif_ext_group;
371 	struct qca_napi_stat *napi_stats;
372 	int i, j;
373 
374 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
375 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
376 
377 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
378 		if (hif_state->hif_ext_group[i]) {
379 			hif_ext_group = hif_state->hif_ext_group[i];
380 			for (j = 0; j < num_possible_cpus(); j++) {
381 				napi_stats = &(hif_ext_group->stats[j]);
382 				if (napi_stats->napi_schedules != 0)
383 					QDF_TRACE(QDF_MODULE_ID_HIF,
384 						QDF_TRACE_LEVEL_FATAL,
385 						"NAPI[%2d]CPU[%d]: "
386 						"%7d %7d %7d %7d ",
387 						i, j,
388 						napi_stats->napi_schedules,
389 						napi_stats->napi_polls,
390 						napi_stats->napi_completes,
391 						napi_stats->napi_workdone);
392 			}
393 		}
394 	}
395 
396 	hif_print_napi_latency_stats(hif_state);
397 }
398 qdf_export_symbol(hif_print_napi_stats);
399 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
400 
401 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
402 {
403 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
404 
405 	tasklet_schedule(&t_ctx->tasklet);
406 }
407 
408 /**
409  * hif_exec_tasklet_fn() - bottom half for a tasklet-type exec context
410  * @data: the associated hif_exec_context, cast to unsigned long
411  *
412  * Return: void
413  */
414 static void hif_exec_tasklet_fn(unsigned long data)
415 {
416 	struct hif_exec_context *hif_ext_group =
417 			(struct hif_exec_context *)data;
418 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
419 	unsigned int work_done;
420 
421 	work_done =
422 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
423 
424 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
425 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
426 		hif_ext_group->irq_enable(hif_ext_group);
427 	} else {
428 		hif_exec_tasklet_schedule(hif_ext_group);
429 	}
430 }
431 
432 /**
433  * hif_latency_profile_measure() - calculate latency and update histogram
434  * @hif_ext_group: hif exec context
435  *
436  * Return: None
437  */
438 #ifdef HIF_LATENCY_PROFILE_ENABLE
439 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
440 {
441 	int64_t cur_tstamp;
442 	int64_t time_elapsed;
443 
444 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
445 
446 	if (cur_tstamp > hif_ext_group->tstamp)
447 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
448 	else
449 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
450 
451 	hif_ext_group->tstamp = cur_tstamp;
452 
453 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
454 		hif_ext_group->sched_latency_stats[0]++;
455 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
456 		hif_ext_group->sched_latency_stats[1]++;
457 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
458 		hif_ext_group->sched_latency_stats[2]++;
459 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
460 		hif_ext_group->sched_latency_stats[3]++;
461 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
462 		hif_ext_group->sched_latency_stats[4]++;
463 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
464 		hif_ext_group->sched_latency_stats[5]++;
465 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
466 		hif_ext_group->sched_latency_stats[6]++;
467 	else
468 		hif_ext_group->sched_latency_stats[7]++;
469 }
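
/*
 * Worked example (illustrative, going by the bucket labels printed in
 * hif_print_napi_latency_stats()): a scheduling delay of 15 ms since
 * the last hif_latency_profile_start() would fall in the "11-20 ms"
 * bucket and increment sched_latency_stats[2].
 */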
470 #else
471 static inline
472 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
473 {
474 }
475 #endif
476 
477 /**
478  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
479  * @hif_ext_group: hif exec context
480  *
481  * Return: None
482  */
483 #ifdef HIF_LATENCY_PROFILE_ENABLE
484 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
485 {
486 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
487 }
488 #else
489 static inline
490 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
491 {
492 }
493 #endif
494 
495 #ifdef FEATURE_NAPI
496 /**
497  * hif_exec_poll() - napi poll
498  * @napi: napi struct
499  * @budget: budget for napi
500  *
501  * Return: work done, mapped from the internal budget back to NAPI budget units
502  */
503 static int hif_exec_poll(struct napi_struct *napi, int budget)
504 {
505 	struct hif_napi_exec_context *napi_exec_ctx =
506 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
507 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
508 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
509 	int work_done;
510 	int normalized_budget = 0;
511 	int actual_dones;
512 	int shift = hif_ext_group->scale_bin_shift;
513 	int cpu = smp_processor_id();
514 
515 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
516 			 0, 0, 0, HIF_EVENT_BH_SCHED);
517 
518 	hif_ext_group->force_break = false;
519 	hif_exec_update_service_start_time(hif_ext_group);
520 
521 	if (budget)
522 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
523 
524 	hif_latency_profile_measure(hif_ext_group);
525 
526 	work_done = hif_ext_group->handler(hif_ext_group->context,
527 					   normalized_budget);
528 
529 	actual_dones = work_done;
530 
531 	if (!hif_ext_group->force_break && work_done < normalized_budget) {
532 		napi_complete(napi);
533 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
534 		hif_ext_group->irq_enable(hif_ext_group);
535 		hif_ext_group->stats[cpu].napi_completes++;
536 	} else {
537 		/* if the ext_group supports time-based yield, claim full work
538 		 * done anyway */
539 		work_done = normalized_budget;
540 	}
541 
542 	hif_ext_group->stats[cpu].napi_polls++;
543 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
544 
545 	/* map internal budget to NAPI budget */
546 	if (work_done)
547 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
548 
549 	hif_exec_fill_poll_time_histogram(hif_ext_group);
550 
551 	return work_done;
552 }
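
/*
 * Illustrative trace (assuming scale_bin_shift = 0): a NAPI budget of 64
 * normalizes to an internal budget of (64 << 0) - 1 = 63. If the group
 * handler reports 30 units of work and force_break is not set, the poll
 * completes and (30 + 1) >> 0 = 31 (< 64) is returned, so NAPI stops
 * polling. If the handler is cut short by force_break, work_done is
 * bumped to 63 and (63 + 1) >> 0 = 64 == budget is returned, so NAPI
 * polls the context again.
 */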
553 
554 /**
555  * hif_exec_napi_schedule() - schedule the napi exec instance
556  * @ctx: a hif_exec_context known to be of napi type
557  */
558 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
559 {
560 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
561 	ctx->stats[smp_processor_id()].napi_schedules++;
562 
563 	napi_schedule(&n_ctx->napi);
564 }
565 
566 /**
567  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
568  * @ctx: a hif_exec_context known to be of napi type
569  */
570 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
571 {
572 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
573 	int irq_ind;
574 
575 	if (ctx->inited) {
576 		napi_disable(&n_ctx->napi);
577 		ctx->inited = 0;
578 	}
579 
580 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
581 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
582 
583 	netif_napi_del(&(n_ctx->napi));
584 }
585 
586 struct hif_execution_ops napi_sched_ops = {
587 	.schedule = &hif_exec_napi_schedule,
588 	.kill = &hif_exec_napi_kill,
589 };
590 
591 /**
592  * hif_exec_napi_create() - allocate and initialize a napi exec context
593  * @scale: a binary shift factor to map NAPI budget to/from internal
594  *         budget
595  */
596 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
597 {
598 	struct hif_napi_exec_context *ctx;
599 
600 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
601 	if (!ctx)
602 		return NULL;
603 
604 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
605 	ctx->exec_ctx.inited = true;
606 	ctx->exec_ctx.scale_bin_shift = scale;
607 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
608 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
609 		       QCA_NAPI_BUDGET);
610 	napi_enable(&ctx->napi);
611 
612 	return &ctx->exec_ctx;
613 }
614 #else
615 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
616 {
617 	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__);
618 	return hif_exec_tasklet_create();
619 }
620 #endif
621 
622 
623 /**
624  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
625  * @ctx: a hif_exec_context known to be of tasklet type
626  */
627 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
628 {
629 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
630 	int irq_ind;
631 
632 	if (ctx->inited) {
633 		tasklet_disable(&t_ctx->tasklet);
634 		tasklet_kill(&t_ctx->tasklet);
635 	}
636 	ctx->inited = false;
637 
638 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
639 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
640 }
641 
642 struct hif_execution_ops tasklet_sched_ops = {
643 	.schedule = &hif_exec_tasklet_schedule,
644 	.kill = &hif_exec_tasklet_kill,
645 };
646 
647 /**
648  * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
649  */
650 static struct hif_exec_context *hif_exec_tasklet_create(void)
651 {
652 	struct hif_tasklet_exec_context *ctx;
653 
654 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
655 	if (!ctx)
656 		return NULL;
657 
658 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
659 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
660 		     (unsigned long)ctx);
661 
662 	ctx->exec_ctx.inited = true;
663 
664 	return &ctx->exec_ctx;
665 }
666 
667 /**
668  * hif_exec_get_ctx() - retrieve an exec context based on an id
669  * @softc: the hif context owning the exec context
670  * @id: the id of the exec context
671  *
672  * mostly added to make it easier to rename or move the context array
673  */
674 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
675 					  uint8_t id)
676 {
677 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
678 
679 	if (id < hif_state->hif_num_extgroup)
680 		return hif_state->hif_ext_group[id];
681 
682 	return NULL;
683 }
684 
685 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
686 				uint8_t id)
687 {
688 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
689 
690 	if (id < hif_state->hif_num_extgroup)
691 		return hif_state->hif_ext_group[id]->os_irq[0];
692 	return -EINVAL;
693 }
694 
695 qdf_export_symbol(hif_get_int_ctx_irq_num);
696 
697 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
698 {
699 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
700 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
701 	struct hif_exec_context *hif_ext_group;
702 	int i, status;
703 
704 	if (scn->ext_grp_irq_configured) {
705 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
706 		return QDF_STATUS_E_FAILURE;
707 	}
708 
709 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
710 		hif_ext_group = hif_state->hif_ext_group[i];
711 		status = 0;
712 		qdf_spinlock_create(&hif_ext_group->irq_lock);
713 		if (hif_ext_group->configured &&
714 		    hif_ext_group->irq_requested == false) {
715 			hif_ext_group->irq_enabled = true;
716 			status = hif_grp_irq_configure(scn, hif_ext_group);
717 		}
718 		if (status != 0) {
719 			HIF_ERROR("%s: failed for group %d", __func__, i);
720 			hif_ext_group->irq_enabled = false;
721 		}
722 	}
723 
724 	scn->ext_grp_irq_configured = true;
725 
726 	return QDF_STATUS_SUCCESS;
727 }
728 
729 qdf_export_symbol(hif_configure_ext_group_interrupts);
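
/*
 * Ordering note (added for clarity): both hif_register_ext_group() and
 * this function bail out once ext_grp_irq_configured is set, so every
 * group must be registered first and this function called exactly once
 * afterwards to hook up the group IRQs via hif_grp_irq_configure().
 */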
730 
731 #ifdef WLAN_SUSPEND_RESUME_TEST
732 /**
733  * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
734  *				       to trigger the fake-suspend command and,
735  *				       if so, issue the resume procedure.
736  * @scn: opaque HIF software context
737  *
738  * This API checks whether a unit-test command was used to trigger the
739  * fake-suspend command and, if so, triggers the resume procedure.
740  *
741  * The API is inlined to avoid call overhead, and the branch is marked
742  * unlikely so that the common (no fake-suspend) path stays fast.
743  *
744  * Return: void
745  */
746 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
747 {
748 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
749 		hif_ut_fw_resume(scn);
750 }
751 #else
752 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
753 {
754 }
755 #endif
756 
757 /**
758  * hif_ext_group_interrupt_handler() - handler for related interrupts
759  * @irq: irq number of the interrupt
760  * @context: the associated hif_exec_group context
761  *
762  * This callback function takes care of disabling the associated interrupts
763  * and scheduling the expected bottom half for the exec_context.
764  * It also helps keep track of the count of running contexts.
765  */
766 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
767 {
768 	struct hif_exec_context *hif_ext_group = context;
769 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
770 
771 	if (hif_ext_group->irq_requested) {
772 		hif_latency_profile_start(hif_ext_group);
773 
774 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
775 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
776 
777 		hif_ext_group->irq_disable(hif_ext_group);
778 		/*
779 		 * If a private ioctl has issued a fake suspend command to put
780 		 * the FW in D0-WOW state, then this is our chance to bring
781 		 * the FW out of WOW mode.
782 		 *
783 		 * The FW must be woken up explicitly here: the APSS stayed
784 		 * fully awake throughout, since the fake APSS suspend command
785 		 * was only issued to put the FW in WOW mode. The organic way
786 		 * of waking up the FW (as part of the APSS-host wake-up)
787 		 * therefore won't happen, because the APSS never really
788 		 * suspended.
789 		 */
790 		hif_check_and_trigger_ut_resume(scn);
791 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
792 
793 		hif_ext_group->sched_ops->schedule(hif_ext_group);
794 	}
795 
796 	return IRQ_HANDLED;
797 }
798 
799 /**
800  * hif_exec_kill() - kill all registered exec (grp tasklet/NAPI) contexts
801  * @hif_ctx: HIF opaque context
802  *
803  * return: void
804  */
805 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
806 {
807 	int i;
808 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
809 
810 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
811 		hif_state->hif_ext_group[i]->sched_ops->kill(
812 			hif_state->hif_ext_group[i]);
813 
814 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
815 }
816 
817 /**
818  * hif_register_ext_group() - API to register an external group interrupt handler
819  * @hif_ctx: HIF context
820  * @numirq: number of irqs in the group
821  * @irq: array of irq values
822  * @handler: callback interrupt handler function
823  * @cb_ctx: context passed to the callback
824  * @context_name: name of the module registering the callback context
825  * @type: napi vs tasklet
826  * @scale: binary shift factor to map NAPI budget to/from the internal budget
827  * Return: status
828  */
829 uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
830 		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
831 		void *cb_ctx, const char *context_name,
832 		enum hif_exec_type type, uint32_t scale)
833 {
834 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
835 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
836 	struct hif_exec_context *hif_ext_group;
837 
838 	if (scn->ext_grp_irq_configured) {
839 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
840 		return QDF_STATUS_E_FAILURE;
841 	}
842 
843 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
844 		HIF_ERROR("%s Max groups reached\n", __func__);
845 		return QDF_STATUS_E_FAILURE;
846 	}
847 
848 	if (numirq >= HIF_MAX_GRP_IRQ) {
849 		HIF_ERROR("%s invalid numirq\n", __func__);
850 		return QDF_STATUS_E_FAILURE;
851 	}
852 
853 	hif_ext_group = hif_exec_create(type, scale);
854 	if (!hif_ext_group)
855 		return QDF_STATUS_E_FAILURE;
856 
857 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
858 		hif_ext_group;
859 
860 	hif_ext_group->numirq = numirq;
861 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
862 	hif_ext_group->context = cb_ctx;
863 	hif_ext_group->handler = handler;
864 	hif_ext_group->configured = true;
865 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
866 	hif_ext_group->hif = hif_ctx;
867 	hif_ext_group->context_name = context_name;
868 	hif_ext_group->type = type;
869 	hif_event_history_init(hif_ext_group);
870 
871 	hif_state->hif_num_extgroup++;
872 	return QDF_STATUS_SUCCESS;
873 }
874 qdf_export_symbol(hif_register_ext_group);
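
/*
 * Usage sketch (hypothetical caller, not taken from this tree): a
 * datapath module owning two group interrupts might register a
 * NAPI-type context roughly as below, where dp_grp_handler, dp_ctx,
 * rx_irq and rx_err_irq are placeholders:
 *
 *	uint32_t irqs[] = { rx_irq, rx_err_irq };
 *
 *	if (hif_register_ext_group(hif_ctx, 2, irqs, dp_grp_handler,
 *				   dp_ctx, "dp_intr", HIF_EXEC_NAPI_TYPE,
 *				   0) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *
 * followed later by hif_configure_ext_group_interrupts() to request and
 * enable the group IRQs.
 */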
875 
876 /**
877  * hif_exec_create() - create an execution context
878  * @type: the type of execution context to create
879  */
880 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
881 						uint32_t scale)
882 {
883 	hif_debug("%s: create exec_type %d budget %d\n",
884 		  __func__, type, QCA_NAPI_BUDGET * scale);
885 
886 	switch (type) {
887 	case HIF_EXEC_NAPI_TYPE:
888 		return hif_exec_napi_create(scale);
889 
890 	case HIF_EXEC_TASKLET_TYPE:
891 		return hif_exec_tasklet_create();
892 	default:
893 		return NULL;
894 	}
895 }
896 
897 /**
898  * hif_exec_destroy() - free the hif_exec context
899  * @ctx: context to free
900  *
901  * please kill the context before freeing it to avoid a use after free.
902  */
903 void hif_exec_destroy(struct hif_exec_context *ctx)
904 {
905 	qdf_spinlock_destroy(&ctx->irq_lock);
906 	qdf_mem_free(ctx);
907 }
908 
909 /**
910  * hif_deregister_exec_group() - API to free the exec contexts
911  * @hif_ctx: HIF context
912  * @context_name: name of the module whose contexts need to be deregistered
913  *
914  * This function deregisters the contexts of the requestor identified
915  * based on the context_name & frees the memory.
916  *
917  * Return: void
918  */
919 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
920 				const char *context_name)
921 {
922 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
923 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
924 	struct hif_exec_context *hif_ext_group;
925 	int i;
926 
927 	for (i = 0; i < HIF_MAX_GROUP; i++) {
928 		hif_ext_group = hif_state->hif_ext_group[i];
929 
930 		if (!hif_ext_group)
931 			continue;
932 
933 		hif_debug("%s: Deregistering grp id %d name %s\n",
934 			  __func__,
935 			  hif_ext_group->grp_id,
936 			  hif_ext_group->context_name);
937 
938 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
939 			hif_ext_group->sched_ops->kill(hif_ext_group);
940 			hif_state->hif_ext_group[i] = NULL;
941 			hif_exec_destroy(hif_ext_group);
942 			hif_state->hif_num_extgroup--;
943 		}
944 
945 	}
946 }
947 qdf_export_symbol(hif_deregister_exec_group);
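
/*
 * Usage sketch (hypothetical, matching the registration sketch above):
 * the same module would tear down its contexts by name, e.g.
 * hif_deregister_exec_group(hif_ctx, "dp_intr"), which kills and frees
 * every group registered under that context_name.
 */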
948