xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
/*
 * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include "qdf_module.h"
#include "qdf_net_if.h"
/* Map NAPI budget 0 to internal budget 0,
 * NAPI budget 1 to internal budget [1, scaler - 1],
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
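
/*
 * Worked example (illustrative): with a scale_bin_shift of 2, i.e. an
 * internal-to-NAPI scaler of 4, a NAPI budget of 64 maps to an internal
 * budget of (64 << 2) - 1 = 255, and an internal work_done of 255 maps
 * back to (255 + 1) >> 2 = 64 NAPI units. The shift used at run time is
 * hif_ext_group->scale_bin_shift; 2 is only an assumed example value.
 */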

static struct hif_exec_context *hif_exec_tasklet_create(void);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];

static inline
int hif_get_next_record_index(qdf_atomic_t *table_index,
			      int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}
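
/*
 * The bitwise AND above wraps the index only when array_size is a power
 * of two. For example, with an assumed history depth of 1024 entries, an
 * incremented index of 1025 wraps to 1025 & 1023 = 1.
 */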

void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	if (!hif_state->hif_num_extgroup)
		return;

	if (scn->event_disable_mask & BIT(event->type))
		return;

	if (intr_grp_id >= HIF_NUM_INT_CONTEXTS) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	hif_ext_group = hif_state->hif_ext_group[intr_grp_id];
	hist_ev = hif_ext_group->evt_hist;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = qdf_get_log_timestamp();
	record->type = event->type;
}

static void hif_event_history_init(struct hif_exec_context *hif_ext_grp)
{
	hif_ext_grp->evt_hist = &hif_event_desc_history[hif_ext_grp->grp_id];
	qdf_atomic_set(&hif_ext_grp->evt_hist->index, -1);
}
#else
static inline void hif_event_history_init(struct hif_exec_context *hif_ext_grp)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
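
/*
 * Usage sketch (illustrative): callers typically record events at
 * interrupt and bottom-half boundaries through the hif_record_event()
 * wrapper rather than calling hif_hist_record_event() directly, e.g.
 *
 *	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
 *			 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
 *
 * as done in hif_ext_group_interrupt_handler() and hif_exec_poll()
 * below.
 */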

/**
 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
 * @hif_state: hif context
 *
 * Return: void
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
		"0-2   ms",
		"3-10  ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "Current timestamp: %lld", cur_tstamp);

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Interrupts in the HIF Group");

			for (j = 0; j < hif_ext_group->numirq; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "  %s",
					  hif_ext_group->irq_name
					  (hif_ext_group->irq[j]));
			}

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Last serviced timestamp: %lld",
				  hif_ext_group->tstamp);

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Latency Bucket     | Time elapsed");

			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "%s     |    %lld", time_str[j],
					  hif_ext_group->
					  sched_latency_stats[j]);
			}
		}
	}
}
#else
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
#endif

/**
 * hif_clear_napi_stats() - reset NAPI stats
 * @hif_ctx: hif context
 *
 * Return: void
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	size_t i;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			return;

		qdf_mem_set(hif_ext_group->sched_latency_stats,
			    sizeof(hif_ext_group->sched_latency_stats),
			    0x0);
	}
}

qdf_export_symbol(hif_clear_napi_stats);

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}

/**
 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &hif_ext_group->stats[cpu_id];
	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;

	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;
	++napi_stat->poll_time_buckets[bucket];
}
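
/*
 * Worked example (illustrative): a poll that runs for 1.2 ms gives
 * poll_time_us = 1200, so bucket = 1200 / 500 = 2, i.e. the
 * [1000, 1499] us bucket. Polls of 500 * QCA_NAPI_NUM_BUCKETS us or
 * longer are clamped into the last bucket.
 */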

/**
 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * Return: true if NAPI needs to yield, else false
 */
static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
{
	bool time_limit_reached = false;
	unsigned long long poll_time_ns;
	int cpu_id = qdf_get_cpu();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	struct hif_config_info *cfg = &scn->hif_config;

	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
	time_limit_reached =
		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;

	if (time_limit_reached) {
		hif_ext_group->stats[cpu_id].time_limit_reached++;
		hif_ext_group->force_break = true;
	}

	return time_limit_reached;
}

bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	bool ret_val = false;

	if (!(grp_id < hif_state->hif_num_extgroup) ||
	    !(grp_id < HIF_MAX_GROUP))
		return false;

	hif_ext_group = hif_state->hif_ext_group[grp_id];

	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
		ret_val = hif_exec_poll_should_yield(hif_ext_group);

	return ret_val;
}

/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = sched_clock();
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	/*
	 * Max value of a uint32_t poll_time_bucket is 4294967295, so each
	 * bucket value needs 10 chars + 1 separator = 11 chars, plus 1 char
	 * for the terminating '\0'.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");

	for (i = 0;
	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
	     i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < num_possible_cpus(); j++) {
			napi_stats = &hif_ext_group->stats[j];
			if (!napi_stats->napi_schedules)
				continue;

			hif_get_poll_times_hist_str(napi_stats,
						    hist_str,
						    sizeof(hist_str));
			QDF_TRACE(QDF_MODULE_ID_HIF,
				  QDF_TRACE_LEVEL_ERROR,
				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
				  i, j,
				  napi_stats->napi_schedules,
				  napi_stats->napi_polls,
				  napi_stats->napi_completes,
				  napi_stats->napi_workdone,
				  napi_stats->time_limit_reached,
				  qdf_do_div(napi_stats->napi_max_poll_time,
					     1000),
				  hist_str);
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);

#else

static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						QDF_TRACE_LEVEL_FATAL,
						"NAPI[%2d]CPU[%d]: "
						"%7d %7d %7d %7d ",
						i, j,
						napi_stats->napi_schedules,
						napi_stats->napi_polls,
						napi_stats->napi_completes,
						napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}

/**
 * hif_exec_tasklet_fn() - grp tasklet bottom half
 * @data: the associated hif_exec_context cast to unsigned long
 *
 * Return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}

/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif
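
/*
 * Worked example (illustrative): if hif_latency_profile_start() stamped
 * the group when its interrupt fired and the bottom half runs 15 ms
 * later, time_elapsed is 15 and, assuming the bucket limits follow their
 * names (0-2, 3-10, 11-20, ... ms), sched_latency_stats[2] (the
 * "11-20 ms" bucket reported above) is incremented.
 */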

/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static inline
void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif

#ifdef FEATURE_NAPI
/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Return: mapping of internal budget to napi
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget,
								   shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget);

	actual_dones = work_done;

	if (!hif_ext_group->force_break && work_done < normalized_budget) {
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full work
		 * done anyway
		 */
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}

/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;

	napi_schedule(&n_ctx->napi);
}

/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	hif_core_ctl_set_boost(false);
	netif_napi_del(&(n_ctx->napi));
}

struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};

/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget from/to internal
 *         budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
		       QCA_NAPI_BUDGET);
	napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__);
	return hif_exec_tasklet_create();
}
#endif

/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}

struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};

/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}

/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * Mostly added to make it easier to rename or move the context array.
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}

/**
 * hif_get_int_ctx_irq_num() - get the OS irq number for an exec context
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * Return: the OS irq number of the context's first irq, or -EINVAL if the
 *	   id is out of range
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id]->os_irq[0];
	return -EINVAL;
}

qdf_export_symbol(hif_get_int_ctx_irq_num);

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - set irq affinity for ext groups
 * @hif_ctx: hif opaque context
 *
 * Return: void
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_config_irq_affinity(scn);
}

qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
#endif

/**
 * hif_configure_ext_group_interrupts() - configure irqs for all ext groups
 * @hif_ctx: hif opaque context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the ext
 *	   group irqs were already configured
 */
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			HIF_ERROR("%s: failed for group %d", __func__, i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_configure_ext_group_interrupts);

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if a unit-test command triggered
 *				       the fake-suspend command and, if so,
 *				       issue the resume procedure.
 * @scn: HIF software context
 *
 * This API checks whether a unit-test command was used to trigger the
 * fake-suspend command and, if so, triggers the resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif

/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_group context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * It also keeps track of the count of running group contexts.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		hif_ext_group->irq_disable(hif_ext_group);
		/*
		 * If a private ioctl issued a fake suspend command to put
		 * the FW in D0-WOW state, this is our chance to bring the FW
		 * out of WOW mode.
		 *
		 * The FW must be woken explicitly here because the APSS
		 * stayed fully awake the whole time the fake suspend command
		 * kept the FW in WOW mode, so the organic wake-up path (as
		 * part of APSS-host wake-up) never runs; the APSS never
		 * really suspended.
		 */
		hif_check_and_trigger_ut_resume(scn);
		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}

/**
 * hif_exec_kill() - kill the bottom halves of all registered exec contexts
 * @hif_ctx: hif opaque context
 *
 * Return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}

/**
 * hif_register_ext_group() - API to register external group
 * interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of irqs in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: text name of the requesting context
 * @type: napi vs tasklet
 * @scale: binary shift factor to map NAPI budget to internal budget
 *
 * Return: status
 */
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		HIF_ERROR("%s Max groups reached\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		HIF_ERROR("%s invalid numirq\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;
	hif_event_history_init(hif_ext_group);

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
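
/*
 * Usage sketch (illustrative; my_grp_handler, my_grp_ctx and scale_shift
 * are placeholders, not symbols defined in this driver):
 *
 *	uint32_t irqs[] = { 51, 53 };
 *
 *	status = hif_register_ext_group(hif_ctx, 2, irqs, my_grp_handler,
 *					my_grp_ctx, "dp_intr",
 *					HIF_EXEC_NAPI_TYPE, scale_shift);
 *
 * Once every group is registered, hif_configure_ext_group_interrupts()
 * requests and enables the irqs, and hif_deregister_exec_group() with the
 * same context name tears the groups down again.
 */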

/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: binary shift factor to map NAPI budget to internal budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale)
{
	hif_debug("%s: create exec_type %d budget %d\n",
		  __func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);

	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}

/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * Kill the context before freeing it to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}

/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name & frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		hif_debug("%s: Deregistering grp id %d name %s\n",
			  __func__,
			  hif_ext_group->grp_id,
			  hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);
960