/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include "qdf_module.h"
#include "qdf_net_if.h"
/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1, scaler - 1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
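
/*
 * Illustrative example (not used by the driver): with a scale bin shift
 * s = 2, the macros above map a NAPI budget of 64 to an internal budget
 * of (64 << 2) - 1 = 255, and an internal work_done of 255 back to
 * (255 + 1) >> 2 = 64, so the value reported to the NAPI core never
 * exceeds the budget it granted.
 */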

static struct hif_exec_context *hif_exec_tasklet_create(void);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];

static inline
int hif_get_next_record_index(qdf_atomic_t *table_index,
			      int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}

/**
 * hif_hist_is_prev_record() - Check if index is the immediate
 *  previous record wrt curr_index
 * @curr_index: current index in the event history
 * @index: index to be checked
 * @hist_size: history size
 *
 * Return: true if index is immediately behind curr_index else false
 */
static inline
bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
			     uint32_t hist_size)
{
	return (((index + 1) & (hist_size - 1)) == curr_index) ?
			true : false;
}
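
/*
 * Example (illustrative only): the check above relies on the history
 * size being a power of two. With hist_size = 8, index 7 is treated as
 * the immediate previous record of curr_index 0, since
 * (7 + 1) & (8 - 1) == 0.
 */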

/**
 * hif_hist_skip_event_record() - Check if current event needs to be
 *  recorded or not
 * @hist_ev: HIF event history
 * @event: DP event entry
 *
 * Return: true if current event needs to be skipped else false
 */
static bool
hif_hist_skip_event_record(struct hif_event_history *hist_ev,
			   struct hif_event_record *event)
{
	struct hif_event_record *rec;
	struct hif_event_record *last_irq_rec;
	int32_t index;

	index = qdf_atomic_read(&hist_ev->index);
	if (index < 0)
		return false;

	index &= (HIF_EVENT_HIST_MAX - 1);
	rec = &hist_ev->event[index];

	switch (event->type) {
	case HIF_EVENT_IRQ_TRIGGER:
		/*
		 * The prev record check is to prevent skipping the IRQ event
		 * record in the case where the BH was re-scheduled due to
		 * force_break but there are no entries to be reaped in the
		 * rings.
		 */
		if (rec->type == HIF_EVENT_BH_SCHED &&
		    hif_hist_is_prev_record(index,
					    hist_ev->misc.last_irq_index,
					    HIF_EVENT_HIST_MAX)) {
			last_irq_rec =
				&hist_ev->event[hist_ev->misc.last_irq_index];
			last_irq_rec->timestamp = hif_get_log_timestamp();
			last_irq_rec->cpu_id = qdf_get_cpu();
			last_irq_rec->hp++;
			last_irq_rec->tp = last_irq_rec->timestamp -
						hist_ev->misc.last_irq_ts;
			return true;
		}
		break;
	case HIF_EVENT_BH_SCHED:
		if (rec->type == HIF_EVENT_BH_SCHED) {
			rec->timestamp = hif_get_log_timestamp();
			rec->cpu_id = qdf_get_cpu();
			return true;
		}
		break;
	case HIF_EVENT_SRNG_ACCESS_START:
		if (event->hp == event->tp)
			return true;
		break;
	case HIF_EVENT_SRNG_ACCESS_END:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
			return true;
		break;
	case HIF_EVENT_BH_COMPLETE:
	case HIF_EVENT_BH_FORCE_BREAK:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
			return true;
		break;
	default:
		break;
	}

	return false;
}

void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	if (!(scn->event_enable_mask & BIT(event->type)))
		return;

	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	hist_ev = scn->evt_hist[intr_grp_id];
	if (qdf_unlikely(!hist_ev))
		return;

	if (hif_hist_skip_event_record(hist_ev, event))
		return;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
		hist_ev->misc.last_irq_index = record_index;
		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
	}

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = hif_get_log_timestamp();
	record->type = event->type;
}

void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = &hif_event_desc_history[id];
	qdf_atomic_set(&scn->evt_hist[id]->index, -1);

	hif_info("SRNG events history initialized for group: %d", id);
}

void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = NULL;
	hif_info("SRNG events history de-initialized for group: %d", id);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/**
 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
 * @hif_state: hif context
 *
 * Return: void
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
		"0-2   ms",
		"3-10  ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "Current timestamp: %lld", cur_tstamp);

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Interrupts in the HIF Group");

			for (j = 0; j < hif_ext_group->numirq; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "  %s",
					  hif_ext_group->irq_name
					  (hif_ext_group->irq[j]));
			}

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Last serviced timestamp: %lld",
				  hif_ext_group->tstamp);

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
				  "Latency Bucket     | Time elapsed");

			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
				QDF_TRACE(QDF_MODULE_ID_HIF,
					  QDF_TRACE_LEVEL_FATAL,
					  "%s     |    %lld", time_str[j],
					  hif_ext_group->
					  sched_latency_stats[j]);
			}
		}
	}
}
#else
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
#endif

/**
 * hif_clear_napi_stats() - reset NAPI stats
 * @hif_ctx: hif context
 *
 * Return: void
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	size_t i;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			return;

		qdf_mem_set(hif_ext_group->sched_latency_stats,
			    sizeof(hif_ext_group->sched_latency_stats),
			    0x0);
	}
}

qdf_export_symbol(hif_clear_napi_stats);

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}

/**
 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &hif_ext_group->stats[cpu_id];
	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;

	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;
	++napi_stat->poll_time_buckets[bucket];
}
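
/*
 * Worked example (illustrative only): with bucket_size_us = 500, a poll
 * that ran for 1.2 ms (1200 us) increments poll_time_buckets[2], while
 * any poll of (QCA_NAPI_NUM_BUCKETS - 1) * 500 us or more lands in the
 * last bucket.
 */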

/**
 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * Return: true if NAPI needs to yield, else false
 */
static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
{
	bool time_limit_reached = false;
	unsigned long long poll_time_ns;
	int cpu_id = qdf_get_cpu();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	struct hif_config_info *cfg = &scn->hif_config;

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	time_limit_reached =
		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;

	if (time_limit_reached) {
		hif_ext_group->stats[cpu_id].time_limit_reached++;
		hif_ext_group->force_break = true;
	}

	return time_limit_reached;
}

bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	bool ret_val = false;

	if (!(grp_id < hif_state->hif_num_extgroup) ||
	    !(grp_id < HIF_MAX_GROUP))
		return false;

	hif_ext_group = hif_state->hif_ext_group[grp_id];

	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
		ret_val = hif_exec_poll_should_yield(hif_ext_group);

	return ret_val;
}

/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = qdf_time_sched_clock();
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	/*
	 * Max value of uint32_t (poll_time_bucket) = 4294967295.
	 * Thus we need 10 chars + 1 separator ('|') = 11 chars for each
	 * bucket value, plus 1 char for '\0'.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");

	for (i = 0;
	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
	     i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < num_possible_cpus(); j++) {
			napi_stats = &hif_ext_group->stats[j];
			if (!napi_stats->napi_schedules)
				continue;

			hif_get_poll_times_hist_str(napi_stats,
						    hist_str,
						    sizeof(hist_str));
			QDF_TRACE(QDF_MODULE_ID_HIF,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
				  i, j,
				  napi_stats->napi_schedules,
				  napi_stats->napi_polls,
				  napi_stats->napi_completes,
				  napi_stats->napi_workdone,
				  napi_stats->time_limit_reached,
				  qdf_do_div(napi_stats->napi_max_poll_time,
					     1000),
				  hist_str);
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);

#else

static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						QDF_TRACE_LEVEL_FATAL,
						"NAPI[%2d]CPU[%d]: "
						"%7d %7d %7d %7d ",
						i, j,
						napi_stats->napi_schedules,
						napi_stats->napi_polls,
						napi_stats->napi_completes,
						napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}

/**
 * hif_exec_tasklet_fn() - grp tasklet bottom half
 * @data: context (a struct hif_exec_context pointer cast to unsigned long)
 *
 * Return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}

/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif
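
/*
 * Example (illustrative only, assuming the HIF_SCHED_LATENCY_BUCKET_*
 * thresholds match the bucket names printed by
 * hif_print_napi_latency_stats()): a scheduling delay of 15 ms falls
 * through the 0-2 ms and 3-10 ms checks and increments
 * sched_latency_stats[2], i.e. the "11-20 ms" bucket.
 */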

/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static inline
void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif

#ifdef FEATURE_NAPI
/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Return: work done, mapped from the internal budget back to the NAPI budget
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	if (budget)
		normalized_budget =
			NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget);

	actual_dones = work_done;

	if (!hif_ext_group->force_break && work_done < normalized_budget) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full work
		 * done anyway
		 */
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}

/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;
	napi_schedule(&n_ctx->napi);
}

/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	hif_core_ctl_set_boost(false);
	netif_napi_del(&(n_ctx->napi));
}

struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};

/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget from/to internal
 *         budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
		       QCA_NAPI_BUDGET);
	napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	hif_warn("FEATURE_NAPI not defined, making tasklet");
	return hif_exec_tasklet_create();
}
#endif

/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}

struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};

/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}

/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * mostly added to make it easier to rename or move the context array
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}

int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id]->os_irq[0];
	return -EINVAL;
}

qdf_export_symbol(hif_get_int_ctx_irq_num);

#ifdef HIF_CPU_PERF_AFFINE_MASK
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_config_irq_affinity(scn);
}

qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
#endif

QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			hif_err("Failed for group %d", i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_configure_ext_group_interrupts);

void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn || !scn->ext_grp_irq_configured) {
		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
		return;
	}

	hif_grp_irq_deconfigure(scn);
	scn->ext_grp_irq_configured = false;
}

qdf_export_symbol(hif_deconfigure_ext_group_interrupts);

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
 *				       to trigger the fake-suspend command and,
 *				       if so, issue the resume procedure.
 * @scn: HIF software context
 *
 * This API checks whether a unit-test command was used to trigger the
 * fake-suspend command, and if so it triggers the resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif

/**
 * hif_check_and_trigger_sys_resume() - Check for bus suspend and
 *  trigger system resume
 * @scn: hif context
 * @irq: irq number
 *
 * Return: None
 */
static inline void
hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
{
	if (scn->bus_suspended && scn->linkstate_vote) {
		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
		qdf_pm_system_wakeup();
	}
}

/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * This callback function also helps keep track of the count of running
 * contexts.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		hif_ext_group->irq_disable(hif_ext_group);
		/*
		 * If the private ioctl has issued a fake suspend command to
		 * put the FW in D0-WOW state, then here is our chance to
		 * bring the FW out of WOW mode.
		 *
		 * The FW has to be woken up explicitly because the APSS was
		 * fully awake throughout: the fake APSS suspend command only
		 * put the FW in WOW mode, so the organic wake-up of the FW
		 * (as part of an APSS-host wake-up) will not happen, since
		 * in reality the APSS never suspended.
		 */
		hif_check_and_trigger_ut_resume(scn);

		hif_check_and_trigger_sys_resume(scn, irq);

		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}

/**
 * hif_exec_kill() - kill the exec contexts of all registered groups
 * @hif_ctx: HIF context
 *
 * Return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}

/**
 * hif_register_ext_group() - API to register an external group
 * interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of IRQs in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: text name of the requesting context
 * @type: napi vs tasklet
 * @scale: binary shift factor used to map the NAPI budget to the
 *	   internal budget
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				  uint32_t numirq, uint32_t irq[],
				  ext_intr_handler handler,
				  void *cb_ctx, const char *context_name,
				  enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		hif_err("Invalid numirq: %d", numirq);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
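
/*
 * Usage sketch (hypothetical values, for illustration only): a datapath
 * client with three IRQs could register a NAPI-serviced group roughly
 * like this, where dp_srng_handler, dp_soc and scale stand in for the
 * caller's own handler, context and bin-shift choice:
 *
 *	uint32_t irqs[3] = {101, 102, 103};
 *
 *	status = hif_register_ext_group(hif_ctx, 3, irqs, dp_srng_handler,
 *					dp_soc, "dp_intr",
 *					HIF_EXEC_NAPI_TYPE, scale);
 *
 * The IRQs for the group are only requested later, by
 * hif_configure_ext_group_interrupts().
 */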

/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: bin shift factor to map the NAPI budget to the internal budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale)
{
	hif_debug("%s: create exec_type %d budget %d\n",
		  __func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);

	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}

/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * Please kill the context before freeing it to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);

	if (scn->ext_grp_irq_configured)
		qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}

/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name & frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		hif_debug("%s: Deregistering grp id %d name %s\n",
			  __func__,
			  hif_ext_group->grp_id,
			  hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);