xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <hif_exec.h>
20 #include <ce_main.h>
21 #include "qdf_module.h"
22 #include "qdf_net_if.h"
23 /* mapping NAPI budget 0 to internal budget 0
24  * NAPI budget 1 to internal budget [1, scaler - 1]
25  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc
26  */
27 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
28 	(((n) << (s)) - 1)
29 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
30 	(((n) + 1) >> (s))
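/*
 * Worked example of the mapping above: with a scale_bin_shift of 2
 * (scaler = 4), a NAPI budget of 64 normalizes to an internal budget of
 * (64 << 2) - 1 = 255, and 255 units of internal work map back to a NAPI
 * work-done value of (255 + 1) >> 2 = 64, i.e. the full NAPI budget.
 */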
31 
32 static struct hif_exec_context *hif_exec_tasklet_create(void);
33 
34 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
35 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
36 
37 static inline
38 int hif_get_next_record_index(qdf_atomic_t *table_index,
39 			      int array_size)
40 {
41 	int record_index = qdf_atomic_inc_return(table_index);
42 
43 	return record_index & (array_size - 1);
44 }
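/*
 * Note: the bitwise AND above behaves like a modulo only when array_size is
 * a power of two, e.g. with array_size 8 an incremented counter of 9 wraps
 * to slot 9 & 7 = 1; HIF_EVENT_HIST_MAX is assumed to satisfy this.
 */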
45 
46 /**
47  * hif_hist_is_prev_record() - Check if index is the immediate
48  *  previous record wrt curr_index
49  * @curr_index: curr index in the event history
50  * @index: index to be checked
51  * @hist_size: history size
52  *
53  * Return: true if index is immediately behind curr_index else false
54  */
55 static inline
56 bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
57 			     uint32_t hist_size)
58 {
59 	return (((index + 1) & (hist_size - 1)) == curr_index) ?
60 			true : false;
61 }
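/*
 * For example, with hist_size 8: index 6 is the immediate previous record of
 * curr_index 7 ((6 + 1) & 7 == 7), and index 7 is the immediate previous
 * record of curr_index 0 ((7 + 1) & 7 == 0).
 */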
62 
63 /**
64  * hif_hist_skip_event_record() - Check if current event needs to be
65  *  recorded or not
66  * @hist_ev: HIF event history
67  * @event: DP event entry
68  *
69  * Return: true if current event needs to be skipped else false
70  */
71 static bool
72 hif_hist_skip_event_record(struct hif_event_history *hist_ev,
73 			   struct hif_event_record *event)
74 {
75 	struct hif_event_record *rec;
76 	struct hif_event_record *last_irq_rec;
77 	int32_t index;
78 
79 	index = qdf_atomic_read(&hist_ev->index);
80 	if (index < 0)
81 		return false;
82 
83 	index &= (HIF_EVENT_HIST_MAX - 1);
84 	rec = &hist_ev->event[index];
85 
86 	switch (event->type) {
87 	case HIF_EVENT_IRQ_TRIGGER:
88 		/*
89 		 * The prev record check is to prevent skipping the IRQ event
90 		 * record in case where BH got re-scheduled due to force_break
91 		 * but there are no entries to be reaped in the rings.
92 		 */
93 		if (rec->type == HIF_EVENT_BH_SCHED &&
94 		    hif_hist_is_prev_record(index,
95 					    hist_ev->misc.last_irq_index,
96 					    HIF_EVENT_HIST_MAX)) {
97 			last_irq_rec =
98 				&hist_ev->event[hist_ev->misc.last_irq_index];
99 			last_irq_rec->timestamp = hif_get_log_timestamp();
100 			last_irq_rec->cpu_id = qdf_get_cpu();
101 			last_irq_rec->hp++;
102 			last_irq_rec->tp = last_irq_rec->timestamp -
103 						hist_ev->misc.last_irq_ts;
104 			return true;
105 		}
106 		break;
107 	case HIF_EVENT_BH_SCHED:
108 		if (rec->type == HIF_EVENT_BH_SCHED) {
109 			rec->timestamp = hif_get_log_timestamp();
110 			rec->cpu_id = qdf_get_cpu();
111 			return true;
112 		}
113 		break;
114 	case HIF_EVENT_SRNG_ACCESS_START:
115 		if (event->hp == event->tp)
116 			return true;
117 		break;
118 	case HIF_EVENT_SRNG_ACCESS_END:
119 		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
120 			return true;
121 		break;
122 	default:
123 		break;
124 	}
125 
126 	return false;
127 }
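/*
 * In short: back-to-back BH_SCHED events are coalesced into one record,
 * an IRQ that merely re-triggers the already recorded BH (no ring progress)
 * is folded into the previous IRQ record, SRNG accesses with no entries to
 * reap are dropped, and an ACCESS_END without a matching ACCESS_START is
 * ignored.
 */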
128 
129 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
130 			   struct hif_event_record *event, uint8_t intr_grp_id)
131 {
132 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
133 	struct hif_event_history *hist_ev;
134 	struct hif_event_record *record;
135 	int record_index;
136 
137 	if (!(scn->event_enable_mask & BIT(event->type)))
138 		return;
139 
140 	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
141 		hif_err("Invalid interrupt group id %d", intr_grp_id);
142 		return;
143 	}
144 
145 	hist_ev = scn->evt_hist[intr_grp_id];
146 	if (qdf_unlikely(!hist_ev))
147 		return;
148 
149 	if (hif_hist_skip_event_record(hist_ev, event))
150 		return;
151 
152 	record_index = hif_get_next_record_index(
153 			&hist_ev->index, HIF_EVENT_HIST_MAX);
154 
155 	record = &hist_ev->event[record_index];
156 
157 	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
158 		hist_ev->misc.last_irq_index = record_index;
159 		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
160 	}
161 
162 	record->hal_ring_id = event->hal_ring_id;
163 	record->hp = event->hp;
164 	record->tp = event->tp;
165 	record->cpu_id = qdf_get_cpu();
166 	record->timestamp = hif_get_log_timestamp();
167 	record->type = event->type;
168 }
169 
170 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
171 {
172 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
173 
174 	scn->evt_hist[id] = &hif_event_desc_history[id];
175 	qdf_atomic_set(&scn->evt_hist[id]->index, -1);
176 
177 	hif_info("SRNG events history initialized for group: %d", id);
178 }
179 
180 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
181 {
182 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
183 
184 	scn->evt_hist[id] = NULL;
185 	hif_info("SRNG events history de-initialized for group: %d", id);
186 }
187 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
188 
189 /**
190  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
191  * @hif_state: hif context
192  *
193  * Return: void
194  */
195 #ifdef HIF_LATENCY_PROFILE_ENABLE
196 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
197 {
198 	struct hif_exec_context *hif_ext_group;
199 	int i, j;
200 	int64_t cur_tstamp;
201 
202 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
203 		"0-2   ms",
204 		"3-10  ms",
205 		"11-20 ms",
206 		"21-50 ms",
207 		"51-100 ms",
208 		"101-250 ms",
209 		"251-500 ms",
210 		"> 500 ms"
211 	};
212 
213 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
214 
215 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
216 		  "Current timestamp: %lld", cur_tstamp);
217 
218 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
219 		if (hif_state->hif_ext_group[i]) {
220 			hif_ext_group = hif_state->hif_ext_group[i];
221 
222 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
223 				  "Interrupts in the HIF Group");
224 
225 			for (j = 0; j < hif_ext_group->numirq; j++) {
226 				QDF_TRACE(QDF_MODULE_ID_HIF,
227 					  QDF_TRACE_LEVEL_FATAL,
228 					  "  %s",
229 					  hif_ext_group->irq_name
230 					  (hif_ext_group->irq[j]));
231 			}
232 
233 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
234 				  "Last serviced timestamp: %lld",
235 				  hif_ext_group->tstamp);
236 
237 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
238 				  "Latency Bucket     | Time elapsed");
239 
240 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
241 				QDF_TRACE(QDF_MODULE_ID_HIF,
242 					  QDF_TRACE_LEVEL_FATAL,
243 					  "%s     |    %lld", time_str[j],
244 					  hif_ext_group->
245 					  sched_latency_stats[j]);
246 			}
247 		}
248 	}
249 }
250 #else
251 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
252 {
253 }
254 #endif
255 
256 /**
257  * hif_clear_napi_stats() - reset NAPI stats
258  * @hif_ctx: hif context
259  *
260  * Return: void
261  */
262 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
263 {
264 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
265 	struct hif_exec_context *hif_ext_group;
266 	size_t i;
267 
268 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
269 		hif_ext_group = hif_state->hif_ext_group[i];
270 
271 		if (!hif_ext_group)
272 			return;
273 
274 		qdf_mem_set(hif_ext_group->sched_latency_stats,
275 			    sizeof(hif_ext_group->sched_latency_stats),
276 			    0x0);
277 	}
278 }
279 
280 qdf_export_symbol(hif_clear_napi_stats);
281 
282 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
283 /**
284  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
285  * @stats: NAPI stats to get poll time buckets
286  * @buf: buffer to fill histogram string
287  * @buf_len: length of the buffer
288  *
289  * Return: void
290  */
291 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
292 					uint8_t buf_len)
293 {
294 	int i;
295 	int str_index = 0;
296 
297 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
298 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
299 					   "%u|", stats->poll_time_buckets[i]);
300 }
301 
302 /**
303  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
304  * @hif_ext_group: hif_ext_group of type NAPI
305  *
306  * The function is called at the end of a NAPI poll to calculate poll time
307  * buckets.
308  *
309  * Return: void
310  */
311 static
312 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
313 {
314 	struct qca_napi_stat *napi_stat;
315 	unsigned long long poll_time_ns;
316 	uint32_t poll_time_us;
317 	uint32_t bucket_size_us = 500;
318 	uint32_t bucket;
319 	uint32_t cpu_id = qdf_get_cpu();
320 
321 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
322 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
323 
324 	napi_stat = &hif_ext_group->stats[cpu_id];
325 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
326 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
327 
328 	bucket = poll_time_us / bucket_size_us;
329 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
330 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
331 	++napi_stat->poll_time_buckets[bucket];
332 }
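/*
 * For instance, a poll that ran for 1.2 ms (1200 us) lands in bucket
 * 1200 / 500 = 2, and any poll longer than
 * (QCA_NAPI_NUM_BUCKETS - 1) * 500 us is clamped into the last bucket.
 */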
333 
334 /**
335  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
336  * @hif_ext_group: hif_ext_group of type NAPI
337  *
338  * Return: true if NAPI needs to yield, else false
339  */
340 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
341 {
342 	bool time_limit_reached = false;
343 	unsigned long long poll_time_ns;
344 	int cpu_id = qdf_get_cpu();
345 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
346 	struct hif_config_info *cfg = &scn->hif_config;
347 
348 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
349 	time_limit_reached =
350 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
351 
352 	if (time_limit_reached) {
353 		hif_ext_group->stats[cpu_id].time_limit_reached++;
354 		hif_ext_group->force_break = true;
355 	}
356 
357 	return time_limit_reached;
358 }
359 
360 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
361 {
362 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
363 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
364 	struct hif_exec_context *hif_ext_group;
365 	bool ret_val = false;
366 
367 	if (!(grp_id < hif_state->hif_num_extgroup) ||
368 	    !(grp_id < HIF_MAX_GROUP))
369 		return false;
370 
371 	hif_ext_group = hif_state->hif_ext_group[grp_id];
372 
373 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
374 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
375 
376 	return ret_val;
377 }
378 
379 /**
380  * hif_exec_update_service_start_time() - Update NAPI poll start time
381  * @hif_ext_group: hif_ext_group of type NAPI
382  *
383  * The function is called at the beginning of a NAPI poll to record the poll
384  * start time.
385  *
386  * Return: None
387  */
388 static inline
389 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
390 {
391 	hif_ext_group->poll_start_time = qdf_time_sched_clock();
392 }
393 
394 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
395 {
396 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
397 	struct hif_exec_context *hif_ext_group;
398 	struct qca_napi_stat *napi_stats;
399 	int i, j;
400 
401 	/*
402 	 * Max value of uint32_t (poll_time_bucket) = 4294967295.
403 	 * Thus we need 10 chars + 1 space = 11 chars for each bucket value,
404 	 * plus 1 char for the terminating '\0'.
405 	 */
406 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
407 
408 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
409 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
410 
411 	for (i = 0;
412 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
413 	     i++) {
414 		hif_ext_group = hif_state->hif_ext_group[i];
415 		for (j = 0; j < num_possible_cpus(); j++) {
416 			napi_stats = &hif_ext_group->stats[j];
417 			if (!napi_stats->napi_schedules)
418 				continue;
419 
420 			hif_get_poll_times_hist_str(napi_stats,
421 						    hist_str,
422 						    sizeof(hist_str));
423 			QDF_TRACE(QDF_MODULE_ID_HIF,
424 				  QDF_TRACE_LEVEL_INFO_HIGH,
425 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
426 				  i, j,
427 				  napi_stats->napi_schedules,
428 				  napi_stats->napi_polls,
429 				  napi_stats->napi_completes,
430 				  napi_stats->napi_workdone,
431 				  napi_stats->time_limit_reached,
432 				  qdf_do_div(napi_stats->napi_max_poll_time,
433 					     1000),
434 				  hist_str);
435 		}
436 	}
437 
438 	hif_print_napi_latency_stats(hif_state);
439 }
440 
441 qdf_export_symbol(hif_print_napi_stats);
442 
443 #else
444 
445 static inline
446 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
447 				 uint8_t buf_len)
448 {
449 }
450 
451 static inline
452 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
453 {
454 }
455 
456 static inline
457 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
458 {
459 }
460 
461 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
462 {
463 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
464 	struct hif_exec_context *hif_ext_group;
465 	struct qca_napi_stat *napi_stats;
466 	int i, j;
467 
468 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
469 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
470 
471 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
472 		if (hif_state->hif_ext_group[i]) {
473 			hif_ext_group = hif_state->hif_ext_group[i];
474 			for (j = 0; j < num_possible_cpus(); j++) {
475 				napi_stats = &(hif_ext_group->stats[j]);
476 				if (napi_stats->napi_schedules != 0)
477 					QDF_TRACE(QDF_MODULE_ID_HIF,
478 						QDF_TRACE_LEVEL_FATAL,
479 						"NAPI[%2d]CPU[%d]: "
480 						"%7d %7d %7d %7d ",
481 						i, j,
482 						napi_stats->napi_schedules,
483 						napi_stats->napi_polls,
484 						napi_stats->napi_completes,
485 						napi_stats->napi_workdone);
486 			}
487 		}
488 	}
489 
490 	hif_print_napi_latency_stats(hif_state);
491 }
492 qdf_export_symbol(hif_print_napi_stats);
493 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
494 
495 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
496 {
497 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
498 
499 	tasklet_schedule(&t_ctx->tasklet);
500 }
501 
502 /**
503  * hif_exec_tasklet_fn() - grp tasklet function
504  * @data: context (a struct hif_exec_context cast to unsigned long)
505  *
506  * Return: void
507  */
508 static void hif_exec_tasklet_fn(unsigned long data)
509 {
510 	struct hif_exec_context *hif_ext_group =
511 			(struct hif_exec_context *)data;
512 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
513 	unsigned int work_done;
514 
515 	work_done =
516 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
517 
518 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
519 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
520 		hif_ext_group->irq_enable(hif_ext_group);
521 	} else {
522 		hif_exec_tasklet_schedule(hif_ext_group);
523 	}
524 }
525 
526 /**
527  * hif_latency_profile_measure() - calculate latency and update histogram
528  * @hif_ext_group: hif exec context
529  *
530  * Return: None
531  */
532 #ifdef HIF_LATENCY_PROFILE_ENABLE
533 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
534 {
535 	int64_t cur_tstamp;
536 	int64_t time_elapsed;
537 
538 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
539 
540 	if (cur_tstamp > hif_ext_group->tstamp)
541 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
542 	else
543 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
544 
545 	hif_ext_group->tstamp = cur_tstamp;
546 
547 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
548 		hif_ext_group->sched_latency_stats[0]++;
549 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
550 		hif_ext_group->sched_latency_stats[1]++;
551 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
552 		hif_ext_group->sched_latency_stats[2]++;
553 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
554 		hif_ext_group->sched_latency_stats[3]++;
555 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
556 		hif_ext_group->sched_latency_stats[4]++;
557 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
558 		hif_ext_group->sched_latency_stats[5]++;
559 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
560 		hif_ext_group->sched_latency_stats[6]++;
561 	else
562 		hif_ext_group->sched_latency_stats[7]++;
563 }
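/*
 * Example: assuming the HIF_SCHED_LATENCY_BUCKET_* thresholds match the
 * labels printed by hif_print_napi_latency_stats(), a 15 ms gap between the
 * IRQ firing and the BH running increments sched_latency_stats[2], i.e. the
 * "11-20 ms" bucket.
 */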
564 #else
565 static inline
566 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
567 {
568 }
569 #endif
570 
571 /**
572  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
573  * @hif_ext_group: hif exec context
574  *
575  * Return: None
576  */
577 #ifdef HIF_LATENCY_PROFILE_ENABLE
578 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
579 {
580 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
581 }
582 #else
583 static inline
584 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
585 {
586 }
587 #endif
588 
589 #ifdef FEATURE_NAPI
590 /**
591  * hif_exec_poll() - napi poll
592  * @napi: napi struct
593  * @budget: NAPI budget
594  *
595  * Return: work done, mapped from the internal budget back to the NAPI budget
596  */
597 static int hif_exec_poll(struct napi_struct *napi, int budget)
598 {
599 	struct hif_napi_exec_context *napi_exec_ctx =
600 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
601 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
602 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
603 	int work_done;
604 	int normalized_budget = 0;
605 	int actual_dones;
606 	int shift = hif_ext_group->scale_bin_shift;
607 	int cpu = smp_processor_id();
608 
609 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
610 			 0, 0, 0, HIF_EVENT_BH_SCHED);
611 
612 	hif_ext_group->force_break = false;
613 	hif_exec_update_service_start_time(hif_ext_group);
614 
615 	if (budget)
616 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
617 
618 	hif_latency_profile_measure(hif_ext_group);
619 
620 	work_done = hif_ext_group->handler(hif_ext_group->context,
621 					   normalized_budget);
622 
623 	actual_dones = work_done;
624 
625 	if (!hif_ext_group->force_break && work_done < normalized_budget) {
626 		napi_complete(napi);
627 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
628 		hif_ext_group->irq_enable(hif_ext_group);
629 		hif_ext_group->stats[cpu].napi_completes++;
630 	} else {
631 		/* if the ext_group supports time based yield, claim full work
632 		 * done anyway */
633 		work_done = normalized_budget;
634 	}
635 
636 	hif_ext_group->stats[cpu].napi_polls++;
637 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
638 
639 	/* map internal budget to NAPI budget */
640 	if (work_done)
641 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
642 
643 	hif_exec_fill_poll_time_histogram(hif_ext_group);
644 
645 	return work_done;
646 }
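/*
 * Illustrative walk-through (the numbers are hypothetical): with
 * scale_bin_shift = 2 and a NAPI budget of 64, the handler is offered a
 * normalized budget of 255. If it reaps 100 entries and force_break is not
 * set, the context completes NAPI, re-enables its IRQs and reports
 * (100 + 1) >> 2 = 25 back to the NAPI framework; if the budget is exhausted
 * or force_break is set, the full 64 is returned so that NAPI polls again.
 */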
647 
648 /**
649  * hif_exec_napi_schedule() - schedule the napi exec instance
650  * @ctx: a hif_exec_context known to be of napi type
651  */
652 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
653 {
654 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
655 	ctx->stats[smp_processor_id()].napi_schedules++;
656 
657 	napi_schedule(&n_ctx->napi);
658 }
659 
660 /**
661  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
662  * @ctx: a hif_exec_context known to be of napi type
663  */
664 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
665 {
666 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
667 	int irq_ind;
668 
669 	if (ctx->inited) {
670 		napi_disable(&n_ctx->napi);
671 		ctx->inited = 0;
672 	}
673 
674 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
675 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
676 
677 	hif_core_ctl_set_boost(false);
678 	netif_napi_del(&(n_ctx->napi));
679 }
680 
681 struct hif_execution_ops napi_sched_ops = {
682 	.schedule = &hif_exec_napi_schedule,
683 	.kill = &hif_exec_napi_kill,
684 };
685 
686 /**
687  * hif_exec_napi_create() - allocate and initialize a napi exec context
688  * @scale: a binary shift factor to map NAPI budget from/to internal
689  *         budget
690  */
691 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
692 {
693 	struct hif_napi_exec_context *ctx;
694 
695 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
696 	if (!ctx)
697 		return NULL;
698 
699 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
700 	ctx->exec_ctx.inited = true;
701 	ctx->exec_ctx.scale_bin_shift = scale;
702 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
703 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
704 		       QCA_NAPI_BUDGET);
705 	napi_enable(&ctx->napi);
706 
707 	return &ctx->exec_ctx;
708 }
709 #else
710 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
711 {
712 	hif_warn("FEATURE_NAPI not defined, making tasklet");
713 	return hif_exec_tasklet_create();
714 }
715 #endif
716 
717 
718 /**
719  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
720  * @ctx: a hif_exec_context known to be of tasklet type
721  */
722 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
723 {
724 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
725 	int irq_ind;
726 
727 	if (ctx->inited) {
728 		tasklet_disable(&t_ctx->tasklet);
729 		tasklet_kill(&t_ctx->tasklet);
730 	}
731 	ctx->inited = false;
732 
733 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
734 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
735 }
736 
737 struct hif_execution_ops tasklet_sched_ops = {
738 	.schedule = &hif_exec_tasklet_schedule,
739 	.kill = &hif_exec_tasklet_kill,
740 };
741 
742 /**
743  * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
744  */
745 static struct hif_exec_context *hif_exec_tasklet_create(void)
746 {
747 	struct hif_tasklet_exec_context *ctx;
748 
749 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
750 	if (!ctx)
751 		return NULL;
752 
753 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
754 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
755 		     (unsigned long)ctx);
756 
757 	ctx->exec_ctx.inited = true;
758 
759 	return &ctx->exec_ctx;
760 }
761 
762 /**
763  * hif_exec_get_ctx() - retrieve an exec context based on an id
764  * @softc: the hif context owning the exec context
765  * @id: the id of the exec context
766  *
767  * mostly added to make it easier to rename or move the context array
768  */
769 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
770 					  uint8_t id)
771 {
772 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
773 
774 	if (id < hif_state->hif_num_extgroup)
775 		return hif_state->hif_ext_group[id];
776 
777 	return NULL;
778 }
779 
780 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
781 				uint8_t id)
782 {
783 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
784 
785 	if (id < hif_state->hif_num_extgroup)
786 		return hif_state->hif_ext_group[id]->os_irq[0];
787 	return -EINVAL;
788 }
789 
790 qdf_export_symbol(hif_get_int_ctx_irq_num);
791 
792 #ifdef HIF_CPU_PERF_AFFINE_MASK
793 void hif_config_irq_set_perf_affinity_hint(
794 	struct hif_opaque_softc *hif_ctx)
795 {
796 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
797 
798 	hif_config_irq_affinity(scn);
799 }
800 
801 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
802 #endif
803 
804 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
805 {
806 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
807 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
808 	struct hif_exec_context *hif_ext_group;
809 	int i, status;
810 
811 	if (scn->ext_grp_irq_configured) {
812 		hif_err("Called after ext grp irq configured");
813 		return QDF_STATUS_E_FAILURE;
814 	}
815 
816 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
817 		hif_ext_group = hif_state->hif_ext_group[i];
818 		status = 0;
819 		qdf_spinlock_create(&hif_ext_group->irq_lock);
820 		if (hif_ext_group->configured &&
821 		    hif_ext_group->irq_requested == false) {
822 			hif_ext_group->irq_enabled = true;
823 			status = hif_grp_irq_configure(scn, hif_ext_group);
824 		}
825 		if (status != 0) {
826 			hif_err("Failed for group %d", i);
827 			hif_ext_group->irq_enabled = false;
828 		}
829 	}
830 
831 	scn->ext_grp_irq_configured = true;
832 
833 	return QDF_STATUS_SUCCESS;
834 }
835 
836 qdf_export_symbol(hif_configure_ext_group_interrupts);
837 
838 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
839 {
840 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
841 
842 	if (!scn || !scn->ext_grp_irq_configured) {
843 		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
844 		return;
845 	}
846 
847 	hif_grp_irq_deconfigure(scn);
848 	scn->ext_grp_irq_configured = false;
849 }
850 
851 qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
852 
853 #ifdef WLAN_SUSPEND_RESUME_TEST
854 /**
855  * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
856  *				       to trigger the fake-suspend command and,
857  *				       if so, issue the resume procedure.
858  * @scn: opaque HIF software context
859  *
860  * This API checks whether a unit-test command was used to trigger the
861  * fake-suspend command and, if so, triggers the resume procedure.
862  *
863  * The API is inline to save call overhead, and the qdf_unlikely() hint is
864  * used to minimize the performance impact.
865  *
866  * Return: void
867  */
868 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
869 {
870 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
871 		hif_ut_fw_resume(scn);
872 }
873 #else
874 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
875 {
876 }
877 #endif
878 
879 /**
880  * hif_check_and_trigger_sys_resume() - Check for bus suspend and
881  *  trigger system resume
882  * @scn: hif context
883  * @irq: irq number
884  *
885  * Return: None
886  */
887 static inline void
888 hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
889 {
890 	if (scn->bus_suspended && scn->linkstate_vote) {
891 		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
892 		qdf_pm_system_wakeup();
893 	}
894 }
895 
896 /**
897  * hif_ext_group_interrupt_handler() - handler for related interrupts
898  * @irq: irq number of the interrupt
899  * @context: the associated hif_exec_group context
900  *
901  * This callback function takes care of disabling the associated interrupts
902  * and scheduling the expected bottom half for the exec_context.
903  * It also helps keep track of the count of running contexts.
904  */
905 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
906 {
907 	struct hif_exec_context *hif_ext_group = context;
908 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
909 
910 	if (hif_ext_group->irq_requested) {
911 		hif_latency_profile_start(hif_ext_group);
912 
913 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
914 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
915 
916 		hif_ext_group->irq_disable(hif_ext_group);
917 		/*
918 		 * If a private ioctl has issued the fake suspend command to
919 		 * put the FW in D0-WOW state, then this is our chance to
920 		 * bring the FW out of WOW mode.
921 		 *
922 		 * The FW has to be woken up explicitly here because the APSS
923 		 * stayed fully awake throughout the time the fake APSS
924 		 * suspend command was in effect (it only put the FW in WOW
925 		 * mode), so the organic way of waking up the FW (as part of
926 		 * the APSS-host wake-up) will not happen, since in reality
927 		 * the APSS never suspended.
928 		 */
929 		hif_check_and_trigger_ut_resume(scn);
930 
931 		hif_check_and_trigger_sys_resume(scn, irq);
932 
933 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
934 
935 		hif_ext_group->sched_ops->schedule(hif_ext_group);
936 	}
937 
938 	return IRQ_HANDLED;
939 }
940 
941 /**
942  * hif_exec_kill() - kill the scheduled BH of every registered exec context
943  * @hif_ctx: HIF context
944  *
945  * Return: void
946  */
947 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
948 {
949 	int i;
950 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
951 
952 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
953 		hif_state->hif_ext_group[i]->sched_ops->kill(
954 			hif_state->hif_ext_group[i]);
955 
956 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
957 }
958 
959 /**
960  * hif_register_ext_group() - register an external group interrupt handler
961  * @hif_ctx: HIF context
962  * @numirq: number of IRQs in the group
963  * @irq: array of IRQ values
964  * @handler: callback interrupt handler function
965  * @cb_ctx: context to be passed to the callback
966  * @context_name: name of the requesting context
967  * @type: NAPI vs tasklet
968  * @scale: binary shift factor mapping the NAPI budget to the internal budget
969  * Return: QDF_STATUS
970  */
971 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
972 				  uint32_t numirq, uint32_t irq[],
973 				  ext_intr_handler handler,
974 				  void *cb_ctx, const char *context_name,
975 				  enum hif_exec_type type, uint32_t scale)
976 {
977 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
978 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
979 	struct hif_exec_context *hif_ext_group;
980 
981 	if (scn->ext_grp_irq_configured) {
982 		hif_err("Called after ext grp irq configured");
983 		return QDF_STATUS_E_FAILURE;
984 	}
985 
986 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
987 		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
988 		return QDF_STATUS_E_FAILURE;
989 	}
990 
991 	if (numirq >= HIF_MAX_GRP_IRQ) {
992 		hif_err("Invalid numirq: %d", numirq);
993 		return QDF_STATUS_E_FAILURE;
994 	}
995 
996 	hif_ext_group = hif_exec_create(type, scale);
997 	if (!hif_ext_group)
998 		return QDF_STATUS_E_FAILURE;
999 
1000 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
1001 		hif_ext_group;
1002 
1003 	hif_ext_group->numirq = numirq;
1004 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1005 	hif_ext_group->context = cb_ctx;
1006 	hif_ext_group->handler = handler;
1007 	hif_ext_group->configured = true;
1008 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1009 	hif_ext_group->hif = hif_ctx;
1010 	hif_ext_group->context_name = context_name;
1011 	hif_ext_group->type = type;
1012 
1013 	hif_state->hif_num_extgroup++;
1014 	return QDF_STATUS_SUCCESS;
1015 }
1016 qdf_export_symbol(hif_register_ext_group);
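/*
 * Usage sketch (illustrative only; the IRQ numbers, handler, context and
 * names below are hypothetical, not taken from this driver):
 *
 *	static uint32_t grp_irqs[] = { 410, 411, 412 };
 *
 *	if (hif_register_ext_group(hif_ctx, ARRAY_SIZE(grp_irqs), grp_irqs,
 *				   my_grp_handler, my_grp_ctx, "my_dp_intr",
 *				   HIF_EXEC_NAPI_TYPE, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *
 * The group only becomes live after hif_configure_ext_group_interrupts()
 * is called, which requests the IRQs for each configured group.
 */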
1017 
1018 /**
1019  * hif_exec_create() - create an execution context
1020  * @type: the type of execution context to create
1021  */
1022 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
1023 						uint32_t scale)
1024 {
1025 	hif_debug("%s: create exec_type %d budget %d\n",
1026 		  __func__, type, QCA_NAPI_BUDGET * scale);
1027 
1028 	switch (type) {
1029 	case HIF_EXEC_NAPI_TYPE:
1030 		return hif_exec_napi_create(scale);
1031 
1032 	case HIF_EXEC_TASKLET_TYPE:
1033 		return hif_exec_tasklet_create();
1034 	default:
1035 		return NULL;
1036 	}
1037 }
1038 
1039 /**
1040  * hif_exec_destroy() - free the hif_exec context
1041  * @ctx: context to free
1042  *
1043  * Please kill the context before freeing it to avoid a use-after-free.
1044  */
1045 void hif_exec_destroy(struct hif_exec_context *ctx)
1046 {
1047 	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
1048 
1049 	if (scn->ext_grp_irq_configured)
1050 		qdf_spinlock_destroy(&ctx->irq_lock);
1051 	qdf_mem_free(ctx);
1052 }
1053 
1054 /**
1055  * hif_deregister_exec_group() - API to free the exec contexts
1056  * @hif_ctx: HIF context
1057  * @context_name: name of the module whose contexts need to be deregistered
1058  *
1059  * This function deregisters the contexts of the requestor identified
1060  * based on the context_name & frees the memory.
1061  *
1062  * Return: void
1063  */
1064 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1065 				const char *context_name)
1066 {
1067 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1068 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1069 	struct hif_exec_context *hif_ext_group;
1070 	int i;
1071 
1072 	for (i = 0; i < HIF_MAX_GROUP; i++) {
1073 		hif_ext_group = hif_state->hif_ext_group[i];
1074 
1075 		if (!hif_ext_group)
1076 			continue;
1077 
1078 		hif_debug("%s: Deregistering grp id %d name %s\n",
1079 			  __func__,
1080 			  hif_ext_group->grp_id,
1081 			  hif_ext_group->context_name);
1082 
1083 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
1084 			hif_ext_group->sched_ops->kill(hif_ext_group);
1085 			hif_state->hif_ext_group[i] = NULL;
1086 			hif_exec_destroy(hif_ext_group);
1087 			hif_state->hif_num_extgroup--;
1088 		}
1089 
1090 	}
1091 }
1092 qdf_export_symbol(hif_deregister_exec_group);
1093