xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <hif_exec.h>
21 #include <ce_main.h>
22 #include "qdf_module.h"
23 #include "qdf_net_if.h"
24 #include <pld_common.h>
25 
26 /* mapping NAPI budget 0 to internal budget 0
27  * NAPI budget 1 to internal budget [1, scaler - 1]
28  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
29  */
30 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
31 	(((n) << (s)) - 1)
32 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
33 	(((n) + 1) >> (s))
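
/*
 * Worked example (illustrative only, assuming a scale_bin_shift of 2,
 * i.e. an internal scaler of 4): a NAPI budget of 64 maps to an internal
 * budget of (64 << 2) - 1 = 255, and an internal work_done of 255 maps
 * back to (255 + 1) >> 2 = 64 NAPI units.
 */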
34 
35 static struct hif_exec_context *hif_exec_tasklet_create(void);
36 
37 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
38 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
39 
40 static inline
41 int hif_get_next_record_index(qdf_atomic_t *table_index,
42 			      int array_size)
43 {
44 	int record_index = qdf_atomic_inc_return(table_index);
45 
46 	return record_index & (array_size - 1);
47 }
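
/*
 * Note: the wrap-around above relies on array_size being a power of two
 * (HIF_EVENT_HIST_MAX is assumed to satisfy this); e.g. with an
 * array_size of 512, an incremented table_index of 513 maps to slot
 * 513 & 511 = 1.
 */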
48 
49 /**
50  * hif_hist_is_prev_record() - Check if index is the immediate
51  *  previous record wrt curr_index
52  * @curr_index: curr index in the event history
53  * @index: index to be checked
54  * @hist_size: history size
55  *
56  * Return: true if index is immediately behind curr_index else false
57  */
58 static inline
59 bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
60 			     uint32_t hist_size)
61 {
62 	return (((index + 1) & (hist_size - 1)) == curr_index) ?
63 			true : false;
64 }
65 
66 /**
67  * hif_hist_skip_event_record() - Check if current event needs to be
68  *  recorded or not
69  * @hist_ev: HIF event history
70  * @event: DP event entry
71  *
72  * Return: true if current event needs to be skipped else false
73  */
74 static bool
75 hif_hist_skip_event_record(struct hif_event_history *hist_ev,
76 			   struct hif_event_record *event)
77 {
78 	struct hif_event_record *rec;
79 	struct hif_event_record *last_irq_rec;
80 	int32_t index;
81 
82 	index = qdf_atomic_read(&hist_ev->index);
83 	if (index < 0)
84 		return false;
85 
86 	index &= (HIF_EVENT_HIST_MAX - 1);
87 	rec = &hist_ev->event[index];
88 
89 	switch (event->type) {
90 	case HIF_EVENT_IRQ_TRIGGER:
91 		/*
92 		 * The prev record check is to prevent skipping the IRQ event
93 		 * record in case where BH got re-scheduled due to force_break
94 		 * but there are no entries to be reaped in the rings.
95 		 */
96 		if (rec->type == HIF_EVENT_BH_SCHED &&
97 		    hif_hist_is_prev_record(index,
98 					    hist_ev->misc.last_irq_index,
99 					    HIF_EVENT_HIST_MAX)) {
100 			last_irq_rec =
101 				&hist_ev->event[hist_ev->misc.last_irq_index];
102 			last_irq_rec->timestamp = hif_get_log_timestamp();
103 			last_irq_rec->cpu_id = qdf_get_cpu();
104 			last_irq_rec->hp++;
105 			last_irq_rec->tp = last_irq_rec->timestamp -
106 						hist_ev->misc.last_irq_ts;
107 			return true;
108 		}
109 		break;
110 	case HIF_EVENT_BH_SCHED:
111 		if (rec->type == HIF_EVENT_BH_SCHED) {
112 			rec->timestamp = hif_get_log_timestamp();
113 			rec->cpu_id = qdf_get_cpu();
114 			return true;
115 		}
116 		break;
117 	case HIF_EVENT_SRNG_ACCESS_START:
118 		if (event->hp == event->tp)
119 			return true;
120 		break;
121 	case HIF_EVENT_SRNG_ACCESS_END:
122 		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
123 			return true;
124 		break;
125 	case HIF_EVENT_BH_COMPLETE:
126 	case HIF_EVENT_BH_FORCE_BREAK:
127 		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
128 			return true;
129 		break;
130 	default:
131 		break;
132 	}
133 
134 	return false;
135 }
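
/*
 * Note on the skip logic above: back-to-back events of the same kind are
 * coalesced instead of consuming new history slots - repeated
 * HIF_EVENT_BH_SCHED entries only refresh the timestamp/cpu_id of the
 * existing record, and an IRQ arriving right after an IRQ->BH_SCHED pair
 * updates the last IRQ record in place (reusing its hp field as a hit
 * counter and tp as the time delta since the previous IRQ).
 */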
136 
137 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
138 			   struct hif_event_record *event, uint8_t intr_grp_id)
139 {
140 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
141 	struct hif_event_history *hist_ev;
142 	struct hif_event_record *record;
143 	int record_index;
144 
145 	if (!(scn->event_enable_mask & BIT(event->type)))
146 		return;
147 
148 	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
149 		hif_err("Invalid interrupt group id %d", intr_grp_id);
150 		return;
151 	}
152 
153 	hist_ev = scn->evt_hist[intr_grp_id];
154 	if (qdf_unlikely(!hist_ev))
155 		return;
156 
157 	if (hif_hist_skip_event_record(hist_ev, event))
158 		return;
159 
160 	record_index = hif_get_next_record_index(
161 			&hist_ev->index, HIF_EVENT_HIST_MAX);
162 
163 	record = &hist_ev->event[record_index];
164 
165 	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
166 		hist_ev->misc.last_irq_index = record_index;
167 		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
168 	}
169 
170 	record->hal_ring_id = event->hal_ring_id;
171 	record->hp = event->hp;
172 	record->tp = event->tp;
173 	record->cpu_id = qdf_get_cpu();
174 	record->timestamp = hif_get_log_timestamp();
175 	record->type = event->type;
176 }
177 
178 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
179 {
180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
181 
182 	scn->evt_hist[id] = &hif_event_desc_history[id];
183 	qdf_atomic_set(&scn->evt_hist[id]->index, -1);
184 
185 	hif_info("SRNG events history initialized for group: %d", id);
186 }
187 
188 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
189 {
190 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
191 
192 	scn->evt_hist[id] = NULL;
193 	hif_info("SRNG events history de-initialized for group: %d", id);
194 }
195 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
196 
197 /**
198  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
199  * @hif_state: hif context
200  *
201  * Return: void
202  */
203 #ifdef HIF_LATENCY_PROFILE_ENABLE
204 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
205 {
206 	struct hif_exec_context *hif_ext_group;
207 	int i, j;
208 	int64_t cur_tstamp;
209 
210 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
211 		"0-2   ms",
212 		"3-10  ms",
213 		"11-20 ms",
214 		"21-50 ms",
215 		"51-100 ms",
216 		"101-250 ms",
217 		"251-500 ms",
218 		"> 500 ms"
219 	};
220 
221 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
222 
223 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
224 		  "Current timestamp: %lld", cur_tstamp);
225 
226 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
227 		if (hif_state->hif_ext_group[i]) {
228 			hif_ext_group = hif_state->hif_ext_group[i];
229 
230 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
231 				  "Interrupts in the HIF Group");
232 
233 			for (j = 0; j < hif_ext_group->numirq; j++) {
234 				QDF_TRACE(QDF_MODULE_ID_HIF,
235 					  QDF_TRACE_LEVEL_FATAL,
236 					  "  %s",
237 					  hif_ext_group->irq_name
238 					  (hif_ext_group->irq[j]));
239 			}
240 
241 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
242 				  "Last serviced timestamp: %lld",
243 				  hif_ext_group->tstamp);
244 
245 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
246 				  "Latency Bucket     | Time elapsed");
247 
248 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
249 				QDF_TRACE(QDF_MODULE_ID_HIF,
250 					  QDF_TRACE_LEVEL_FATAL,
251 					  "%s     |    %lld", time_str[j],
252 					  hif_ext_group->
253 					  sched_latency_stats[j]);
254 			}
255 		}
256 	}
257 }
258 #else
259 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
260 {
261 }
262 #endif
263 
264 /**
265  * hif_clear_napi_stats() - reset NAPI stats
266  * @hif_ctx: hif context
267  *
268  * Return: void
269  */
270 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
271 {
272 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
273 	struct hif_exec_context *hif_ext_group;
274 	size_t i;
275 
276 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
277 		hif_ext_group = hif_state->hif_ext_group[i];
278 
279 		if (!hif_ext_group)
280 			return;
281 
282 		qdf_mem_set(hif_ext_group->sched_latency_stats,
283 			    sizeof(hif_ext_group->sched_latency_stats),
284 			    0x0);
285 	}
286 }
287 
288 qdf_export_symbol(hif_clear_napi_stats);
289 
290 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
291 /**
292  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
293  * @stats: NAPI stats to get poll time buckets
294  * @buf: buffer to fill histogram string
295  * @buf_len: length of the buffer
296  *
297  * Return: void
298  */
299 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
300 					uint8_t buf_len)
301 {
302 	int i;
303 	int str_index = 0;
304 
305 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
306 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
307 					   "%u|", stats->poll_time_buckets[i]);
308 }
309 
310 /**
311  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
312  * @hif_ext_group: hif_ext_group of type NAPI
313  *
314  * The function is called at the end of a NAPI poll to calculate poll time
315  * buckets.
316  *
317  * Return: void
318  */
319 static
320 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
321 {
322 	struct qca_napi_stat *napi_stat;
323 	unsigned long long poll_time_ns;
324 	uint32_t poll_time_us;
325 	uint32_t bucket_size_us = 500;
326 	uint32_t bucket;
327 	uint32_t cpu_id = qdf_get_cpu();
328 
329 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
330 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
331 
332 	napi_stat = &hif_ext_group->stats[cpu_id];
333 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
334 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
335 
336 	bucket = poll_time_us / bucket_size_us;
337 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
338 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
339 	++napi_stat->poll_time_buckets[bucket];
340 }
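
/*
 * Example (illustrative): with bucket_size_us = 500, a poll that ran for
 * 1350 us lands in bucket 1350 / 500 = 2 (the 1000-1499 us range); polls
 * of 500 * QCA_NAPI_NUM_BUCKETS us or longer are clamped into the last
 * bucket.
 */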
341 
342 /**
343  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
344  * @hif_ext_group: hif_ext_group of type NAPI
345  *
346  * Return: true if NAPI needs to yield, else false
347  */
348 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
349 {
350 	bool time_limit_reached = false;
351 	unsigned long long poll_time_ns;
352 	int cpu_id = qdf_get_cpu();
353 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
354 	struct hif_config_info *cfg = &scn->hif_config;
355 
356 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
357 	time_limit_reached =
358 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
359 
360 	if (time_limit_reached) {
361 		hif_ext_group->stats[cpu_id].time_limit_reached++;
362 		hif_ext_group->force_break = true;
363 	}
364 
365 	return time_limit_reached;
366 }
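
/*
 * When the yield limit is hit, force_break makes hif_exec_poll() below
 * report the full normalized budget back to the NAPI framework, so the
 * context stays scheduled and the remaining work is picked up in the
 * next poll instead of holding the softirq.
 */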
367 
368 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
369 {
370 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
371 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
372 	struct hif_exec_context *hif_ext_group;
373 	bool ret_val = false;
374 
375 	if (!(grp_id < hif_state->hif_num_extgroup) ||
376 	    !(grp_id < HIF_MAX_GROUP))
377 		return false;
378 
379 	hif_ext_group = hif_state->hif_ext_group[grp_id];
380 
381 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
382 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
383 
384 	return ret_val;
385 }
386 
387 /**
388  * hif_exec_update_service_start_time() - Update NAPI poll start time
389  * @hif_ext_group: hif_ext_group of type NAPI
390  *
391  * The function is called at the beginning of a NAPI poll to record the poll
392  * start time.
393  *
394  * Return: None
395  */
396 static inline
397 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
398 {
399 	hif_ext_group->poll_start_time = qdf_time_sched_clock();
400 }
401 
402 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
403 {
404 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
405 	struct hif_exec_context *hif_ext_group;
406 	struct qca_napi_stat *napi_stats;
407 	int i, j;
408 
409 	/*
410 	 * Max value of uint32_t (poll_time_bucket) = 4294967295,
411 	 * so each bucket value needs 10 chars + 1 separator = 11 chars,
412 	 * plus 1 char for the terminating '\0'.
413 	 */
414 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
415 
416 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
417 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
418 
419 	for (i = 0;
420 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
421 	     i++) {
422 		hif_ext_group = hif_state->hif_ext_group[i];
423 		for (j = 0; j < num_possible_cpus(); j++) {
424 			napi_stats = &hif_ext_group->stats[j];
425 			if (!napi_stats->napi_schedules)
426 				continue;
427 
428 			hif_get_poll_times_hist_str(napi_stats,
429 						    hist_str,
430 						    sizeof(hist_str));
431 			QDF_TRACE(QDF_MODULE_ID_HIF,
432 				  QDF_TRACE_LEVEL_INFO_HIGH,
433 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
434 				  i, j,
435 				  napi_stats->napi_schedules,
436 				  napi_stats->napi_polls,
437 				  napi_stats->napi_completes,
438 				  napi_stats->napi_workdone,
439 				  napi_stats->time_limit_reached,
440 				  qdf_do_div(napi_stats->napi_max_poll_time,
441 					     1000),
442 				  hist_str);
443 		}
444 	}
445 
446 	hif_print_napi_latency_stats(hif_state);
447 }
448 
449 qdf_export_symbol(hif_print_napi_stats);
450 
451 #else
452 
453 static inline
454 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
455 				 uint8_t buf_len)
456 {
457 }
458 
459 static inline
460 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
461 {
462 }
463 
464 static inline
465 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
466 {
467 }
468 
469 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
470 {
471 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
472 	struct hif_exec_context *hif_ext_group;
473 	struct qca_napi_stat *napi_stats;
474 	int i, j;
475 
476 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
477 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
478 
479 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
480 		if (hif_state->hif_ext_group[i]) {
481 			hif_ext_group = hif_state->hif_ext_group[i];
482 			for (j = 0; j < num_possible_cpus(); j++) {
483 				napi_stats = &(hif_ext_group->stats[j]);
484 				if (napi_stats->napi_schedules != 0)
485 					QDF_TRACE(QDF_MODULE_ID_HIF,
486 						QDF_TRACE_LEVEL_FATAL,
487 						"NAPI[%2d]CPU[%d]: "
488 						"%7d %7d %7d %7d ",
489 						i, j,
490 						napi_stats->napi_schedules,
491 						napi_stats->napi_polls,
492 						napi_stats->napi_completes,
493 						napi_stats->napi_workdone);
494 			}
495 		}
496 	}
497 
498 	hif_print_napi_latency_stats(hif_state);
499 }
500 qdf_export_symbol(hif_print_napi_stats);
501 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
502 
503 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
504 {
505 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
506 
507 	tasklet_schedule(&t_ctx->tasklet);
508 }
509 
510 /**
511  * hif_exec_tasklet_fn() - grp tasklet
512  * @data: context
513  *
514  * Return: void
515  */
516 static void hif_exec_tasklet_fn(unsigned long data)
517 {
518 	struct hif_exec_context *hif_ext_group =
519 			(struct hif_exec_context *)data;
520 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
521 	unsigned int work_done;
522 	int cpu = smp_processor_id();
523 
524 	work_done =
525 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
526 				       cpu);
527 
528 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
529 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
530 		hif_ext_group->irq_enable(hif_ext_group);
531 	} else {
532 		hif_exec_tasklet_schedule(hif_ext_group);
533 	}
534 }
535 
536 /**
537  * hif_latency_profile_measure() - calculate latency and update histogram
538  * @hif_ext_group: hif exec context
539  *
540  * Return: None
541  */
542 #ifdef HIF_LATENCY_PROFILE_ENABLE
543 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
544 {
545 	int64_t cur_tstamp;
546 	int64_t time_elapsed;
547 
548 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
549 
550 	if (cur_tstamp > hif_ext_group->tstamp)
551 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
552 	else
553 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
554 
555 	hif_ext_group->tstamp = cur_tstamp;
556 
557 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
558 		hif_ext_group->sched_latency_stats[0]++;
559 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
560 		hif_ext_group->sched_latency_stats[1]++;
561 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
562 		hif_ext_group->sched_latency_stats[2]++;
563 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
564 		hif_ext_group->sched_latency_stats[3]++;
565 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
566 		hif_ext_group->sched_latency_stats[4]++;
567 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
568 		hif_ext_group->sched_latency_stats[5]++;
569 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
570 		hif_ext_group->sched_latency_stats[6]++;
571 	else
572 		hif_ext_group->sched_latency_stats[7]++;
573 }
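
/*
 * Example (illustrative, assuming the HIF_SCHED_LATENCY_BUCKET_*
 * thresholds match the ranges printed by hif_print_napi_latency_stats()):
 * a scheduling latency of 15 ms falls through the first two comparisons
 * and increments sched_latency_stats[2], the "11-20 ms" bucket.
 */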
574 #else
575 static inline
576 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
577 {
578 }
579 #endif
580 
581 /**
582  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
583  * @hif_ext_group: hif exec context
584  *
585  * Return: None
586  */
587 #ifdef HIF_LATENCY_PROFILE_ENABLE
588 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
589 {
590 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
591 }
592 #else
593 static inline
594 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
595 {
596 }
597 #endif
598 
599 #ifdef FEATURE_NAPI
600 #ifdef FEATURE_IRQ_AFFINITY
601 static inline int32_t
602 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
603 {
604 	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
605 }
606 #else
607 static inline int32_t
608 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
609 {
610 	return 0;
611 }
612 #endif
613 
614 /**
615  * hif_exec_poll() - napi poll
616  * @napi: napi struct
617  * @budget: NAPI budget for this poll
618  *
619  * Return: work done, mapped from the internal budget to the NAPI budget
620  */
621 static int hif_exec_poll(struct napi_struct *napi, int budget)
622 {
623 	struct hif_napi_exec_context *napi_exec_ctx =
624 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
625 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
626 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
627 	int work_done;
628 	int normalized_budget = 0;
629 	int actual_dones;
630 	int shift = hif_ext_group->scale_bin_shift;
631 	int cpu = smp_processor_id();
632 
633 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
634 			 0, 0, 0, HIF_EVENT_BH_SCHED);
635 
636 	hif_ext_group->force_break = false;
637 	hif_exec_update_service_start_time(hif_ext_group);
638 
639 	if (budget)
640 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
641 
642 	hif_latency_profile_measure(hif_ext_group);
643 
644 	work_done = hif_ext_group->handler(hif_ext_group->context,
645 					   normalized_budget, cpu);
646 
647 	actual_dones = work_done;
648 
649 	if (hif_is_force_napi_complete_required(hif_ext_group) ||
650 	    (!hif_ext_group->force_break && work_done < normalized_budget)) {
651 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
652 				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
653 		napi_complete(napi);
654 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
655 		hif_ext_group->irq_enable(hif_ext_group);
656 		hif_ext_group->stats[cpu].napi_completes++;
657 	} else {
658 		/* if the ext_group supports time-based yield, claim full
659 		 * work done anyway */
660 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
661 				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
662 		work_done = normalized_budget;
663 	}
664 
665 	hif_ext_group->stats[cpu].napi_polls++;
666 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
667 
668 	/* map internal budget to NAPI budget */
669 	if (work_done)
670 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
671 
672 	hif_exec_fill_poll_time_histogram(hif_ext_group);
673 
674 	return work_done;
675 }
676 
677 /**
678  * hif_exec_napi_schedule() - schedule the napi exec instance
679  * @ctx: a hif_exec_context known to be of napi type
680  */
681 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
682 {
683 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
684 	ctx->stats[smp_processor_id()].napi_schedules++;
685 
686 	napi_schedule(&n_ctx->napi);
687 }
688 
689 /**
690  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
691  * @ctx: a hif_exec_context known to be of napi type
692  */
693 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
694 {
695 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
696 	int irq_ind;
697 
698 	if (ctx->inited) {
699 		napi_disable(&n_ctx->napi);
700 		ctx->inited = 0;
701 	}
702 
703 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
704 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
705 
706 	hif_core_ctl_set_boost(false);
707 	netif_napi_del(&(n_ctx->napi));
708 }
709 
710 struct hif_execution_ops napi_sched_ops = {
711 	.schedule = &hif_exec_napi_schedule,
712 	.kill = &hif_exec_napi_kill,
713 };
714 
715 /**
716  * hif_exec_napi_create() - allocate and initialize a napi exec context
717  * @scale: a binary shift factor to map NAPI budget from/to internal
718  *         budget
719  */
720 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
721 {
722 	struct hif_napi_exec_context *ctx;
723 
724 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
725 	if (!ctx)
726 		return NULL;
727 
728 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
729 	ctx->exec_ctx.inited = true;
730 	ctx->exec_ctx.scale_bin_shift = scale;
731 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
732 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
733 		       QCA_NAPI_BUDGET);
734 	napi_enable(&ctx->napi);
735 
736 	return &ctx->exec_ctx;
737 }
738 #else
739 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
740 {
741 	hif_warn("FEATURE_NAPI not defined, making tasklet");
742 	return hif_exec_tasklet_create();
743 }
744 #endif
745 
746 
747 /**
748  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
749  * @ctx: a hif_exec_context known to be of tasklet type
750  */
751 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
752 {
753 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
754 	int irq_ind;
755 
756 	if (ctx->inited) {
757 		tasklet_disable(&t_ctx->tasklet);
758 		tasklet_kill(&t_ctx->tasklet);
759 	}
760 	ctx->inited = false;
761 
762 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
763 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
764 }
765 
766 struct hif_execution_ops tasklet_sched_ops = {
767 	.schedule = &hif_exec_tasklet_schedule,
768 	.kill = &hif_exec_tasklet_kill,
769 };
770 
771 /**
772  * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
773  */
774 static struct hif_exec_context *hif_exec_tasklet_create(void)
775 {
776 	struct hif_tasklet_exec_context *ctx;
777 
778 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
779 	if (!ctx)
780 		return NULL;
781 
782 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
783 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
784 		     (unsigned long)ctx);
785 
786 	ctx->exec_ctx.inited = true;
787 
788 	return &ctx->exec_ctx;
789 }
790 
791 /**
792  * hif_exec_get_ctx() - retrieve an exec context based on an id
793  * @softc: the hif context owning the exec context
794  * @id: the id of the exec context
795  *
796  * mostly added to make it easier to rename or move the context array
797  */
798 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
799 					  uint8_t id)
800 {
801 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
802 
803 	if (id < hif_state->hif_num_extgroup)
804 		return hif_state->hif_ext_group[id];
805 
806 	return NULL;
807 }
808 
809 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
810 				uint8_t id)
811 {
812 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
813 
814 	if (id < hif_state->hif_num_extgroup)
815 		return hif_state->hif_ext_group[id]->os_irq[0];
816 	return -EINVAL;
817 }
818 
819 qdf_export_symbol(hif_get_int_ctx_irq_num);
820 
821 #ifdef HIF_CPU_PERF_AFFINE_MASK
822 void hif_config_irq_set_perf_affinity_hint(
823 	struct hif_opaque_softc *hif_ctx)
824 {
825 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
826 
827 	hif_config_irq_affinity(scn);
828 }
829 
830 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
831 #endif
832 
833 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
834 {
835 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
836 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
837 	struct hif_exec_context *hif_ext_group;
838 	int i, status;
839 
840 	if (scn->ext_grp_irq_configured) {
841 		hif_err("Called after ext grp irq configured");
842 		return QDF_STATUS_E_FAILURE;
843 	}
844 
845 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
846 		hif_ext_group = hif_state->hif_ext_group[i];
847 		status = 0;
848 		qdf_spinlock_create(&hif_ext_group->irq_lock);
849 		if (hif_ext_group->configured &&
850 		    hif_ext_group->irq_requested == false) {
851 			hif_ext_group->irq_enabled = true;
852 			status = hif_grp_irq_configure(scn, hif_ext_group);
853 		}
854 		if (status != 0) {
855 			hif_err("Failed for group %d", i);
856 			hif_ext_group->irq_enabled = false;
857 		}
858 	}
859 
860 	scn->ext_grp_irq_configured = true;
861 
862 	return QDF_STATUS_SUCCESS;
863 }
864 
865 qdf_export_symbol(hif_configure_ext_group_interrupts);
866 
867 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
868 {
869 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
870 
871 	if (!scn || !scn->ext_grp_irq_configured) {
872 		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
873 		return;
874 	}
875 
876 	hif_grp_irq_deconfigure(scn);
877 	scn->ext_grp_irq_configured = false;
878 }
879 
880 qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
881 
882 #ifdef WLAN_SUSPEND_RESUME_TEST
883 /**
884  * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
885  *				       to trigger the fake-suspend command and,
886  *				       if so, issue the resume procedure.
887  * @scn: HIF software context
888  *
889  * This API checks if a unit-test command was used to trigger the fake-suspend
890  * command and, if so, triggers the resume procedure.
891  *
892  * The API is inline to avoid function-call overhead, and the check is marked
893  * unlikely so that branch prediction minimizes the performance impact.
894  *
895  * Return: void
896  */
897 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
898 {
899 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
900 		hif_ut_fw_resume(scn);
901 }
902 #else
903 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
904 {
905 }
906 #endif
907 
908 /**
909  * hif_check_and_trigger_sys_resume() - Check for bus suspend and
910  *  trigger system resume
911  * @scn: hif context
912  * @irq: irq number
913  *
914  * Return: None
915  */
916 static inline void
917 hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
918 {
919 	if (scn->bus_suspended && scn->linkstate_vote) {
920 		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
921 		qdf_pm_system_wakeup();
922 	}
923 }
924 
925 /**
926  * hif_ext_group_interrupt_handler() - handler for related interrupts
927  * @irq: irq number of the interrupt
928  * @context: the associated hif_exec_group context
929  *
930  * This callback function takes care of disabling the associated interrupts
931  * and scheduling the expected bottom half for the exec_context.
932  * It also helps keep track of the count of running contexts.
933  */
934 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
935 {
936 	struct hif_exec_context *hif_ext_group = context;
937 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
938 
939 	if (hif_ext_group->irq_requested) {
940 		hif_latency_profile_start(hif_ext_group);
941 
942 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
943 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
944 
945 		hif_ext_group->irq_disable(hif_ext_group);
946 		/*
947 		 * If a private ioctl issued a fake suspend command to put
948 		 * the FW in D0-WOW state, then here is our chance to bring
949 		 * the FW out of WOW mode.
950 		 *
951 		 * The FW must be woken up explicitly here because the APSS
952 		 * was fully awake the whole time the fake APSS suspend
953 		 * command was issued (to put the FW in WOW mode); hence the
954 		 * organic way of waking up the FW (as part of an APSS-host
955 		 * wake-up) will not happen, because in reality the APSS
956 		 * never suspended.
957 		 */
958 		hif_check_and_trigger_ut_resume(scn);
959 
960 		hif_check_and_trigger_sys_resume(scn, irq);
961 
962 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
963 
964 		hif_ext_group->sched_ops->schedule(hif_ext_group);
965 	}
966 
967 	return IRQ_HANDLED;
968 }
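
/*
 * Interrupt flow summary: the handler above runs the group in a one-shot
 * fashion - it disables the group's interrupts, bumps
 * active_grp_tasklet_cnt and schedules the bottom half; the bottom half
 * (hif_exec_tasklet_fn() or hif_exec_poll()) decrements the counter and
 * re-enables the interrupts only once all outstanding work is done.
 */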
969 
970 /**
971  * hif_exec_kill() - grp tasklet kill
972  * @hif_ctx: HIF context
973  *
974  * Return: void
975  */
976 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
977 {
978 	int i;
979 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
980 
981 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
982 		hif_state->hif_ext_group[i]->sched_ops->kill(
983 			hif_state->hif_ext_group[i]);
984 
985 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
986 }
987 
988 #ifdef FEATURE_IRQ_AFFINITY
989 static inline void
990 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
991 {
992 	qdf_atomic_init(&hif_ext_group->force_napi_complete);
993 }
994 #else
995 static inline void
996 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
997 {
998 }
999 #endif
1000 
1001 /**
1002  * hif_register_ext_group() - API to register external group
1003  * interrupt handler.
1004  * @hif_ctx: HIF Context
1005  * @numirq: number of irqs in the group
1006  * @irq: array of irq values
1007  * @handler: callback interrupt handler function
1008  * @cb_ctx: context to be passed to the callback handler
1009  * @type: napi vs tasklet
1010  *
1011  * Return: QDF_STATUS
1012  */
1013 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1014 				  uint32_t numirq, uint32_t irq[],
1015 				  ext_intr_handler handler,
1016 				  void *cb_ctx, const char *context_name,
1017 				  enum hif_exec_type type, uint32_t scale)
1018 {
1019 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1020 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1021 	struct hif_exec_context *hif_ext_group;
1022 
1023 	if (scn->ext_grp_irq_configured) {
1024 		hif_err("Called after ext grp irq configured");
1025 		return QDF_STATUS_E_FAILURE;
1026 	}
1027 
1028 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
1029 		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
1030 		return QDF_STATUS_E_FAILURE;
1031 	}
1032 
1033 	if (numirq >= HIF_MAX_GRP_IRQ) {
1034 		hif_err("Invalid numirq: %d", numirq);
1035 		return QDF_STATUS_E_FAILURE;
1036 	}
1037 
1038 	hif_ext_group = hif_exec_create(type, scale);
1039 	if (!hif_ext_group)
1040 		return QDF_STATUS_E_FAILURE;
1041 
1042 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
1043 		hif_ext_group;
1044 
1045 	hif_ext_group->numirq = numirq;
1046 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1047 	hif_ext_group->context = cb_ctx;
1048 	hif_ext_group->handler = handler;
1049 	hif_ext_group->configured = true;
1050 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1051 	hif_ext_group->hif = hif_ctx;
1052 	hif_ext_group->context_name = context_name;
1053 	hif_ext_group->type = type;
1054 	hif_init_force_napi_complete(hif_ext_group);
1055 
1056 	hif_state->hif_num_extgroup++;
1057 	return QDF_STATUS_SUCCESS;
1058 }
1059 qdf_export_symbol(hif_register_ext_group);
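
/*
 * Hypothetical usage sketch (for illustration only; the names below are
 * made up and actual callers live in the DP layer):
 *
 *	uint32_t irqs[] = { pci_irq_0, pci_irq_1 };
 *
 *	status = hif_register_ext_group(hif_ctx, QDF_ARRAY_SIZE(irqs), irqs,
 *					my_group_handler, my_cb_ctx,
 *					"my_dp_context", HIF_EXEC_NAPI_TYPE,
 *					0);
 *
 * The interrupts only start firing after a subsequent call to
 * hif_configure_ext_group_interrupts().
 */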
1060 
1061 /**
1062  * hif_exec_create() - create an execution context
1063  * @type: the type of execution context to create
1064  */
1065 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
1066 						uint32_t scale)
1067 {
1068 	hif_debug("%s: create exec_type %d budget %d\n",
1069 		  __func__, type, QCA_NAPI_BUDGET * scale);
1070 
1071 	switch (type) {
1072 	case HIF_EXEC_NAPI_TYPE:
1073 		return hif_exec_napi_create(scale);
1074 
1075 	case HIF_EXEC_TASKLET_TYPE:
1076 		return hif_exec_tasklet_create();
1077 	default:
1078 		return NULL;
1079 	}
1080 }
1081 
1082 /**
1083  * hif_exec_destroy() - free the hif_exec context
1084  * @ctx: context to free
1085  *
1086  * please kill the context before freeing it to avoid a use after free.
1087  */
1088 void hif_exec_destroy(struct hif_exec_context *ctx)
1089 {
1090 	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
1091 
1092 	if (scn->ext_grp_irq_configured)
1093 		qdf_spinlock_destroy(&ctx->irq_lock);
1094 	qdf_mem_free(ctx);
1095 }
1096 
1097 /**
1098  * hif_deregister_exec_group() - API to free the exec contexts
1099  * @hif_ctx: HIF context
1100  * @context_name: name of the module whose contexts need to be deregistered
1101  *
1102  * This function deregisters the contexts of the requestor identified
1103  * based on the context_name & frees the memory.
1104  *
1105  * Return: void
1106  */
1107 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1108 				const char *context_name)
1109 {
1110 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1111 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1112 	struct hif_exec_context *hif_ext_group;
1113 	int i;
1114 
1115 	for (i = 0; i < HIF_MAX_GROUP; i++) {
1116 		hif_ext_group = hif_state->hif_ext_group[i];
1117 
1118 		if (!hif_ext_group)
1119 			continue;
1120 
1121 		hif_debug("%s: Deregistering grp id %d name %s\n",
1122 			  __func__,
1123 			  hif_ext_group->grp_id,
1124 			  hif_ext_group->context_name);
1125 
1126 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
1127 			hif_ext_group->sched_ops->kill(hif_ext_group);
1128 			hif_state->hif_ext_group[i] = NULL;
1129 			hif_exec_destroy(hif_ext_group);
1130 			hif_state->hif_num_extgroup--;
1131 		}
1132 
1133 	}
1134 }
1135 qdf_export_symbol(hif_deregister_exec_group);
1136 
1137 #ifdef DP_UMAC_HW_RESET_SUPPORT
1138 /**
1139  * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
1140  * @data: UMAC HW reset HIF context
1141  *
1142  * Return: void
1143  */
1144 static void hif_umac_reset_handler_tasklet(unsigned long data)
1145 {
1146 	struct hif_umac_reset_ctx *umac_reset_ctx =
1147 		(struct hif_umac_reset_ctx *)data;
1148 
1149 	/* call the callback handler */
1150 	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
1151 }
1152 
1153 /**
1154  * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
1155  * @irq: irq coming from kernel
1156  * @ctx: UMAC HW reset HIF context
1157  *
1158  * Return: IRQ_HANDLED
1159  */
1160 static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
1161 {
1162 	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;
1163 
1164 	/* Schedule the tasklet and exit */
1165 	tasklet_hi_schedule(&umac_reset_ctx->intr_tq);
1166 
1167 	return IRQ_HANDLED;
1168 }
1169 
1170 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
1171 					   int (*handler)(void *cb_ctx),
1172 					   void *cb_ctx, int irq)
1173 {
1174 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1175 	struct hif_umac_reset_ctx *umac_reset_ctx;
1176 	int ret;
1177 
1178 	if (!hif_sc) {
1179 		hif_err("scn is null");
1180 		return QDF_STATUS_E_NULL_VALUE;
1181 	}
1182 
1183 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1184 
1185 	umac_reset_ctx->cb_handler = handler;
1186 	umac_reset_ctx->cb_ctx = cb_ctx;
1187 	umac_reset_ctx->os_irq = irq;
1188 
1189 	/* Init the tasklet */
1190 	tasklet_init(&umac_reset_ctx->intr_tq,
1191 		     hif_umac_reset_handler_tasklet,
1192 		     (unsigned long)umac_reset_ctx);
1193 
1194 	/* Register the interrupt handler */
1195 	ret  = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
1196 				hif_umac_reset_irq_handler,
1197 				IRQF_SHARED | IRQF_NO_SUSPEND,
1198 				"umac_hw_reset_irq",
1199 				umac_reset_ctx);
1200 	if (ret) {
1201 		hif_err("request_irq failed: %d", ret);
1202 		return qdf_status_from_os_return(ret);
1203 	}
1204 
1205 	umac_reset_ctx->irq_configured = true;
1206 
1207 	return QDF_STATUS_SUCCESS;
1208 }
1209 
1210 qdf_export_symbol(hif_register_umac_reset_handler);
1211 
1212 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
1213 {
1214 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1215 	struct hif_umac_reset_ctx *umac_reset_ctx;
1216 	int ret;
1217 
1218 	if (!hif_sc) {
1219 		hif_err("scn is null");
1220 		return QDF_STATUS_E_NULL_VALUE;
1221 	}
1222 
1223 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1224 	if (!umac_reset_ctx->irq_configured) {
1225 		hif_err("unregister called without a prior IRQ configuration");
1226 		return QDF_STATUS_E_FAILURE;
1227 	}
1228 
1229 	ret  = pfrm_free_irq(hif_sc->qdf_dev->dev,
1230 			     umac_reset_ctx->os_irq,
1231 			     umac_reset_ctx);
1232 	if (ret) {
1233 		hif_err("free_irq failed: %d", ret);
1234 		return qdf_status_from_os_return(ret);
1235 	}
1236 	umac_reset_ctx->irq_configured = false;
1237 
1238 	tasklet_disable(&umac_reset_ctx->intr_tq);
1239 	tasklet_kill(&umac_reset_ctx->intr_tq);
1240 
1241 	umac_reset_ctx->cb_handler = NULL;
1242 	umac_reset_ctx->cb_ctx = NULL;
1243 
1244 	return QDF_STATUS_SUCCESS;
1245 }
1246 
1247 qdf_export_symbol(hif_unregister_umac_reset_handler);
1248 #endif
1249