xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <hif_exec.h>
21 #include <ce_main.h>
22 #include "qdf_module.h"
23 #include "qdf_net_if.h"
24 #include <pld_common.h>
25 
26 /* Mapping between NAPI budget and internal budget: NAPI budget 0 maps to
27  * internal budget 0, NAPI budget 1 to internal budget [1, scaler - 1],
28  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], and so on.
29  */
30 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
31 	(((n) << (s)) - 1)
32 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
33 	(((n) + 1) >> (s))
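/*
 * Worked example (editorial illustration, not part of the original source):
 * with a bin shift of s = 2 (i.e. scaler = 4), a NAPI budget of 64 expands to
 * an internal budget of (64 << 2) - 1 = 255, and an internal work_done of 255
 * maps back to (255 + 1) >> 2 = 64 when reported to the NAPI core.
 */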
34 
35 static struct hif_exec_context *hif_exec_tasklet_create(void);
36 
37 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
38 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
39 
40 static inline
41 int hif_get_next_record_index(qdf_atomic_t *table_index,
42 			      int array_size)
43 {
44 	int record_index = qdf_atomic_inc_return(table_index);
45 
46 	return record_index & (array_size - 1);
47 }
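/*
 * Editorial note: the mask-based wrap above relies on array_size
 * (HIF_EVENT_HIST_MAX here) being a power of two, so that
 * "index & (array_size - 1)" is equivalent to "index % array_size" and the
 * atomic counter indexes the history as a circular buffer without a division.
 */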
48 
49 /**
50  * hif_hist_is_prev_record() - Check if index is the immediate
51  *  previous record wrt curr_index
52  * @curr_index: curr index in the event history
53  * @index: index to be checked
54  * @hist_size: history size
55  *
56  * Return: true if index is immediately behind curr_index else false
57  */
58 static inline
59 bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
60 			     uint32_t hist_size)
61 {
62 	return (((index + 1) & (hist_size - 1)) == curr_index) ?
63 			true : false;
64 }
65 
66 /**
67  * hif_hist_skip_event_record() - Check if current event needs to be
68  *  recorded or not
69  * @hist_ev: HIF event history
70  * @event: DP event entry
71  *
72  * Return: true if current event needs to be skipped else false
73  */
74 static bool
75 hif_hist_skip_event_record(struct hif_event_history *hist_ev,
76 			   struct hif_event_record *event)
77 {
78 	struct hif_event_record *rec;
79 	struct hif_event_record *last_irq_rec;
80 	int32_t index;
81 
82 	index = qdf_atomic_read(&hist_ev->index);
83 	if (index < 0)
84 		return false;
85 
86 	index &= (HIF_EVENT_HIST_MAX - 1);
87 	rec = &hist_ev->event[index];
88 
89 	switch (event->type) {
90 	case HIF_EVENT_IRQ_TRIGGER:
91 		/*
92 		 * The prev record check is to prevent skipping the IRQ event
93 		 * record in case where BH got re-scheduled due to force_break
94 		 * but there are no entries to be reaped in the rings.
95 		 */
96 		if (rec->type == HIF_EVENT_BH_SCHED &&
97 		    hif_hist_is_prev_record(index,
98 					    hist_ev->misc.last_irq_index,
99 					    HIF_EVENT_HIST_MAX)) {
100 			last_irq_rec =
101 				&hist_ev->event[hist_ev->misc.last_irq_index];
102 			last_irq_rec->timestamp = hif_get_log_timestamp();
103 			last_irq_rec->cpu_id = qdf_get_cpu();
104 			last_irq_rec->hp++;
105 			last_irq_rec->tp = last_irq_rec->timestamp -
106 						hist_ev->misc.last_irq_ts;
107 			return true;
108 		}
109 		break;
110 	case HIF_EVENT_BH_SCHED:
111 		if (rec->type == HIF_EVENT_BH_SCHED) {
112 			rec->timestamp = hif_get_log_timestamp();
113 			rec->cpu_id = qdf_get_cpu();
114 			return true;
115 		}
116 		break;
117 	case HIF_EVENT_SRNG_ACCESS_START:
118 		if (event->hp == event->tp)
119 			return true;
120 		break;
121 	case HIF_EVENT_SRNG_ACCESS_END:
122 		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
123 			return true;
124 		break;
125 	case HIF_EVENT_BH_COMPLETE:
126 	case HIF_EVENT_BH_FORCE_BREAK:
127 		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
128 			return true;
129 		break;
130 	default:
131 		break;
132 	}
133 
134 	return false;
135 }
136 
137 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
138 			   struct hif_event_record *event, uint8_t intr_grp_id)
139 {
140 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
141 	struct hif_event_history *hist_ev;
142 	struct hif_event_record *record;
143 	int record_index;
144 
145 	if (!(scn->event_enable_mask & BIT(event->type)))
146 		return;
147 
148 	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
149 		hif_err("Invalid interrupt group id %d", intr_grp_id);
150 		return;
151 	}
152 
153 	hist_ev = scn->evt_hist[intr_grp_id];
154 	if (qdf_unlikely(!hist_ev))
155 		return;
156 
157 	if (hif_hist_skip_event_record(hist_ev, event))
158 		return;
159 
160 	record_index = hif_get_next_record_index(
161 			&hist_ev->index, HIF_EVENT_HIST_MAX);
162 
163 	record = &hist_ev->event[record_index];
164 
165 	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
166 		hist_ev->misc.last_irq_index = record_index;
167 		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
168 	}
169 
170 	record->hal_ring_id = event->hal_ring_id;
171 	record->hp = event->hp;
172 	record->tp = event->tp;
173 	record->cpu_id = qdf_get_cpu();
174 	record->timestamp = hif_get_log_timestamp();
175 	record->type = event->type;
176 }
177 
178 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
179 {
180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
181 
182 	scn->evt_hist[id] = &hif_event_desc_history[id];
183 	qdf_atomic_set(&scn->evt_hist[id]->index, -1);
184 
185 	hif_info("SRNG events history initialized for group: %d", id);
186 }
187 
188 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
189 {
190 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
191 
192 	scn->evt_hist[id] = NULL;
193 	hif_info("SRNG events history de-initialized for group: %d", id);
194 }
195 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
196 
197 /**
198  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
199  * @hif_state: hif context
200  *
201  * return: void
202  */
203 #ifdef HIF_LATENCY_PROFILE_ENABLE
204 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
205 {
206 	struct hif_exec_context *hif_ext_group;
207 	int i, j;
208 	int64_t cur_tstamp;
209 
210 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
211 		"0-2   ms",
212 		"3-10  ms",
213 		"11-20 ms",
214 		"21-50 ms",
215 		"51-100 ms",
216 		"101-250 ms",
217 		"251-500 ms",
218 		"> 500 ms"
219 	};
220 
221 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
222 
223 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
224 		  "Current timestamp: %lld", cur_tstamp);
225 
226 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
227 		if (hif_state->hif_ext_group[i]) {
228 			hif_ext_group = hif_state->hif_ext_group[i];
229 
230 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
231 				  "ext grp %d Last serviced timestamp: %lld",
232 				  i, hif_ext_group->tstamp);
233 
234 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
235 				  "Latency Bucket     | Time elapsed");
236 
237 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
238 				if (hif_ext_group->sched_latency_stats[j])
239 					QDF_TRACE(QDF_MODULE_ID_HIF,
240 						  QDF_TRACE_LEVEL_INFO_HIGH,
241 						  "%s     |    %lld",
242 						  time_str[j],
243 						  hif_ext_group->
244 						  sched_latency_stats[j]);
245 			}
246 		}
247 	}
248 }
249 #else
250 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
251 {
252 }
253 #endif
254 
255 /**
256  * hif_clear_napi_stats() - reset NAPI stats
257  * @hif_ctx: hif context
258  *
259  * return: void
260  */
261 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
262 {
263 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
264 	struct hif_exec_context *hif_ext_group;
265 	size_t i;
266 
267 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
268 		hif_ext_group = hif_state->hif_ext_group[i];
269 
270 		if (!hif_ext_group)
271 			return;
272 
273 		qdf_mem_set(hif_ext_group->sched_latency_stats,
274 			    sizeof(hif_ext_group->sched_latency_stats),
275 			    0x0);
276 	}
277 }
278 
279 qdf_export_symbol(hif_clear_napi_stats);
280 
281 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
282 /**
283  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
284  * @stats: NAPI stats to get poll time buckets
285  * @buf: buffer to fill histogram string
286  * @buf_len: length of the buffer
287  *
288  * Return: void
289  */
290 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
291 					uint8_t buf_len)
292 {
293 	int i;
294 	int str_index = 0;
295 
296 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
297 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
298 					   "%u|", stats->poll_time_buckets[i]);
299 }
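/*
 * Editorial example (assuming QCA_NAPI_NUM_BUCKETS is 4): bucket counts of
 * {120, 7, 1, 0} make the loop above fill buf with "120|7|1|0|", i.e. each
 * bucket value followed by a '|' separator.
 */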
300 
301 /**
302  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
303  * @hif_ext_group: hif_ext_group of type NAPI
304  *
305  * The function is called at the end of a NAPI poll to calculate poll time
306  * buckets.
307  *
308  * Return: void
309  */
310 static
311 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
312 {
313 	struct qca_napi_stat *napi_stat;
314 	unsigned long long poll_time_ns;
315 	uint32_t poll_time_us;
316 	uint32_t bucket_size_us = 500;
317 	uint32_t bucket;
318 	uint32_t cpu_id = qdf_get_cpu();
319 
320 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
321 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
322 
323 	napi_stat = &hif_ext_group->stats[cpu_id];
324 	if (poll_time_ns > napi_stat->napi_max_poll_time)
325 		napi_stat->napi_max_poll_time = poll_time_ns;
326 
327 	bucket = poll_time_us / bucket_size_us;
328 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
329 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
330 	++napi_stat->poll_time_buckets[bucket];
331 }
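/*
 * Editorial example: a poll that ran for 1.3 ms gives poll_time_us = 1300 and
 * bucket = 1300 / 500 = 2, so the third 500 us bucket is incremented; polls of
 * QCA_NAPI_NUM_BUCKETS * 500 us or longer are clamped into the last bucket.
 */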
332 
333 /**
334  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
335  * @hif_ext_group: hif_ext_group of type NAPI
336  *
337  * Return: true if NAPI needs to yield, else false
338  */
339 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
340 {
341 	bool time_limit_reached = false;
342 	unsigned long long poll_time_ns;
343 	int cpu_id = qdf_get_cpu();
344 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
345 	struct hif_config_info *cfg = &scn->hif_config;
346 
347 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
348 	time_limit_reached =
349 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns;
350 
351 	if (time_limit_reached) {
352 		hif_ext_group->stats[cpu_id].time_limit_reached++;
353 		hif_ext_group->force_break = true;
354 	}
355 
356 	return time_limit_reached;
357 }
358 
359 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
360 {
361 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
362 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
363 	struct hif_exec_context *hif_ext_group;
364 	bool ret_val = false;
365 
366 	if (grp_id >= hif_state->hif_num_extgroup ||
367 	    grp_id >= HIF_MAX_GROUP)
368 		return false;
369 
370 	hif_ext_group = hif_state->hif_ext_group[grp_id];
371 
372 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
373 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
374 
375 	return ret_val;
376 }
377 
378 /**
379  * hif_exec_update_service_start_time() - Update NAPI poll start time
380  * @hif_ext_group: hif_ext_group of type NAPI
381  *
382  * The function is called at the beginning of a NAPI poll to record the poll
383  * start time.
384  *
385  * Return: None
386  */
387 static inline
388 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
389 {
390 	hif_ext_group->poll_start_time = qdf_time_sched_clock();
391 }
392 
393 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
394 {
395 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
396 	struct hif_exec_context *hif_ext_group;
397 	struct qca_napi_stat *napi_stats;
398 	int i, j;
399 
400 	/*
401 	 * Max value of uint32_t (poll_time_bucket) = 4294967295, so each
402 	 * bucket value needs at most 10 chars + 1 separator = 11 chars,
403 	 * plus 1 char for the terminating '\0'.
404 	 */
405 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
406 
407 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
408 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
409 
410 	for (i = 0;
411 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
412 	     i++) {
413 		hif_ext_group = hif_state->hif_ext_group[i];
414 		for (j = 0; j < num_possible_cpus(); j++) {
415 			napi_stats = &hif_ext_group->stats[j];
416 			if (!napi_stats->napi_schedules)
417 				continue;
418 
419 			hif_get_poll_times_hist_str(napi_stats,
420 						    hist_str,
421 						    sizeof(hist_str));
422 			QDF_TRACE(QDF_MODULE_ID_HIF,
423 				  QDF_TRACE_LEVEL_INFO_HIGH,
424 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
425 				  i, j,
426 				  napi_stats->napi_schedules,
427 				  napi_stats->napi_polls,
428 				  napi_stats->napi_completes,
429 				  napi_stats->napi_workdone,
430 				  napi_stats->time_limit_reached,
431 				  qdf_do_div(napi_stats->napi_max_poll_time,
432 					     1000),
433 				  hist_str);
434 		}
435 	}
436 
437 	hif_print_napi_latency_stats(hif_state);
438 }
439 
440 qdf_export_symbol(hif_print_napi_stats);
441 
442 #else
443 
444 static inline
445 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
446 				 uint8_t buf_len)
447 {
448 }
449 
450 static inline
451 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
452 {
453 }
454 
455 static inline
456 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
457 {
458 }
459 
460 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
461 {
462 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
463 	struct hif_exec_context *hif_ext_group;
464 	struct qca_napi_stat *napi_stats;
465 	int i, j;
466 
467 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
468 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
469 
470 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
471 		if (hif_state->hif_ext_group[i]) {
472 			hif_ext_group = hif_state->hif_ext_group[i];
473 			for (j = 0; j < num_possible_cpus(); j++) {
474 				napi_stats = &(hif_ext_group->stats[j]);
475 				if (napi_stats->napi_schedules != 0)
476 					QDF_TRACE(QDF_MODULE_ID_HIF,
477 						QDF_TRACE_LEVEL_FATAL,
478 						"NAPI[%2d]CPU[%d]: "
479 						"%7u %7u %7u %7u ",
480 						i, j,
481 						napi_stats->napi_schedules,
482 						napi_stats->napi_polls,
483 						napi_stats->napi_completes,
484 						napi_stats->napi_workdone);
485 			}
486 		}
487 	}
488 
489 	hif_print_napi_latency_stats(hif_state);
490 }
491 qdf_export_symbol(hif_print_napi_stats);
492 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
493 
494 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
495 {
496 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
497 
498 	tasklet_schedule(&t_ctx->tasklet);
499 }
500 
501 /**
502  * hif_exec_tasklet_fn() - grp tasklet
503  * @data: context
504  *
505  * Return: void
506  */
507 static void hif_exec_tasklet_fn(unsigned long data)
508 {
509 	struct hif_exec_context *hif_ext_group =
510 			(struct hif_exec_context *)data;
511 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
512 	unsigned int work_done;
513 	int cpu = smp_processor_id();
514 
515 	work_done =
516 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
517 				       cpu);
518 
519 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
520 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
521 		hif_ext_group->irq_enable(hif_ext_group);
522 	} else {
523 		hif_exec_tasklet_schedule(hif_ext_group);
524 	}
525 }
526 
527 /**
528  * hif_latency_profile_measure() - calculate latency and update histogram
529  * @hif_ext_group: hif exec context
530  *
531  * Return: None
532  */
533 #ifdef HIF_LATENCY_PROFILE_ENABLE
534 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
535 {
536 	int64_t cur_tstamp;
537 	int64_t time_elapsed;
538 
539 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
540 
541 	if (cur_tstamp > hif_ext_group->tstamp)
542 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
543 	else
544 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
545 
546 	hif_ext_group->tstamp = cur_tstamp;
547 
548 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
549 		hif_ext_group->sched_latency_stats[0]++;
550 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
551 		hif_ext_group->sched_latency_stats[1]++;
552 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
553 		hif_ext_group->sched_latency_stats[2]++;
554 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
555 		hif_ext_group->sched_latency_stats[3]++;
556 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
557 		hif_ext_group->sched_latency_stats[4]++;
558 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
559 		hif_ext_group->sched_latency_stats[5]++;
560 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
561 		hif_ext_group->sched_latency_stats[6]++;
562 	else
563 		hif_ext_group->sched_latency_stats[7]++;
564 }
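/*
 * Editorial example (assuming the HIF_SCHED_LATENCY_BUCKET_* macros hold each
 * bucket's upper bound in milliseconds, as their names suggest): a scheduling
 * delay of 35 ms falls through the first three comparisons and increments
 * sched_latency_stats[3], i.e. the "21-50 ms" bucket printed by
 * hif_print_napi_latency_stats().
 */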
565 #else
566 static inline
567 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
568 {
569 }
570 #endif
571 
572 /**
573  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
574  * @hif_ext_group: hif exec context
575  *
576  * Return: None
577  */
578 #ifdef HIF_LATENCY_PROFILE_ENABLE
579 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
580 {
581 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
582 }
583 #else
584 static inline
585 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
586 {
587 }
588 #endif
589 
590 #ifdef FEATURE_NAPI
591 #ifdef FEATURE_IRQ_AFFINITY
592 static inline int32_t
593 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
594 {
595 	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
596 }
597 #else
598 static inline int32_t
599 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
600 {
601 	return 0;
602 }
603 #endif
604 
605 /**
606  * hif_irq_disabled_time_limit_reached() - determine if the irq disabled time
607  * limit has been reached (single MSI case)
608  * @hif_ext_group: hif exec context
609  *
610  * Return: true if reached, else false.
611  */
612 static bool
613 hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
614 {
615 	unsigned long long irq_disabled_duration_ns;
616 
617 	if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
618 		return false;
619 
620 	irq_disabled_duration_ns = qdf_time_sched_clock() -
621 					hif_ext_group->irq_disabled_start_time;
622 	if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
623 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
624 				 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
625 		return true;
626 	}
627 
628 	return false;
629 }
630 
631 /**
632  * hif_exec_poll() - napi poll
633  * @napi: napi struct
634  * @budget: budget for napi
635  *
636  * Return: work done, mapped from the internal budget back to the NAPI budget
637  */
638 static int hif_exec_poll(struct napi_struct *napi, int budget)
639 {
640 	struct hif_napi_exec_context *napi_exec_ctx =
641 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
642 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
643 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
644 	int work_done;
645 	int normalized_budget = 0;
646 	int actual_dones;
647 	int shift = hif_ext_group->scale_bin_shift;
648 	int cpu = smp_processor_id();
649 
650 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
651 			 0, 0, 0, HIF_EVENT_BH_SCHED);
652 
653 	hif_ext_group->force_break = false;
654 	hif_exec_update_service_start_time(hif_ext_group);
655 
656 	if (budget)
657 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
658 
659 	hif_latency_profile_measure(hif_ext_group);
660 
661 	work_done = hif_ext_group->handler(hif_ext_group->context,
662 					   normalized_budget, cpu);
663 
664 	actual_dones = work_done;
665 
666 	if (hif_is_force_napi_complete_required(hif_ext_group) ||
667 	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
668 	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
669 	    hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
670 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
671 				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
672 		napi_complete(napi);
673 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
674 		hif_ext_group->irq_enable(hif_ext_group);
675 		hif_ext_group->stats[cpu].napi_completes++;
676 	} else {
677 		/* if the ext_group supports time-based yield, claim full work
678 		 * done anyway */
679 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
680 				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
681 		work_done = normalized_budget;
682 	}
683 
684 	hif_ext_group->stats[cpu].napi_polls++;
685 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
686 
687 	/* map internal budget to NAPI budget */
688 	if (work_done)
689 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
690 
691 	hif_exec_fill_poll_time_histogram(hif_ext_group);
692 
693 	return work_done;
694 }
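/*
 * Editorial note on the completion rule above: per the Linux NAPI contract, a
 * poll routine should only complete NAPI when it consumed less than its
 * budget. On a forced break the handler therefore reports the full normalized
 * budget back and leaves the group interrupt disabled, so the kernel re-polls
 * the context instead of waiting for the next interrupt.
 */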
695 
696 /**
697  * hif_exec_napi_schedule() - schedule the napi exec instance
698  * @ctx: a hif_exec_context known to be of napi type
699  */
700 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
701 {
702 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
703 	ctx->stats[smp_processor_id()].napi_schedules++;
704 
705 	napi_schedule(&n_ctx->napi);
706 }
707 
708 /**
709  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
710  * @ctx: a hif_exec_context known to be of napi type
711  */
712 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
713 {
714 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
715 	int irq_ind;
716 
717 	if (ctx->inited) {
718 		qdf_napi_disable(&n_ctx->napi);
719 		ctx->inited = 0;
720 	}
721 
722 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
723 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
724 
725 	hif_core_ctl_set_boost(false);
726 	qdf_netif_napi_del(&(n_ctx->napi));
727 }
728 
729 struct hif_execution_ops napi_sched_ops = {
730 	.schedule = &hif_exec_napi_schedule,
731 	.kill = &hif_exec_napi_kill,
732 };
733 
734 /**
735  * hif_exec_napi_create() - allocate and initialize a napi exec context
736  * @scale: a binary shift factor to map the NAPI budget to/from the internal
737  *         budget
738  */
739 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
740 {
741 	struct hif_napi_exec_context *ctx;
742 
743 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
744 	if (!ctx)
745 		return NULL;
746 
747 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
748 	ctx->exec_ctx.inited = true;
749 	ctx->exec_ctx.scale_bin_shift = scale;
750 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
751 	qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
752 			   QCA_NAPI_BUDGET);
753 	qdf_napi_enable(&ctx->napi);
754 
755 	return &ctx->exec_ctx;
756 }
757 #else
758 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
759 {
760 	hif_warn("FEATURE_NAPI not defined, making tasklet");
761 	return hif_exec_tasklet_create();
762 }
763 #endif
764 
765 
766 /**
767  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
768  * @ctx: a hif_exec_context known to be of tasklet type
769  */
770 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
771 {
772 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
773 	int irq_ind;
774 
775 	if (ctx->inited) {
776 		tasklet_disable(&t_ctx->tasklet);
777 		tasklet_kill(&t_ctx->tasklet);
778 	}
779 	ctx->inited = false;
780 
781 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
782 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
783 }
784 
785 struct hif_execution_ops tasklet_sched_ops = {
786 	.schedule = &hif_exec_tasklet_schedule,
787 	.kill = &hif_exec_tasklet_kill,
788 };
789 
790 /**
791  * hif_exec_tasklet_create() -  allocate and initialize a tasklet exec context
792  */
793 static struct hif_exec_context *hif_exec_tasklet_create(void)
794 {
795 	struct hif_tasklet_exec_context *ctx;
796 
797 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
798 	if (!ctx)
799 		return NULL;
800 
801 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
802 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
803 		     (unsigned long)ctx);
804 
805 	ctx->exec_ctx.inited = true;
806 
807 	return &ctx->exec_ctx;
808 }
809 
810 /**
811  * hif_exec_get_ctx() - retrieve an exec context based on an id
812  * @softc: the hif context owning the exec context
813  * @id: the id of the exec context
814  *
815  * mostly added to make it easier to rename or move the context array
816  */
817 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
818 					  uint8_t id)
819 {
820 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
821 
822 	if (id < hif_state->hif_num_extgroup)
823 		return hif_state->hif_ext_group[id];
824 
825 	return NULL;
826 }
827 
828 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
829 				uint8_t id)
830 {
831 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
832 
833 	if (id < hif_state->hif_num_extgroup)
834 		return hif_state->hif_ext_group[id]->os_irq[0];
835 	return -EINVAL;
836 }
837 
838 qdf_export_symbol(hif_get_int_ctx_irq_num);
839 
840 #ifdef HIF_CPU_PERF_AFFINE_MASK
841 void hif_config_irq_set_perf_affinity_hint(
842 	struct hif_opaque_softc *hif_ctx)
843 {
844 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
845 
846 	hif_config_irq_affinity(scn);
847 }
848 
849 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
850 #endif
851 
852 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
853 {
854 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
855 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
856 	struct hif_exec_context *hif_ext_group;
857 	int i, status;
858 
859 	if (scn->ext_grp_irq_configured) {
860 		hif_err("Called after ext grp irq configured");
861 		return QDF_STATUS_E_FAILURE;
862 	}
863 
864 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
865 		hif_ext_group = hif_state->hif_ext_group[i];
866 		status = 0;
867 		qdf_spinlock_create(&hif_ext_group->irq_lock);
868 		if (hif_ext_group->configured &&
869 		    !hif_ext_group->irq_requested) {
870 			hif_ext_group->irq_enabled = true;
871 			status = hif_grp_irq_configure(scn, hif_ext_group);
872 		}
873 		if (status != 0) {
874 			hif_err("Failed for group %d", i);
875 			hif_ext_group->irq_enabled = false;
876 		}
877 	}
878 
879 	scn->ext_grp_irq_configured = true;
880 
881 	return QDF_STATUS_SUCCESS;
882 }
883 
884 qdf_export_symbol(hif_configure_ext_group_interrupts);
885 
886 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
887 {
888 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
889 
890 	if (!scn || !scn->ext_grp_irq_configured) {
891 		hif_err("scn(%pk) is NULL or grp irq not configured", scn);
892 		return;
893 	}
894 
895 	hif_grp_irq_deconfigure(scn);
896 	scn->ext_grp_irq_configured = false;
897 }
898 
899 qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
900 
901 #ifdef WLAN_SUSPEND_RESUME_TEST
902 /**
903  * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
904  *				       to trigger the fake-suspend command and,
905  *				       if so, issue the resume procedure.
906  * @scn: opaque HIF software context
907  *
908  * This API checks whether a unit-test command was used to trigger the
909  * fake-suspend command and, if so, triggers the resume procedure.
910  *
911  * The API is inline to avoid call overhead, and the qdf_unlikely() hint lets
912  * branch prediction minimize the performance impact.
913  *
914  * Return: void
915  */
916 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
917 {
918 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
919 		hif_ut_fw_resume(scn);
920 }
921 #else
922 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
923 {
924 }
925 #endif
926 
927 /**
928  * hif_check_and_trigger_sys_resume() - Check for bus suspend and
929  *  trigger system resume
930  * @scn: hif context
931  * @irq: irq number
932  *
933  * Return: None
934  */
935 static inline void
936 hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
937 {
938 	if (scn->bus_suspended && scn->linkstate_vote) {
939 		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
940 		qdf_pm_system_wakeup();
941 	}
942 }
943 
944 /**
945  * hif_ext_group_interrupt_handler() - handler for related interrupts
946  * @irq: irq number of the interrupt
947  * @context: the associated hif_exec_group context
948  *
949  * This callback function takes care of disabling the associated interrupts
950  * and scheduling the expected bottom half for the exec_context.
951  * This callback function also helps keep track of the count of running contexts.
952  */
953 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
954 {
955 	struct hif_exec_context *hif_ext_group = context;
956 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
957 
958 	if (hif_ext_group->irq_requested) {
959 		hif_latency_profile_start(hif_ext_group);
960 
961 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
962 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
963 
964 		hif_ext_group->irq_disable(hif_ext_group);
965 
966 		if (pld_is_one_msi(scn->qdf_dev->dev))
967 			hif_ext_group->irq_disabled_start_time =
968 							qdf_time_sched_clock();
969 		/*
970 		 * If a private ioctl has issued the fake-suspend command to
971 		 * put the FW into the D0-WOW state, then this is our chance
972 		 * to bring the FW out of WOW mode.
973 		 *
974 		 * The FW needs to be woken up explicitly here: the APSS
975 		 * stayed fully awake while the fake APSS suspend command was
976 		 * issued (to put the FW in WOW mode), so the organic way of
977 		 * waking up the FW (as part of the APSS-host wake-up) will
978 		 * not happen, because in reality the APSS never really
979 		 * suspended.
980 		 */
981 		hif_check_and_trigger_ut_resume(scn);
982 
983 		hif_check_and_trigger_sys_resume(scn, irq);
984 
985 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
986 
987 		hif_ext_group->sched_ops->schedule(hif_ext_group);
988 	}
989 
990 	return IRQ_HANDLED;
991 }
992 
993 /**
994  * hif_exec_kill() - grp tasklet kill
995  * @hif_ctx: hif_softc
996  *
997  * return: void
998  */
999 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
1000 {
1001 	int i;
1002 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1003 
1004 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
1005 		hif_state->hif_ext_group[i]->sched_ops->kill(
1006 			hif_state->hif_ext_group[i]);
1007 
1008 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
1009 }
1010 
1011 #ifdef FEATURE_IRQ_AFFINITY
1012 static inline void
1013 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1014 {
1015 	qdf_atomic_init(&hif_ext_group->force_napi_complete);
1016 }
1017 #else
1018 static inline void
1019 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1020 {
1021 }
1022 #endif
1023 
1024 /**
1025  * hif_register_ext_group() - API to register external group
1026  * interrupt handler.
1027  * @hif_ctx: HIF context
1028  * @numirq: number of irq's in the group
1029  * @irq: array of irq values
1030  * @handler: callback interrupt handler function
1031  * @cb_ctx: context to be passed to the callback
1032  * @context_name: context name
1033  * @type: napi vs tasklet
1034  * @scale: binary shift to map NAPI budget to/from internal budget
1035  *
1036  * Return: QDF_STATUS
1037  */
1038 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1039 				  uint32_t numirq, uint32_t irq[],
1040 				  ext_intr_handler handler,
1041 				  void *cb_ctx, const char *context_name,
1042 				  enum hif_exec_type type, uint32_t scale)
1043 {
1044 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1045 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1046 	struct hif_exec_context *hif_ext_group;
1047 
1048 	if (scn->ext_grp_irq_configured) {
1049 		hif_err("Called after ext grp irq configured");
1050 		return QDF_STATUS_E_FAILURE;
1051 	}
1052 
1053 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
1054 		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
1055 		return QDF_STATUS_E_FAILURE;
1056 	}
1057 
1058 	if (numirq >= HIF_MAX_GRP_IRQ) {
1059 		hif_err("Invalid numirq: %d", numirq);
1060 		return QDF_STATUS_E_FAILURE;
1061 	}
1062 
1063 	hif_ext_group = hif_exec_create(type, scale);
1064 	if (!hif_ext_group)
1065 		return QDF_STATUS_E_FAILURE;
1066 
1067 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
1068 		hif_ext_group;
1069 
1070 	hif_ext_group->numirq = numirq;
1071 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1072 	hif_ext_group->context = cb_ctx;
1073 	hif_ext_group->handler = handler;
1074 	hif_ext_group->configured = true;
1075 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1076 	hif_ext_group->hif = hif_ctx;
1077 	hif_ext_group->context_name = context_name;
1078 	hif_ext_group->type = type;
1079 	hif_init_force_napi_complete(hif_ext_group);
1080 
1081 	hif_state->hif_num_extgroup++;
1082 	return QDF_STATUS_SUCCESS;
1083 }
1084 qdf_export_symbol(hif_register_ext_group);
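/*
 * Usage sketch (hypothetical caller, not part of the original source): a
 * datapath module owning two IRQ lines could register a NAPI-type group as
 * below; the group only becomes live once hif_configure_ext_group_interrupts()
 * later requests and enables the IRQs.
 *
 *	uint32_t irqs[2] = { 101, 102 };
 *
 *	status = hif_register_ext_group(hif_ctx, 2, irqs,
 *					my_dp_group_handler, my_dp_ctx,
 *					"dp_intr", HIF_EXEC_NAPI_TYPE, 0);
 */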
1085 
1086 /**
1087  * hif_exec_create() - create an execution context
1088  * @type: the type of execution context to create
1089  * @scale: binary shift to map NAPI budget to/from internal budget
1090  */
1091 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
1092 						uint32_t scale)
1093 {
1094 	hif_debug("%s: create exec_type %d budget %d\n",
1095 		  __func__, type, QCA_NAPI_BUDGET * scale);
1096 
1097 	switch (type) {
1098 	case HIF_EXEC_NAPI_TYPE:
1099 		return hif_exec_napi_create(scale);
1100 
1101 	case HIF_EXEC_TASKLET_TYPE:
1102 		return hif_exec_tasklet_create();
1103 	default:
1104 		return NULL;
1105 	}
1106 }
1107 
1108 /**
1109  * hif_exec_destroy() - free the hif_exec context
1110  * @ctx: context to free
1111  *
1112  * please kill the context before freeing it to avoid a use after free.
1113  */
1114 void hif_exec_destroy(struct hif_exec_context *ctx)
1115 {
1116 	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
1117 
1118 	if (scn->ext_grp_irq_configured)
1119 		qdf_spinlock_destroy(&ctx->irq_lock);
1120 	qdf_mem_free(ctx);
1121 }
1122 
1123 /**
1124  * hif_deregister_exec_group() - API to free the exec contexts
1125  * @hif_ctx: HIF context
1126  * @context_name: name of the module whose contexts need to be deregistered
1127  *
1128  * This function deregisters the contexts of the requestor identified
1129  * based on the context_name & frees the memory.
1130  *
1131  * Return: void
1132  */
1133 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1134 				const char *context_name)
1135 {
1136 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1137 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1138 	struct hif_exec_context *hif_ext_group;
1139 	int i;
1140 
1141 	for (i = 0; i < HIF_MAX_GROUP; i++) {
1142 		hif_ext_group = hif_state->hif_ext_group[i];
1143 
1144 		if (!hif_ext_group)
1145 			continue;
1146 
1147 		hif_debug("%s: Deregistering grp id %d name %s\n",
1148 			  __func__,
1149 			  hif_ext_group->grp_id,
1150 			  hif_ext_group->context_name);
1151 
1152 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
1153 			hif_ext_group->sched_ops->kill(hif_ext_group);
1154 			hif_state->hif_ext_group[i] = NULL;
1155 			hif_exec_destroy(hif_ext_group);
1156 			hif_state->hif_num_extgroup--;
1157 		}
1158 
1159 	}
1160 }
1161 qdf_export_symbol(hif_deregister_exec_group);
1162 
1163 #ifdef DP_UMAC_HW_RESET_SUPPORT
1164 /**
1165  * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
1166  * @data: UMAC HW reset HIF context
1167  *
1168  * return: void
1169  */
1170 static void hif_umac_reset_handler_tasklet(unsigned long data)
1171 {
1172 	struct hif_umac_reset_ctx *umac_reset_ctx =
1173 		(struct hif_umac_reset_ctx *)data;
1174 
1175 	/* call the callback handler */
1176 	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
1177 }
1178 
1179 /**
1180  * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
1181  * @irq: irq coming from kernel
1182  * @ctx: UMAC HW reset HIF context
1183  *
1184  * return: IRQ_HANDLED
1185  */
1186 static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
1187 {
1188 	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;
1189 
1190 	/* Schedule the tasklet and exit */
1191 	tasklet_hi_schedule(&umac_reset_ctx->intr_tq);
1192 
1193 	return IRQ_HANDLED;
1194 }
1195 
1196 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
1197 					   int (*handler)(void *cb_ctx),
1198 					   void *cb_ctx, int irq)
1199 {
1200 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1201 	struct hif_umac_reset_ctx *umac_reset_ctx;
1202 	int ret;
1203 
1204 	if (!hif_sc) {
1205 		hif_err("scn is null");
1206 		return QDF_STATUS_E_NULL_VALUE;
1207 	}
1208 
1209 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1210 
1211 	umac_reset_ctx->cb_handler = handler;
1212 	umac_reset_ctx->cb_ctx = cb_ctx;
1213 	umac_reset_ctx->os_irq = irq;
1214 
1215 	/* Init the tasklet */
1216 	tasklet_init(&umac_reset_ctx->intr_tq,
1217 		     hif_umac_reset_handler_tasklet,
1218 		     (unsigned long)umac_reset_ctx);
1219 
1220 	/* Register the interrupt handler */
1221 	ret  = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
1222 				hif_umac_reset_irq_handler,
1223 				IRQF_SHARED | IRQF_NO_SUSPEND,
1224 				"umac_hw_reset_irq",
1225 				umac_reset_ctx);
1226 	if (ret) {
1227 		hif_err("request_irq failed: %d", ret);
1228 		return qdf_status_from_os_return(ret);
1229 	}
1230 
1231 	umac_reset_ctx->irq_configured = true;
1232 
1233 	return QDF_STATUS_SUCCESS;
1234 }
1235 
1236 qdf_export_symbol(hif_register_umac_reset_handler);
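/*
 * Editorial note: the IRQ above is requested with IRQF_SHARED, so the line may
 * be shared with other devices, and with IRQF_NO_SUSPEND, so it stays enabled
 * across system suspend; the ISR only schedules the high-priority tasklet and
 * defers the actual UMAC reset callback to tasklet context.
 */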
1237 
1238 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
1239 {
1240 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1241 	struct hif_umac_reset_ctx *umac_reset_ctx;
1242 	int ret;
1243 
1244 	if (!hif_sc) {
1245 		hif_err("scn is null");
1246 		return QDF_STATUS_E_NULL_VALUE;
1247 	}
1248 
1249 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1250 	if (!umac_reset_ctx->irq_configured) {
1251 		hif_err("unregister called without a prior IRQ configuration");
1252 		return QDF_STATUS_E_FAILURE;
1253 	}
1254 
1255 	ret  = pfrm_free_irq(hif_sc->qdf_dev->dev,
1256 			     umac_reset_ctx->os_irq,
1257 			     umac_reset_ctx);
1258 	if (ret) {
1259 		hif_err("free_irq failed: %d", ret);
1260 		return qdf_status_from_os_return(ret);
1261 	}
1262 	umac_reset_ctx->irq_configured = false;
1263 
1264 	tasklet_disable(&umac_reset_ctx->intr_tq);
1265 	tasklet_kill(&umac_reset_ctx->intr_tq);
1266 
1267 	umac_reset_ctx->cb_handler = NULL;
1268 	umac_reset_ctx->cb_ctx = NULL;
1269 
1270 	return QDF_STATUS_SUCCESS;
1271 }
1272 
1273 qdf_export_symbol(hif_unregister_umac_reset_handler);
1274 #endif
1275