xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 97b39bfea3401259bed153a56c00d1fddbb9e87d)
1 /*
2  * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <hif_exec.h>
20 #include <ce_main.h>
21 #include "qdf_module.h"
22 #include "qdf_net_if.h"
23 /* mapping NAPI budget 0 to internal budget 0
24  * NAPI budget 1 to internal budget [1, scaler - 1]
25  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
26  */
27 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
28 	(((n) << (s)) - 1)
29 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
30 	(((n) + 1) >> (s))
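/*
 * Worked example (illustrative note added here, not from the original
 * source): with a scale shift of s = 1 (i.e. scaler = 2),
 * NAPI_BUDGET_TO_INTERNAL_BUDGET(64, 1) evaluates to (64 << 1) - 1 = 127,
 * the top of the internal range for a NAPI budget of 64, and
 * INTERNAL_BUDGET_TO_NAPI_BUDGET(127, 1) = (127 + 1) >> 1 = 64 maps it
 * back to the NAPI scale.
 */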
31 
32 static struct hif_exec_context *hif_exec_tasklet_create(void);
33 
34 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
35 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
36 
37 static inline
38 int hif_get_next_record_index(qdf_atomic_t *table_index,
39 			      int array_size)
40 {
41 	int record_index = qdf_atomic_inc_return(table_index);
42 
43 	return record_index & (array_size - 1);
44 }
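/*
 * Note (added for clarity, not in the original source): the masking above
 * assumes the history array size (HIF_EVENT_HIST_MAX) is a power of two,
 * so the atomic counter can wrap naturally while "index & (size - 1)"
 * still lands on a valid slot.
 */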
45 
46 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
47 			   struct hif_event_record *event, uint8_t intr_grp_id)
48 {
49 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
50 	struct hif_event_history *hist_ev;
51 	struct hif_event_record *record;
52 	int record_index;
53 
54 	if (scn->event_disable_mask & BIT(event->type))
55 		return;
56 
57 	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
58 		hif_err("Invalid interrupt group id %d", intr_grp_id);
59 		return;
60 	}
61 
62 	hist_ev = scn->evt_hist[intr_grp_id];
63 	if (qdf_unlikely(!hist_ev))
64 		return;
65 
66 	record_index = hif_get_next_record_index(
67 			&hist_ev->index, HIF_EVENT_HIST_MAX);
68 
69 	record = &hist_ev->event[record_index];
70 
71 	record->hal_ring_id = event->hal_ring_id;
72 	record->hp = event->hp;
73 	record->tp = event->tp;
74 	record->cpu_id = qdf_get_cpu();
75 	record->timestamp = qdf_get_log_timestamp();
76 	record->type = event->type;
77 }
78 
79 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
80 {
81 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
82 
83 	scn->evt_hist[id] = &hif_event_desc_history[id];
84 	qdf_atomic_set(&scn->evt_hist[id]->index, -1);
85 
86 	hif_info("SRNG events history initialized for group: %d", id);
87 }
88 
89 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
90 {
91 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
92 
93 	scn->evt_hist[id] = NULL;
94 	hif_info("SRNG events history de-initialized for group: %d", id);
95 }
96 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
97 
98 /**
99  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
100  * @hif_state: hif context
101  *
102  * Return: void
103  */
104 #ifdef HIF_LATENCY_PROFILE_ENABLE
105 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
106 {
107 	struct hif_exec_context *hif_ext_group;
108 	int i, j;
109 	int64_t cur_tstamp;
110 
111 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
112 		"0-2   ms",
113 		"3-10  ms",
114 		"11-20 ms",
115 		"21-50 ms",
116 		"51-100 ms",
117 		"101-250 ms",
118 		"251-500 ms",
119 		"> 500 ms"
120 	};
121 
122 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
123 
124 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
125 		  "Current timestamp: %lld", cur_tstamp);
126 
127 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
128 		if (hif_state->hif_ext_group[i]) {
129 			hif_ext_group = hif_state->hif_ext_group[i];
130 
131 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
132 				  "Interrupts in the HIF Group");
133 
134 			for (j = 0; j < hif_ext_group->numirq; j++) {
135 				QDF_TRACE(QDF_MODULE_ID_HIF,
136 					  QDF_TRACE_LEVEL_FATAL,
137 					  "  %s",
138 					  hif_ext_group->irq_name
139 					  (hif_ext_group->irq[j]));
140 			}
141 
142 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
143 				  "Last serviced timestamp: %lld",
144 				  hif_ext_group->tstamp);
145 
146 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
147 				  "Latency Bucket     | Time elapsed");
148 
149 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
150 				QDF_TRACE(QDF_MODULE_ID_HIF,
151 					  QDF_TRACE_LEVEL_FATAL,
152 					  "%s     |    %lld", time_str[j],
153 					  hif_ext_group->
154 					  sched_latency_stats[j]);
155 			}
156 		}
157 	}
158 }
159 #else
160 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
161 {
162 }
163 #endif
164 
165 /**
166  * hif_clear_napi_stats() - reset NAPI stats
167  * @hif_ctx: hif context
168  *
169  * Return: void
170  */
171 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
172 {
173 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
174 	struct hif_exec_context *hif_ext_group;
175 	size_t i;
176 
177 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
178 		hif_ext_group = hif_state->hif_ext_group[i];
179 
180 		if (!hif_ext_group)
181 			return;
182 
183 		qdf_mem_set(hif_ext_group->sched_latency_stats,
184 			    sizeof(hif_ext_group->sched_latency_stats),
185 			    0x0);
186 	}
187 }
188 
189 qdf_export_symbol(hif_clear_napi_stats);
190 
191 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
192 /**
193  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
194  * @stats: NAPI stats to get poll time buckets
195  * @buf: buffer to fill histogram string
196  * @buf_len: length of the buffer
197  *
198  * Return: void
199  */
200 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
201 					uint8_t buf_len)
202 {
203 	int i;
204 	int str_index = 0;
205 
206 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
207 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
208 					   "%u|", stats->poll_time_buckets[i]);
209 }
210 
211 /**
212  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
213  * @hif_ext_group: hif_ext_group of type NAPI
214  *
215  * The function is called at the end of a NAPI poll to calculate poll time
216  * buckets.
217  *
218  * Return: void
219  */
220 static
221 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
222 {
223 	struct qca_napi_stat *napi_stat;
224 	unsigned long long poll_time_ns;
225 	uint32_t poll_time_us;
226 	uint32_t bucket_size_us = 500;
227 	uint32_t bucket;
228 	uint32_t cpu_id = qdf_get_cpu();
229 
230 	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
231 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
232 
233 	napi_stat = &hif_ext_group->stats[cpu_id];
234 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
235 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
236 
237 	bucket = poll_time_us / bucket_size_us;
238 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
239 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
240 	++napi_stat->poll_time_buckets[bucket];
241 }
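/*
 * Worked example (illustrative): a poll that takes 1,234 us falls into
 * bucket 1234 / 500 = 2, i.e. the [1000, 1500) us range; the last bucket
 * collects everything from (QCA_NAPI_NUM_BUCKETS - 1) * 500 us upward.
 */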
242 
243 /**
244  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
245  * @hif_ext_group: hif_ext_group of type NAPI
246  *
247  * Return: true if NAPI needs to yield, else false
248  */
249 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
250 {
251 	bool time_limit_reached = false;
252 	unsigned long long poll_time_ns;
253 	int cpu_id = qdf_get_cpu();
254 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
255 	struct hif_config_info *cfg = &scn->hif_config;
256 
257 	poll_time_ns = sched_clock() - hif_ext_group->poll_start_time;
258 	time_limit_reached =
259 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
260 
261 	if (time_limit_reached) {
262 		hif_ext_group->stats[cpu_id].time_limit_reached++;
263 		hif_ext_group->force_break = true;
264 	}
265 
266 	return time_limit_reached;
267 }
268 
269 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
270 {
271 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
272 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
273 	struct hif_exec_context *hif_ext_group;
274 	bool ret_val = false;
275 
276 	if (!(grp_id < hif_state->hif_num_extgroup) ||
277 	    !(grp_id < HIF_MAX_GROUP))
278 		return false;
279 
280 	hif_ext_group = hif_state->hif_ext_group[grp_id];
281 
282 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
283 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
284 
285 	return ret_val;
286 }
287 
288 /**
289  * hif_exec_update_service_start_time() - Update NAPI poll start time
290  * @hif_ext_group: hif_ext_group of type NAPI
291  *
292  * The function is called at the beginning of a NAPI poll to record the poll
293  * start time.
294  *
295  * Return: None
296  */
297 static inline
298 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
299 {
300 	hif_ext_group->poll_start_time = sched_clock();
301 }
302 
303 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
304 {
305 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
306 	struct hif_exec_context *hif_ext_group;
307 	struct qca_napi_stat *napi_stats;
308 	int i, j;
309 
310 	/*
311 	 * Max value of uint32_t (poll_time_bucket) = 4294967295
312 	 * Thus we need 10 chars + 1 space = 11 chars for each bucket value.
313 	 * +1 char for '\0'.
314 	 */
315 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
316 
317 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
318 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
319 
320 	for (i = 0;
321 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
322 	     i++) {
323 		hif_ext_group = hif_state->hif_ext_group[i];
324 		for (j = 0; j < num_possible_cpus(); j++) {
325 			napi_stats = &hif_ext_group->stats[j];
326 			if (!napi_stats->napi_schedules)
327 				continue;
328 
329 			hif_get_poll_times_hist_str(napi_stats,
330 						    hist_str,
331 						    sizeof(hist_str));
332 			QDF_TRACE(QDF_MODULE_ID_HIF,
333 				  QDF_TRACE_LEVEL_ERROR,
334 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
335 				  i, j,
336 				  napi_stats->napi_schedules,
337 				  napi_stats->napi_polls,
338 				  napi_stats->napi_completes,
339 				  napi_stats->napi_workdone,
340 				  napi_stats->time_limit_reached,
341 				  qdf_do_div(napi_stats->napi_max_poll_time,
342 					     1000),
343 				  hist_str);
344 		}
345 	}
346 
347 	hif_print_napi_latency_stats(hif_state);
348 }
349 
350 qdf_export_symbol(hif_print_napi_stats);
351 
352 #else
353 
354 static inline
355 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
356 				 uint8_t buf_len)
357 {
358 }
359 
360 static inline
361 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
362 {
363 }
364 
365 static inline
366 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
367 {
368 }
369 
370 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
371 {
372 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
373 	struct hif_exec_context *hif_ext_group;
374 	struct qca_napi_stat *napi_stats;
375 	int i, j;
376 
377 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
378 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
379 
380 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
381 		if (hif_state->hif_ext_group[i]) {
382 			hif_ext_group = hif_state->hif_ext_group[i];
383 			for (j = 0; j < num_possible_cpus(); j++) {
384 				napi_stats = &(hif_ext_group->stats[j]);
385 				if (napi_stats->napi_schedules != 0)
386 					QDF_TRACE(QDF_MODULE_ID_HIF,
387 						QDF_TRACE_LEVEL_FATAL,
388 						"NAPI[%2d]CPU[%d]: "
389 						"%7d %7d %7d %7d ",
390 						i, j,
391 						napi_stats->napi_schedules,
392 						napi_stats->napi_polls,
393 						napi_stats->napi_completes,
394 						napi_stats->napi_workdone);
395 			}
396 		}
397 	}
398 
399 	hif_print_napi_latency_stats(hif_state);
400 }
401 qdf_export_symbol(hif_print_napi_stats);
402 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
403 
404 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
405 {
406 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
407 
408 	tasklet_schedule(&t_ctx->tasklet);
409 }
410 
411 /**
412  * hif_exec_tasklet_fn() - grp tasklet function
413  * @data: context (the hif_exec_context, cast to unsigned long)
414  *
415  * Return: void
416  */
417 static void hif_exec_tasklet_fn(unsigned long data)
418 {
419 	struct hif_exec_context *hif_ext_group =
420 			(struct hif_exec_context *)data;
421 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
422 	unsigned int work_done;
423 
424 	work_done =
425 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
426 
427 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
428 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
429 		hif_ext_group->irq_enable(hif_ext_group);
430 	} else {
431 		hif_exec_tasklet_schedule(hif_ext_group);
432 	}
433 }
434 
435 /**
436  * hif_latency_profile_measure() - calculate latency and update histogram
437  * @hif_ext_group: hif exec context
438  *
439  * Return: None
440  */
441 #ifdef HIF_LATENCY_PROFILE_ENABLE
442 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
443 {
444 	int64_t cur_tstamp;
445 	int64_t time_elapsed;
446 
447 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
448 
449 	if (cur_tstamp > hif_ext_group->tstamp)
450 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
451 	else
452 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
453 
454 	hif_ext_group->tstamp = cur_tstamp;
455 
456 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
457 		hif_ext_group->sched_latency_stats[0]++;
458 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
459 		hif_ext_group->sched_latency_stats[1]++;
460 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
461 		hif_ext_group->sched_latency_stats[2]++;
462 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
463 		hif_ext_group->sched_latency_stats[3]++;
464 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
465 		hif_ext_group->sched_latency_stats[4]++;
466 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
467 		hif_ext_group->sched_latency_stats[5]++;
468 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
469 		hif_ext_group->sched_latency_stats[6]++;
470 	else
471 		hif_ext_group->sched_latency_stats[7]++;
472 }
473 #else
474 static inline
475 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
476 {
477 }
478 #endif
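/*
 * Illustrative mapping (assuming the HIF_SCHED_LATENCY_BUCKET_* macros
 * match the labels printed by hif_print_napi_latency_stats()): an elapsed
 * time of 15 ms is counted in sched_latency_stats[2] and reported under
 * the "11-20 ms" row.
 */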
479 
480 /**
481  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
482  * @hif_ext_group: hif exec context
483  *
484  * Return: None
485  */
486 #ifdef HIF_LATENCY_PROFILE_ENABLE
487 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
488 {
489 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
490 }
491 #else
492 static inline
493 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
494 {
495 }
496 #endif
497 
498 #ifdef FEATURE_NAPI
499 /**
500  * hif_exec_poll() - napi poll
501  * @napi: napi struct
502  * @budget: NAPI budget granted to this poll
503  *
504  * Return: work done, mapped from the internal budget back to the NAPI budget
505  */
506 static int hif_exec_poll(struct napi_struct *napi, int budget)
507 {
508 	struct hif_napi_exec_context *napi_exec_ctx =
509 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
510 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
511 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
512 	int work_done;
513 	int normalized_budget = 0;
514 	int actual_dones;
515 	int shift = hif_ext_group->scale_bin_shift;
516 	int cpu = smp_processor_id();
517 
518 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
519 			 0, 0, 0, HIF_EVENT_BH_SCHED);
520 
521 	hif_ext_group->force_break = false;
522 	hif_exec_update_service_start_time(hif_ext_group);
523 
524 	if (budget)
525 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
526 
527 	hif_latency_profile_measure(hif_ext_group);
528 
529 	work_done = hif_ext_group->handler(hif_ext_group->context,
530 					   normalized_budget);
531 
532 	actual_dones = work_done;
533 
534 	if (!hif_ext_group->force_break && work_done < normalized_budget) {
535 		napi_complete(napi);
536 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
537 		hif_ext_group->irq_enable(hif_ext_group);
538 		hif_ext_group->stats[cpu].napi_completes++;
539 	} else {
540 		/* if the ext_group supports time based yield, claim full work
541 		/* if the ext_group supports time-based yield, claim full work
542 		 * done anyway */
543 	}
544 
545 	hif_ext_group->stats[cpu].napi_polls++;
546 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
547 
548 	/* map internal budget to NAPI budget */
549 	if (work_done)
550 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
551 
552 	hif_exec_fill_poll_time_histogram(hif_ext_group);
553 
554 	return work_done;
555 }
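/*
 * Illustrative flow (not from the original source): with shift = 1 and a
 * NAPI budget of 64, the group handler is offered an internal budget of
 * 127; if it exhausts it (or force_break is set), the full internal budget
 * is claimed and reported back as 64 so the NAPI core keeps polling;
 * otherwise napi_complete() is called and the group IRQs are re-enabled.
 */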
556 
557 /**
558  * hif_exec_napi_schedule() - schedule the napi exec instance
559  * @ctx: a hif_exec_context known to be of napi type
560  */
561 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
562 {
563 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
564 	ctx->stats[smp_processor_id()].napi_schedules++;
565 
566 	napi_schedule(&n_ctx->napi);
567 }
568 
569 /**
570  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
571  * @ctx: a hif_exec_context known to be of napi type
572  */
573 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
574 {
575 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
576 	int irq_ind;
577 
578 	if (ctx->inited) {
579 		napi_disable(&n_ctx->napi);
580 		ctx->inited = 0;
581 	}
582 
583 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
584 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
585 
586 	hif_core_ctl_set_boost(false);
587 	netif_napi_del(&(n_ctx->napi));
588 }
589 
590 struct hif_execution_ops napi_sched_ops = {
591 	.schedule = &hif_exec_napi_schedule,
592 	.kill = &hif_exec_napi_kill,
593 };
594 
595 /**
596  * hif_exec_napi_create() - allocate and initialize a napi exec context
597  * @scale: a binary shift factor to map NAPI budget from/to internal
598  *         budget
599  */
600 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
601 {
602 	struct hif_napi_exec_context *ctx;
603 
604 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
605 	if (!ctx)
606 		return NULL;
607 
608 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
609 	ctx->exec_ctx.inited = true;
610 	ctx->exec_ctx.scale_bin_shift = scale;
611 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
612 	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
613 		       QCA_NAPI_BUDGET);
614 	napi_enable(&ctx->napi);
615 
616 	return &ctx->exec_ctx;
617 }
618 #else
619 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
620 {
621 	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__);
622 	return hif_exec_tasklet_create();
623 }
624 #endif
625 
626 
627 /**
628  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
629  * @ctx: a hif_exec_context known to be of tasklet type
630  */
631 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
632 {
633 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
634 	int irq_ind;
635 
636 	if (ctx->inited) {
637 		tasklet_disable(&t_ctx->tasklet);
638 		tasklet_kill(&t_ctx->tasklet);
639 	}
640 	ctx->inited = false;
641 
642 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
643 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
644 }
645 
646 struct hif_execution_ops tasklet_sched_ops = {
647 	.schedule = &hif_exec_tasklet_schedule,
648 	.kill = &hif_exec_tasklet_kill,
649 };
650 
651 /**
652  * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
653  */
654 static struct hif_exec_context *hif_exec_tasklet_create(void)
655 {
656 	struct hif_tasklet_exec_context *ctx;
657 
658 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
659 	if (!ctx)
660 		return NULL;
661 
662 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
663 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
664 		     (unsigned long)ctx);
665 
666 	ctx->exec_ctx.inited = true;
667 
668 	return &ctx->exec_ctx;
669 }
670 
671 /**
672  * hif_exec_get_ctx() - retrieve an exec context based on an id
673  * @softc: the hif context owning the exec context
674  * @id: the id of the exec context
675  *
676  * mostly added to make it easier to rename or move the context array
677  */
678 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
679 					  uint8_t id)
680 {
681 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
682 
683 	if (id < hif_state->hif_num_extgroup)
684 		return hif_state->hif_ext_group[id];
685 
686 	return NULL;
687 }
688 
689 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
690 				uint8_t id)
691 {
692 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
693 
694 	if (id < hif_state->hif_num_extgroup)
695 		return hif_state->hif_ext_group[id]->os_irq[0];
696 	return -EINVAL;
697 }
698 
699 qdf_export_symbol(hif_get_int_ctx_irq_num);
700 
701 #ifdef HIF_CPU_PERF_AFFINE_MASK
702 void hif_config_irq_set_perf_affinity_hint(
703 	struct hif_opaque_softc *hif_ctx)
704 {
705 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
706 
707 	hif_config_irq_affinity(scn);
708 }
709 
710 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
711 #endif
712 
713 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
714 {
715 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
716 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
717 	struct hif_exec_context *hif_ext_group;
718 	int i, status;
719 
720 	if (scn->ext_grp_irq_configured) {
721 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
722 		return QDF_STATUS_E_FAILURE;
723 	}
724 
725 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
726 		hif_ext_group = hif_state->hif_ext_group[i];
727 		status = 0;
728 		qdf_spinlock_create(&hif_ext_group->irq_lock);
729 		if (hif_ext_group->configured &&
730 		    hif_ext_group->irq_requested == false) {
731 			hif_ext_group->irq_enabled = true;
732 			status = hif_grp_irq_configure(scn, hif_ext_group);
733 		}
734 		if (status != 0) {
735 			HIF_ERROR("%s: failed for group %d", __func__, i);
736 			hif_ext_group->irq_enabled = false;
737 		}
738 	}
739 
740 	scn->ext_grp_irq_configured = true;
741 
742 	return QDF_STATUS_SUCCESS;
743 }
744 
745 qdf_export_symbol(hif_configure_ext_group_interrupts);
746 
747 #ifdef WLAN_SUSPEND_RESUME_TEST
748 /**
749  * hif_check_and_trigger_ut_resume() - check if unit-test command was used to
750  *				       trigger fake-suspend command, if yes
751  *				       then issue resume procedure.
752  * @scn: opaque HIF software context
753  *
754  * This API checks if the unit-test command was used to trigger the fake-suspend
755  * command and, if so, triggers the resume procedure.
756  *
757  * Make this API inline to save API-switch overhead and do branch-prediction to
758  * optimize performance impact.
759  *
760  * Return: void
761  */
762 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
763 {
764 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
765 		hif_ut_fw_resume(scn);
766 }
767 #else
768 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
769 {
770 }
771 #endif
772 
773 /**
774  * hif_ext_group_interrupt_handler() - handler for related interrupts
775  * @irq: irq number of the interrupt
776  * @context: the associated hif_exec_context
777  *
778  * This callback function takes care of disabling the associated interrupts
779  * and scheduling the expected bottom half for the exec_context.
780  * This callback function also helps keep track of the count of running contexts.
781  */
782 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
783 {
784 	struct hif_exec_context *hif_ext_group = context;
785 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
786 
787 	if (hif_ext_group->irq_requested) {
788 		hif_latency_profile_start(hif_ext_group);
789 
790 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
791 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
792 
793 		hif_ext_group->irq_disable(hif_ext_group);
794 		/*
795 		 * if private ioctl has issued fake suspend command to put
796 		 * FW in D0-WOW state then here is our chance to bring FW out
797 		 * of WOW mode.
798 		 *
799 		 * The reason why you need to explicitly wake-up the FW is here:
800 		 * APSS should have been in fully awake through-out when
801 		 * fake APSS suspend command was issued (to put FW in WOW mode)
802 		 * hence organic way of waking-up the FW
803 		 * (as part-of APSS-host wake-up) won't happen because
804 		 * in reality APSS didn't really suspend.
805 		 */
806 		hif_check_and_trigger_ut_resume(scn);
807 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
808 
809 		hif_ext_group->sched_ops->schedule(hif_ext_group);
810 	}
811 
812 	return IRQ_HANDLED;
813 }
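/*
 * Note (added for clarity): this is a one-shot style of handling, assuming
 * irq_disable/irq_enable are wired per group; the group IRQs stay masked
 * from here until the tasklet or NAPI poll above reports completion and
 * calls hif_ext_group->irq_enable().
 */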
814 
815 /**
816  * hif_exec_kill() - kill the bottom half of every registered exec context
817  * @hif_ctx: HIF context
818  *
819  * Return: void
820  */
821 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
822 {
823 	int i;
824 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
825 
826 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
827 		hif_state->hif_ext_group[i]->sched_ops->kill(
828 			hif_state->hif_ext_group[i]);
829 
830 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
831 }
832 
833 /**
834  * hif_register_ext_group() - API to register external group
835  * interrupt handler.
836  * @hif_ctx: HIF context
837  * @numirq: number of IRQs in the group
838  * @irq: array of IRQ values
839  * @handler: callback interrupt handler function
840  * @cb_ctx: context to be passed to the callback
841  * @type: napi vs tasklet
842  *
843  * Return: QDF_STATUS
844  */
845 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
846 				  uint32_t numirq, uint32_t irq[],
847 				  ext_intr_handler handler,
848 				  void *cb_ctx, const char *context_name,
849 				  enum hif_exec_type type, uint32_t scale)
850 {
851 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
852 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
853 	struct hif_exec_context *hif_ext_group;
854 
855 	if (scn->ext_grp_irq_configured) {
856 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
857 		return QDF_STATUS_E_FAILURE;
858 	}
859 
860 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
861 		HIF_ERROR("%s Max groups reached\n", __func__);
862 		return QDF_STATUS_E_FAILURE;
863 	}
864 
865 	if (numirq >= HIF_MAX_GRP_IRQ) {
866 		HIF_ERROR("%s invalid numirq\n", __func__);
867 		return QDF_STATUS_E_FAILURE;
868 	}
869 
870 	hif_ext_group = hif_exec_create(type, scale);
871 	if (!hif_ext_group)
872 		return QDF_STATUS_E_FAILURE;
873 
874 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
875 		hif_ext_group;
876 
877 	hif_ext_group->numirq = numirq;
878 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
879 	hif_ext_group->context = cb_ctx;
880 	hif_ext_group->handler = handler;
881 	hif_ext_group->configured = true;
882 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
883 	hif_ext_group->hif = hif_ctx;
884 	hif_ext_group->context_name = context_name;
885 	hif_ext_group->type = type;
886 
887 	hif_state->hif_num_extgroup++;
888 	return QDF_STATUS_SUCCESS;
889 }
890 qdf_export_symbol(hif_register_ext_group);
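/*
 * Usage sketch (illustrative only; the IRQ numbers, dp_service_srngs()
 * handler and dp_soc context below are hypothetical caller-side names):
 *
 *	uint32_t irqs[] = { 286, 287 };
 *
 *	if (hif_register_ext_group(hif_ctx, ARRAY_SIZE(irqs), irqs,
 *				   dp_service_srngs, dp_soc, "dp_intr",
 *				   HIF_EXEC_NAPI_TYPE, scale) !=
 *	    QDF_STATUS_SUCCESS)
 *		hif_err("ext group registration failed");
 *
 * The group's IRQs are only requested by a later call to
 * hif_configure_ext_group_interrupts().
 */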
891 
892 /**
893  * hif_exec_create() - create an execution context
894  * @type: the type of execution context to create
895  */
896 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
897 						uint32_t scale)
898 {
899 	hif_debug("%s: create exec_type %d budget %d\n",
900 		  __func__, type, QCA_NAPI_BUDGET * scale);
901 
902 	switch (type) {
903 	case HIF_EXEC_NAPI_TYPE:
904 		return hif_exec_napi_create(scale);
905 
906 	case HIF_EXEC_TASKLET_TYPE:
907 		return hif_exec_tasklet_create();
908 	default:
909 		return NULL;
910 	}
911 }
912 
913 /**
914  * hif_exec_destroy() - free the hif_exec context
915  * @ctx: context to free
916  *
917  * please kill the context before freeing it to avoid a use after free.
918  */
919 void hif_exec_destroy(struct hif_exec_context *ctx)
920 {
921 	qdf_spinlock_destroy(&ctx->irq_lock);
922 	qdf_mem_free(ctx);
923 }
924 
925 /**
926  * hif_deregister_exec_group() - API to free the exec contexts
927  * @hif_ctx: HIF context
928  * @context_name: name of the module whose contexts need to be deregistered
929  *
930  * This function deregisters the contexts of the requestor identified
931  * based on the context_name & frees the memory.
932  *
933  * Return: void
934  */
935 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
936 				const char *context_name)
937 {
938 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
939 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
940 	struct hif_exec_context *hif_ext_group;
941 	int i;
942 
943 	for (i = 0; i < HIF_MAX_GROUP; i++) {
944 		hif_ext_group = hif_state->hif_ext_group[i];
945 
946 		if (!hif_ext_group)
947 			continue;
948 
949 		hif_debug("%s: Deregistering grp id %d name %s\n",
950 			  __func__,
951 			  hif_ext_group->grp_id,
952 			  hif_ext_group->context_name);
953 
954 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
955 			hif_ext_group->sched_ops->kill(hif_ext_group);
956 			hif_state->hif_ext_group[i] = NULL;
957 			hif_exec_destroy(hif_ext_group);
958 			hif_state->hif_num_extgroup--;
959 		}
960 
961 	}
962 }
963 qdf_export_symbol(hif_deregister_exec_group);
964