/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"
/**
 * struct tasklet_work - context for rescheduling a CE tasklet from a work
 * @id: copy engine id
 * @data: opaque context (struct hif_softc) handed back to the work handler
 * @work: work struct queued to reschedule the CE tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}

	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - initialize a tasklet work struct
 * @work: struct work_struct
 * @work_handler: work handler to attach
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
/**
 * init_tasklet_worker_by_ceid() - init the tasklet worker for a copy engine
 * @scn: HIF Context
 * @ce_id: id of the copy engine whose worker to initialize
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].work,
			  reschedule_ce_tasklet_work_handler);
}
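
/*
 * Usage sketch (hypothetical caller, for illustration only): a copy engine
 * sets up its worker once at init time; code that must not call
 * tasklet_schedule() directly can then defer the reschedule through the
 * workqueue:
 *
 *	init_tasklet_worker_by_ceid(hif_hdl, ce_id);
 *	...
 *	schedule_work(&tasklet_workers[ce_id].work);
 *
 * Note that tasklet_workers is static to this file, so the schedule_work()
 * call above is only possible from within ce_tasklet.c.
 */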

/**
 * deinit_tasklet_workers() - cancel all tasklet reschedule works and wait
 *	for any running handler to finish
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1  -  2",
					       "2  -  5", "5  - 10", "  >  10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				       stats->ce_tasklet_exec_last_update[i][j],
				       &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				      stats->ce_tasklet_sched_last_update[i][j],
				      &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state holding the latency stats
 * @ce_id: ce_id_type
 *
 * Latencies are computed from the timestamps recorded at schedule entry and
 * execution entry against the current time.
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
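
/*
 * Worked example (assuming the CE_BUCKET_* values follow the buck_str[]
 * order printed by hif_ce_latency_stats()): with
 * tasklet_sched_entry_ts = 1000 us, tasklet_exec_entry_ts = 1600 us and
 * curr_time = 4800 us, sched_time is 600 us (sched_ms == 0 but
 * sched_time > 500, so the CE_BUCKET_1_MS "0.5 - 1" ms bucket) and
 * exec_time is 3200 us (exec_ms == 3, so the CE_BUCKET_5_MS "2 - 5" ms
 * bucket).
 */
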
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

/**
 * ce_tasklet() - ce tasklet bottom half
 * @data: context pointer, cast to the struct ce_tasklet_entry for this CE
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so reschedule the tasklet to
		 * process them. The interrupt is re-enabled only once no
		 * frames are pending in any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, -1, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize the enabled ce tasklets
 * @hif_ce_state: hif_ce_state
 * @mask: bitmask of copy engines; bit x enables the tasklet for CE x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - kill all ce tasklets and cancel their pending works
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill
			 * to avoid the race between tasklet_schedule
			 * and tasklet_kill. cancel_work_sync() won't
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes. Even if a tasklet_schedule() slips
			 * in meanwhile, tasklet_kill() will take care of it.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);

			return -EFAULT;
		}
		hif_info("Waiting for CE to finish access");
		msleep(10);
	}
	return 0;
}
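
/*
 * Minimal usage sketch (hypothetical caller, for illustration only): a bus
 * suspend path would stop new CE interrupts first, then wait for the bottom
 * halves to drain before powering the link down:
 *
 *	ce_unregister_irq(HIF_GET_CE_STATE(scn), 0xffffffff);
 *	if (hif_drain_tasklets(scn))
 *		return -EBUSY;	// tasklets still running; abort suspend
 */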

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - Tests if an irq on the given copy engine
 *	should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - dispatch an snoc irq to its copy engine
 * @irq: irq coming from kernel
 * @context: context
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		hif_debug("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}
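
/*
 * Note on the dedup above: ce_dispatch_interrupt() increments
 * active_tasklet_cnt before scheduling, and ce_tasklet() decrements it once
 * on exit. If the tasklet is already scheduled (TASKLET_STATE_SCHED set),
 * only a single ce_tasklet() run will follow, so the extra increment is
 * undone here to keep the count, and thus hif_drain_tasklets(), accurate.
 */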

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_NONE if the interrupt is not for this copy engine,
 *	IRQ_HANDLED otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		hif_err("ce_id (expect %d, received %d) does not match",
			tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d >= CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}
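
/*
 * Interrupt-to-completion flow, summarizing the paths above:
 *
 *	irq -> ce_dispatch_interrupt():
 *		hif_irq_disable(), active_tasklet_cnt++,
 *		then hif_napi_schedule() or hif_tasklet_schedule()
 *	ce_tasklet():
 *		ce_per_engine_service();
 *		rx still pending -> reschedule the tasklet and return,
 *		else hif_irq_enable() and active_tasklet_cnt--
 */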

/**
 * ce_name - per copy engine irq name strings, indexed by CE id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err(
					"pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err(
					"cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
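
/*
 * Usage sketch (hypothetical caller, for illustration only): bit x of the
 * mask selects copy engine x, so registering and later releasing irqs for
 * CE 0, CE 1 and CE 3 only would be:
 *
 *	if (ce_register_irq(hif_ce_state, BIT(0) | BIT(1) | BIT(3)) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	ce_unregister_irq(hif_ce_state, BIT(0) | BIT(1) | BIT(3));
 */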
763