/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 * @id: id of the copy engine this work item services
 * @data: opaque HIF context pointer (struct hif_softc *)
 * @work: work struct
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule a CE tasklet from
 *                                        process context
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}
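
/*
 * Example (not part of this file): a sketch of the producer side of
 * tasklet_workers. The helper name ce_schedule_tasklet_work() is
 * hypothetical here; the point is that atomic context can bounce a CE
 * tasklet through process context by queueing the per-CE work item,
 * gated on work_initialized so nothing is queued before
 * init_tasklet_workers() runs or after the workers are torn down:
 *
 *	static void ce_schedule_tasklet_work(int ce_id)
 *	{
 *		if (work_initialized)
 *			schedule_work(&tasklet_workers[ce_id].work);
 *	}
 */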

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet reschedule work item
 * @work: struct work_struct
 * @work_handler: handler function the work item should run
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE tasklet reschedule workers
 * @scn: HIF Context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}
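
/*
 * Illustrative lifecycle, assuming the usual HIF attach/detach flow (the
 * callers live outside this file): the workers are set up once per HIF
 * instance and cancelled before the instance goes away.
 *
 *	init_tasklet_workers(hif_hdl);      // during HIF attach/open
 *	...
 *	deinit_tasklet_workers(hif_hdl);    // during HIF detach/close
 */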

/**
 * deinit_tasklet_workers() - cancel the per-CE tasklet reschedule workers
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1  -  2",
					       "2  -  5", "5  - 10", "  >  10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		qdf_nofl_info("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_nofl_info("\t Bucket %sms :%llu\t last update:%llu",
				      buck_str[j],
				      stats->ce_tasklet_exec_bucket[i][j],
				      stats->ce_tasklet_exec_last_update[i][j]);
		}

		qdf_nofl_info("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_nofl_info("\t Bucket %sms :%llu\t last update :%llu",
				      buck_str[j],
				      stats->ce_tasklet_sched_bucket[i][j],
				      stats->
					   ce_tasklet_sched_last_update[i][j]);
		}

		qdf_nofl_info("\n\t\t CE RING %d Last %d time records",
			      i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			qdf_nofl_info("\t Execution time: %lluus Total Scheduled time: %lluus",
				      stats->tasklet_exec_time_record[i][index],
				      stats->
					   tasklet_sched_time_record[i][index]);
			/* step backwards with wrap-around; a bare
			 * (index - 1) would underflow the unsigned index
			 * and break the modulo
			 */
			index = (index + HIF_REQUESTED_EVENTS - 1) %
				HIF_REQUESTED_EVENTS;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state holding the latency stats
 * @ce_id: ce_id_type
 *
 * Derives the execution latency (tasklet entry to exit) and scheduling
 * latency (tasklet schedule to entry) from the timestamps recorded by
 * hif_record_tasklet_exec_entry_ts() and hif_record_tasklet_sched_entry_ts().
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */
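
/*
 * Worked example of the bucketing above (values are hypothetical): with
 * tasklet_sched_entry_ts = 1000us, tasklet_exec_entry_ts = 1300us and
 * curr_time = 8500us,
 *
 *	sched_time = 1300 - 1000 = 300us  -> sched_ms = 0
 *	exec_time  = 8500 - 1300 = 7200us -> exec_ms  = 7
 *
 * so the scheduling latency falls through every "greater than" test into
 * CE_BUCKET_500_US ("0 - 0.5"), while the execution latency matches
 * "exec_ms > 5" and lands in CE_BUCKET_10_MS ("5 - 10").
 */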

/**
 * ce_tasklet() - bottom half handler for a copy engine
 * @data: opaque pointer to the struct ce_tasklet_entry being serviced
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);
	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so reschedule the tasklet to
		 * process them. The interrupt is only re-enabled once no
		 * frames are pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, -1, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				NULL, NULL, -1, 0);
	ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize the tasklets for the selected copy engines
 * @hif_ce_state: hif_ce_state
 * @mask: bitmask of copy engines; a 1 at bit x initializes the CE x tasklet
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - disable and kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill
			 * to avoid a race between tasklet_schedule and
			 * tasklet_kill. cancel_work_sync() will not
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes; even if tasklet_schedule() still
			 * happens, tasklet_kill() takes care of it.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no CE tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
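
/*
 * A minimal caller sketch, assuming a bus suspend path (illustrative
 * only; the real suspend code lives in the bus layer). With
 * HIF_CE_DRAIN_WAIT_CNT = 20 and a 10ms sleep per iteration, the drain
 * gives the bottom halves roughly 200ms to finish before giving up:
 *
 *	if (hif_drain_tasklets(scn)) {
 *		// a CE bottom half was still running; abort the suspend
 *		return -EBUSY;
 *	}
 */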

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - tests if an irq on the given copy engine
 *	should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - irq handler registered for each copy engine
 * @irq: irq number from the kernel
 * @context: opaque pointer to the ce_tasklet_entry registered with the irq
 *
 * Return: irqreturn_t from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);
	return true;
}
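
/*
 * Note on the qdf_atomic_dec() above: ce_dispatch_interrupt() increments
 * active_tasklet_cnt *before* calling hif_tasklet_schedule(), so when the
 * tasklet is already pending the extra count must be dropped here; the
 * already-scheduled run does its own decrement in ce_tasklet(). An
 * illustrative caller pattern (mirroring ce_dispatch_interrupt() below):
 *
 *	qdf_atomic_inc(&scn->active_tasklet_cnt);
 *	hif_tasklet_schedule(hif_hdl, tasklet_entry);
 *	// on the false path the increment has already been undone
 */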

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE on a
 *         ce_id mismatch or an out-of-range ce_id
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d >= CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/**
 * ce_name - irq name strings for each copy engine, indexed by CE id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_ce_free_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}
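
/*
 * Mask usage example (hypothetical values): to unregister the irqs for
 * copy engines 0 and 2 only,
 *
 *	ce_unregister_irq(hif_ce_state, (1 << 0) | (1 << 2));
 *
 * Bits for CEs whose irq was never registered are filtered out against
 * ce_register_irq_done above, so an over-wide mask such as ~0u is safe.
 */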

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
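
/*
 * Usage sketch (illustrative, not taken from a caller in this file):
 * request irqs for every copy engine and rely on the rollback above,
 * which unregisters the partially-registered set (done_mask) on failure.
 *
 *	uint32_t mask = (1 << scn->ce_count) - 1;
 *
 *	if (ce_register_irq(hif_ce_state, mask) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;  // nothing left registered
 */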