xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 * @id: copy engine id serviced by this work item
 * @data: opaque HIF context (struct hif_softc *)
 * @reg_work: deferred work that re-schedules the CE tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->hi_tasklet_ce)
		tasklet_hi_schedule(&tasklet_entry->intr_tq);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);
}
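
/*
 * Note: tasklet_hi_schedule() queues on HI_SOFTIRQ, which the kernel
 * services before the regular TASKLET_SOFTIRQ queue, so CEs configured
 * with CE_ATTR_HI_TASKLET (see ce_tasklet_init() below) are serviced
 * ahead of ordinary CE tasklets.
 */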

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
	struct tasklet_work *ce_work = qdf_container_of(reg_work,
							struct tasklet_work,
							reg_work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (!scn->hif_init_done) {
		hif_err("wlan driver is unloaded");
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}
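
/*
 * A minimal sketch of how this handler is driven (hypothetical call
 * site; it assumes the qdf_sched_work() helper from qdf_defer.h, and
 * the real callers live outside this file):
 *
 *	qdf_sched_work(NULL, &tasklet_workers[ce_id].reg_work);
 *
 * The work item then runs reschedule_ce_tasklet_work_handler() in
 * process context, which re-schedules the CE tasklet if it is still
 * initialized.
 */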

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - initialize the work item backing a CE tasklet
 * @work: struct work_struct to initialize
 * @work_handler: handler the work item runs
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - initialize the tasklet worker for a CE
 * @scn: HIF context
 * @ce_id: copy engine id
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
			  reschedule_ce_tasklet_work_handler);
}
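
/*
 * Usage sketch (hypothetical attach-time caller; hif_hdl is assumed to
 * be the opaque HIF handle owned by the caller):
 *
 *	for (ce_id = 0; ce_id < CE_ID_MAX; ce_id++)
 *		init_tasklet_worker_by_ceid(hif_hdl, ce_id);
 */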

/**
 * deinit_tasklet_workers() - cancel all pending tasklet reschedule work
 * @scn: HIF context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1  -  2",
					       "2  -  5", "5  - 10", "  >  10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				       stats->ce_tasklet_exec_last_update[i][j],
				       &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				      stats->ce_tasklet_sched_last_update[i][j],
				      &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

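		/*
		 * Walk the per-CE record ring from the most recent entry
		 * backwards, wrapping at the start of the array, and stop
		 * once the walk returns to the starting slot.
		 */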
		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: copy engine id
 *
 * The execution latency is measured from tasklet entry to now; the
 * scheduled latency is measured from tasklet scheduling to tasklet entry.
 * Both are compared in microseconds so the counts line up with the bucket
 * labels (truncating to whole milliseconds first would shift every
 * boundary by up to 1 ms).
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time;
	uint64_t sched_time;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	if (exec_time > 10000) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_time > 5000) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_time > 2000) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_time > 1000) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_time > 10000) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_time > 5000) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_time > 2000) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_time > 1000) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
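
/*
 * Worked example, assuming the microsecond thresholds above: an
 * exec_time of 1800 us is greater than 1000 but not greater than 2000,
 * so it lands in CE_BUCKET_2_MS (the "1  -  2" ms bucket) and stamps
 * that bucket's last-update time with curr_time.
 */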
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static inline void
ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state, uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static inline void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - Reset ce full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

#ifdef HIF_DETECTION_LATENCY_ENABLE
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_sched_cpuid = qdf_get_cpu();
	scn->latency_detect.ce2_tasklet_sched_time = qdf_system_ticks();
}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_exec_time = qdf_system_ticks();
	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
}
#else
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}
#endif

/**
 * ce_tasklet() - tasklet entry point that services one copy engine
 * @data: opaque pointer to the struct ce_tasklet_entry for this CE
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_latency_detect_tasklet_exec(scn, tasklet_entry);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so reschedule the tasklet to
		 * process them. The interrupt is re-enabled only when no
		 * frames are pending in any of the copy engine pipes.
		 */
		if (test_bit(TASKLET_STATE_SCHED,
			     &tasklet_entry->intr_tq.state)) {
			hif_info("ce_id%d tasklet was scheduled, return",
				 tasklet_entry->ce_id);
			qdf_atomic_dec(&scn->active_tasklet_cnt);
			return;
		}

		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, -1, 0);

		ce_tasklet_schedule(tasklet_entry);
		hif_latency_detect_tasklet_sched(scn, tasklet_entry);

		hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
		if (scn->ce_latency_stats) {
			ce_tasklet_update_bucket(hif_ce_state,
						 tasklet_entry->ce_id);
			hif_record_tasklet_sched_entry_ts(scn,
							  tasklet_entry->ce_id);
		}
		return;
	}

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	if ((scn->target_status != TARGET_STATUS_RESET) &&
	    !scn->free_irq_done)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
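
/*
 * Lifecycle note: active_tasklet_cnt is incremented in
 * ce_dispatch_interrupt() when the tasklet (or NAPI poll) is scheduled
 * and decremented above once the tasklet finishes without rescheduling
 * itself; hif_drain_tasklets() spins on this counter during suspend.
 */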

/**
 * ce_tasklet_init() - initialize the tasklet for each masked-in copy engine
 * @hif_ce_state: HIF CE state
 * @mask: bitmask of CE ids whose tasklets should be initialized
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;
	struct CE_attr *attr;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

			attr = &hif_ce_state->host_ce_config[i];
			if (attr->flags & CE_ATTR_HI_TASKLET)
				hif_ce_state->tasklets[i].hi_tasklet_ce = true;
			else
				hif_ce_state->tasklets[i].hi_tasklet_ce = false;

			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - disable and kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill() to
			 * avoid racing tasklet_schedule() against
			 * tasklet_kill(). qdf_cancel_work() does not return
			 * until reschedule_ce_tasklet_work_handler()
			 * completes; if a tasklet_schedule() still slips in,
			 * tasklet_kill() takes care of it.
			 */
			qdf_cancel_work(&tasklet_workers[i].reg_work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

/**
 * ce_tasklet_entry_dump() - dump tasklet entries info
 * @hif_ce_state: ce state
 *
 * This function will dump all tasklet entries info
 *
 * Return: None
 */
static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state)
{
	struct ce_tasklet_entry *tasklet_entry;
	int i;

	if (hif_ce_state) {
		for (i = 0; i < CE_COUNT_MAX; i++) {
			tasklet_entry = &hif_ce_state->tasklets[i];

			hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK",
				 i,
				 tasklet_entry->ce_id,
				 tasklet_entry->inited,
				 tasklet_entry->hi_tasklet_ce,
				 tasklet_entry->hif_ce_state);
		}
	}
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);

			return -EFAULT;
		}
		hif_info("Waiting for CE to finish access");
		msleep(10);
	}
	return 0;
}
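
/*
 * Usage sketch (hypothetical bus-suspend path; the irq-disable loop and
 * error handling are elided):
 *
 *	hif_irq_disable(scn, ce_id);	// for each registered CE
 *	if (hif_drain_tasklets(scn))	// -EFAULT on timeout
 *		return -EBUSY;		// abort the suspend attempt
 */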

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - test if an irq on the given copy engine
 *	should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - hard-irq handler for SNOC CE interrupts
 * @irq: irq number from the kernel
 * @context: the ce_tasklet_entry registered for this irq
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

#ifdef WLAN_TRACEPOINTS
/**
 * hif_set_ce_tasklet_sched_time() - Set tasklet schedule time for
 *  CE with matching ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: None
 */
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];

	ce_state->ce_tasklet_sched_time = qdf_time_sched_clock();
}
#else
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		hif_debug("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id);
	/*
	 * Record the scheduling before tasklet_schedule() to keep WHUNT
	 * happy: under WHUNT the tasklet may run before
	 * hif_tasklet_schedule() has finished.
	 */
	hif_latency_detect_tasklet_sched(scn, tasklet_entry);
	ce_tasklet_schedule(tasklet_entry);

	hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}

#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
/**
 * ce_poll_reap_by_id() - reap the available frames from a CE by polling
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success; -EBUSY if frames are still pending, since reaping
 *   them could otherwise loop forever
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, -1, 0);

	ce_per_engine_service(scn, ce_id);

	/*
	 * In the unlikely case that frames are still pending to reap,
	 * bail out with -EBUSY rather than risk an infinite loop.
	 */
	if (ce_check_rx_pending(CE_state))
		return -EBUSY;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, ce_id);

	return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success; -EBUSY if frames are still pending, since reaping
 *   them could otherwise loop forever
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
	uint8_t ce_id;

	if (hif_get_fw_diag_ce_id(scn, &ce_id))
		return 0;

	return ce_poll_reap_by_id(scn, ce_id);
}
#endif

#ifdef CE_TASKLET_SCHEDULE_ON_FULL
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (hif_napi_enabled(hif_hdl, ce_id)) {
		struct qca_napi_info *napi;

		napi = scn->napi_data.napis[ce_id];
		if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
			return -EBUSY;
	} else {
		if (test_bit(TASKLET_STATE_SCHED,
			     &hif_ce_state->tasklets[ce_id].intr_tq.state))
			return -EBUSY;
	}
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
	qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
	qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
}
#else
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
}
#endif

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_state *ce_state;

	if (tasklet_entry->ce_id != ce_id) {
		bool rl;

		rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
				tasklet_entry->ce_id, ce_id,
				tasklet_entry->inited,
				scn->ce_count);

		if (!rl)
			ce_tasklet_entry_dump(hif_ce_state);

		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d > CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}
	/* only index into ce_id_to_state once the bounds check has passed */
	ce_state = scn->ce_id_to_state[ce_id];

	ce_interrupt_lock(ce_state);
	if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	ce_interrupt_unlock(ce_state);

	return IRQ_HANDLED;
}
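
/*
 * Dispatch flow summary: the hard-irq path above disables the CE
 * interrupt, records the event, and hands processing to either NAPI or
 * the CE tasklet; the interrupt is re-enabled only after ce_tasklet()
 * (or the NAPI poll) has drained the ring, so each CE runs in interrupt
 * mode or poll mode, never both at once.
 */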

/**
 * ce_name - human-readable CE interrupt names, indexed by CE id
 */
const char *ce_name[CE_COUNT_MAX] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
	"WLAN_CE_12",
	"WLAN_CE_13",
	"WLAN_CE_14",
	"WLAN_CE_15",
#endif
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err("pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err("cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
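
/*
 * Usage sketch (hypothetical caller; a mask of 0x3 covers copy engines
 * 0 and 1, and both CEs must already have their tasklets initialized):
 *
 *	if (ce_register_irq(hif_ce_state, 0x3) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	ce_unregister_irq(hif_ce_state, 0x3);
 */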
1048