xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - deferred work used to reschedule a CE tasklet
 * @id: copy engine id
 * @data: opaque HIF context (struct hif_softc *)
 * @reg_work: work item that reschedules the CE tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->hi_tasklet_ce)
		tasklet_hi_schedule(&tasklet_entry->intr_tq);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
	struct tasklet_work *ce_work = qdf_container_of(reg_work,
							struct tasklet_work,
							reg_work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - initialize a tasklet work item
 * @work: work struct to initialize
 * @work_handler: handler the work will run
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - initialize the reschedule worker for a CE
 * @scn: HIF context
 * @ce_id: copy engine id
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
			  reschedule_ce_tasklet_work_handler);
}

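/*
 * Usage sketch (illustrative only): a bus layer would typically set up one
 * worker per copy engine before enabling CE interrupts, e.g.
 *
 *	int ce_id;
 *
 *	for (ce_id = 0; ce_id < CE_ID_MAX; ce_id++)
 *		init_tasklet_worker_by_ceid(hif_hdl, ce_id);
 *
 * where hif_hdl is an assumed struct hif_opaque_softc pointer, not a name
 * taken from this file.
 */
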
/**
 * deinit_tasklet_workers() - cancel all pending reschedule workers
 * @scn: HIF context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1  -  2",
					       "2  -  5", "5  - 10", "  >  10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				       stats->ce_tasklet_exec_last_update[i][j],
				       &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				      stats->ce_tasklet_sched_last_update[i][j],
				      &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state, which holds the latency stats
 * @ce_id: ce_id_type
 *
 * Execution latency is measured from tasklet execution entry to now, and
 * scheduling latency from tasklet schedule entry to execution entry.
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

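	/*
	 * Bucket mapping, with a worked example: qdf_do_div() truncates,
	 * so exec_time = 1500us gives exec_ms = 1, and the ">=" checks
	 * below file it under CE_BUCKET_2_MS, i.e. the "1 - 2" ms bucket
	 * printed by hif_ce_latency_stats(). Sub-millisecond samples are
	 * split on the raw microsecond value instead.
	 */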
	if (exec_ms >= 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms >= 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms >= 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms >= 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms >= 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms >= 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms >= 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms >= 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

#ifdef HIF_DETECTION_LATENCY_ENABLE
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_sched_cpuid = qdf_get_cpu();
	scn->latency_detect.ce2_tasklet_sched_time = qdf_system_ticks();
}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_exec_time = qdf_system_ticks();
	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
}
#else
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}
#endif

/**
 * ce_tasklet() - CE tasklet bottom-half handler
 * @data: context, holds a pointer to the CE's struct ce_tasklet_entry
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	hif_latency_detect_tasklet_exec(scn, tasklet_entry);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending; schedule the tasklet to process
		 * them. The interrupt is re-enabled only when there are no
		 * pending frames in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, -1, 0);

		if (test_bit(TASKLET_STATE_SCHED,
			     &tasklet_entry->intr_tq.state)) {
			hif_info("ce_id%d tasklet was scheduled, return",
				 tasklet_entry->ce_id);
			qdf_atomic_dec(&scn->active_tasklet_cnt);
			return;
		}

		ce_tasklet_schedule(tasklet_entry);
		hif_latency_detect_tasklet_sched(scn, tasklet_entry);

		if (scn->ce_latency_stats) {
			ce_tasklet_update_bucket(hif_ce_state,
						 tasklet_entry->ce_id);
			hif_record_tasklet_sched_entry_ts(scn,
							  tasklet_entry->ce_id);
		}
		return;
	}

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	if ((scn->target_status != TARGET_STATUS_RESET) &&
	    !scn->free_irq_done)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize CE tasklets
 * @hif_ce_state: HIF CE state
 * @mask: bitmask of copy engines to initialize; bit x selects CE x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;
	struct CE_attr *attr;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

			attr = &hif_ce_state->host_ce_config[i];
			if (attr->flags & CE_ATTR_HI_TASKLET)
				hif_ce_state->tasklets[i].hi_tasklet_ce = true;
			else
				hif_ce_state->tasklets[i].hi_tasklet_ce = false;

			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}
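
/*
 * Illustrative sketch only: the high-priority tasklet selection above is
 * driven by the host CE attributes, so a hypothetical config entry such as
 *
 *	static struct CE_attr host_ce_cfg[] = {
 *		{ .flags = CE_ATTR_FLAGS | CE_ATTR_HI_TASKLET, ... },
 *	};
 *
 * (field values assumed, not taken from this file) would make
 * ce_tasklet_init() pick tasklet_hi_schedule() for that copy engine.
 */
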
/**
 * ce_tasklet_kill() - destroy the CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill()
			 * to avoid a race between tasklet_schedule() and
			 * tasklet_kill(). qdf_cancel_work() does not
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes, and even if a tasklet_schedule() slips
			 * in, tasklet_kill() takes care of it.
			 */
			qdf_cancel_work(&tasklet_workers[i].reg_work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);

			return -EFAULT;
		}
		hif_info("Waiting for CE to finish access");
		msleep(10);
	}
	return 0;
}
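
/*
 * Ordering sketch (illustrative only, assembled from the comments in this
 * file rather than from a single call site): a bus-suspend path would
 * roughly do
 *
 *	1. disable the CE irqs
 *	2. hif_drain_tasklets(scn)	wait for running bottom halves
 *	3. hif_drain_fw_diag_ce(scn)	poll-reap leftover FW diag frames
 */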

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - Tests if an irq on the given copy engine
 *	should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half handler for SNOC CE interrupts
 * @irq: irq number, as passed by the kernel
 * @context: the CE's struct ce_tasklet_entry
 *
 * Return: the irqreturn_t result of ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		hif_debug("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}
	/*
	 * Record the schedule time before tasklet_schedule() to keep WHUNT
	 * happy: under WHUNT the tasklet may run before
	 * hif_tasklet_schedule() has finished.
	 */
	hif_latency_detect_tasklet_sched(scn, tasklet_entry);
	ce_tasklet_schedule(tasklet_entry);

	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}

/**
 * ce_poll_reap_by_id() - reap the available frames from CE by polling per ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success; -EBUSY in the unlikely case that frames are still
 *	pending, to avoid reaping in an infinite loop
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, -1, 0);

	ce_per_engine_service(scn, ce_id);

	/*
	 * In the unlikely case that frames are still pending to reap, bail
	 * out with -EBUSY rather than looping indefinitely.
	 */
	if (ce_check_rx_pending(CE_state))
		return -EBUSY;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, ce_id);

	return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success; -EBUSY in the unlikely case that frames are still
 *	pending, to avoid reaping in an infinite loop
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
	uint8_t ce_id;

	if (hif_get_fw_diag_ce_id(scn, &ce_id))
		return 0;

	return ce_poll_reap_by_id(scn, ce_id);
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt is dispatched, IRQ_NONE on a ce_id
 *	mismatch or an out-of-range ce_id
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		hif_err("ce_id (expect %d, received %d) does not match",
			tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d >= CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/* ce_name - CE interrupt name strings, indexed by copy engine id */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err(
					"pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err(
					"cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
895