xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision fe41df9c00a24498eda0519239a68dbbd8546193)
1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #include "qdf_lock.h"
24 #include "qdf_types.h"
25 #include "qdf_status.h"
26 #include "regtable.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "ce_main.h"
30 #include "ce_api.h"
31 #include "ce_reg.h"
32 #include "ce_internal.h"
33 #include "ce_tasklet.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "hif_napi.h"
37 
38 
/**
 * struct tasklet_work - deferred work used to re-schedule a CE tasklet
 *	from process context
 * @id: id of the copy engine this work re-schedules
 * @data: opaque HIF context (struct hif_opaque_softc *) stored at init time
 * @work: work_struct queued by ce_schedule_tasklet()
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
50 
51 
52 /**
53  * reschedule_ce_tasklet_work_handler() - reschedule work
54  * @work: struct work_struct
55  *
56  * Return: N/A
57  */
58 static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
59 {
60 	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
61 						    work);
62 	struct hif_softc *scn = ce_work->data;
63 	struct HIF_CE_state *hif_ce_state;
64 
65 	if (NULL == scn) {
66 		HIF_ERROR("%s: tasklet scn is null", __func__);
67 		return;
68 	}
69 
70 	hif_ce_state = HIF_GET_CE_STATE(scn);
71 
72 	if (scn->hif_init_done == false) {
73 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
74 		return;
75 	}
76 	tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
77 }
78 
/* one deferred-work slot per copy engine, filled by init_tasklet_workers() */
static struct tasklet_work tasklet_workers[CE_ID_MAX];
/* true once tasklet_workers[] is populated; checked before schedule_work() */
static bool work_initialized;
81 
/**
 * init_tasklet_work() - initialize a work item with the given handler
 * @work: work_struct to initialize
 * @work_handler: handler the work item will run
 *
 * Thin wrapper around INIT_WORK().
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
94 
95 /**
96  * init_tasklet_workers() - init_tasklet_workers
97  * @scn: HIF Context
98  *
99  * Return: N/A
100  */
101 void init_tasklet_workers(struct hif_opaque_softc *scn)
102 {
103 	uint32_t id;
104 
105 	for (id = 0; id < CE_ID_MAX; id++) {
106 		tasklet_workers[id].id = id;
107 		tasklet_workers[id].data = scn;
108 		init_tasklet_work(&tasklet_workers[id].work,
109 				  reschedule_ce_tasklet_work_handler);
110 	}
111 	work_initialized = true;
112 }
113 
114 /**
115  * deinit_tasklet_workers() - deinit_tasklet_workers
116  * @scn: HIF Context
117  *
118  * Return: N/A
119  */
120 void deinit_tasklet_workers(struct hif_opaque_softc *scn)
121 {
122 	u32 id;
123 
124 	for (id = 0; id < CE_ID_MAX; id++)
125 		cancel_work_sync(&tasklet_workers[id].work);
126 
127 	work_initialized = false;
128 }
129 
130 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
131 /**
132  * ce_schedule_tasklet() - schedule ce tasklet
133  * @tasklet_entry: struct ce_tasklet_entry
134  *
135  * Return: N/A
136  */
137 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
138 {
139 	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
140 		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
141 	else
142 		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
143 			__func__, work_initialized, tasklet_entry->ce_id);
144 }
145 #else
146 /**
147  * ce_schedule_tasklet() - schedule ce tasklet
148  * @tasklet_entry: struct ce_tasklet_entry
149  *
150  * Return: N/A
151  */
152 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
153 {
154 	tasklet_schedule(&tasklet_entry->intr_tq);
155 }
156 #endif
157 
158 /**
159  * ce_tasklet() - ce_tasklet
160  * @data: data
161  *
162  * Return: N/A
163  */
164 static void ce_tasklet(unsigned long data)
165 {
166 	struct ce_tasklet_entry *tasklet_entry =
167 		(struct ce_tasklet_entry *)data;
168 	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
169 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
170 	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];
171 
172 	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
173 			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);
174 
175 	if (qdf_atomic_read(&scn->link_suspended)) {
176 		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
177 				__func__, tasklet_entry->ce_id);
178 		QDF_BUG(0);
179 	}
180 
181 	ce_per_engine_service(scn, tasklet_entry->ce_id);
182 
183 	if (ce_check_rx_pending(CE_state)) {
184 		/*
185 		 * There are frames pending, schedule tasklet to process them.
186 		 * Enable the interrupt only when there is no pending frames in
187 		 * any of the Copy Engine pipes.
188 		 */
189 		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
190 				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);
191 
192 		ce_schedule_tasklet(tasklet_entry);
193 		return;
194 	}
195 
196 	if (scn->target_status != TARGET_STATUS_RESET)
197 		hif_irq_enable(scn, tasklet_entry->ce_id);
198 
199 	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
200 				 NULL, NULL, 0, 0);
201 
202 	qdf_atomic_dec(&scn->active_tasklet_cnt);
203 }
204 
205 /**
206  * ce_tasklet_init() - ce_tasklet_init
207  * @hif_ce_state: hif_ce_state
208  * @mask: mask
209  *
210  * Return: N/A
211  */
212 void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
213 {
214 	int i;
215 
216 	for (i = 0; i < CE_COUNT_MAX; i++) {
217 		if (mask & (1 << i)) {
218 			hif_ce_state->tasklets[i].ce_id = i;
219 			hif_ce_state->tasklets[i].inited = true;
220 			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
221 			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
222 				ce_tasklet,
223 				(unsigned long)&hif_ce_state->tasklets[i]);
224 		}
225 	}
226 }
227 /**
228  * ce_tasklet_kill() - ce_tasklet_kill
229  * @hif_ce_state: hif_ce_state
230  *
231  * Return: N/A
232  */
233 void ce_tasklet_kill(struct hif_softc *scn)
234 {
235 	int i;
236 	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);
237 
238 	for (i = 0; i < CE_COUNT_MAX; i++)
239 		if (hif_ce_state->tasklets[i].inited) {
240 			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
241 			hif_ce_state->tasklets[i].inited = false;
242 		}
243 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
244 }
245 
246 #define HIF_CE_DRAIN_WAIT_CNT          20
247 /**
248  * hif_drain_tasklets(): wait until no tasklet is pending
249  * @scn: hif context
250  *
251  * Let running tasklets clear pending trafic.
252  *
253  * Return: 0 if no bottom half is in progress when it returns.
254  *   -EFAULT if it times out.
255  */
256 int hif_drain_tasklets(struct hif_softc *scn)
257 {
258 	uint32_t ce_drain_wait_cnt = 0;
259 	int32_t tasklet_cnt;
260 
261 	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
262 		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
263 			HIF_ERROR("%s: CE still not done with access: %d",
264 				  __func__, tasklet_cnt);
265 
266 			return -EFAULT;
267 		}
268 		HIF_INFO("%s: Waiting for CE to finish access", __func__);
269 		msleep(10);
270 	}
271 	return 0;
272 }
273 
274 #ifdef WLAN_SUSPEND_RESUME_TEST
275 /**
276  * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should
277  *	trigger a unit-test resume.
278  * @scn: The HIF context to operate on
279  * @ce_id: The copy engine Id from the originating interrupt
280  *
281  * Return: true if the raised irq should trigger a unit-test resume
282  */
283 static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
284 {
285 	int errno;
286 	uint8_t wake_ce_id;
287 
288 	if (!hif_is_ut_suspended(scn))
289 		return false;
290 
291 	/* ensure passed ce_id matches wake ce_id */
292 	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
293 	if (errno) {
294 		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
295 		return false;
296 	}
297 
298 	return ce_id == wake_ce_id;
299 }
300 #else
301 static inline bool
302 hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
303 {
304 	return false;
305 }
306 #endif /* WLAN_SUSPEND_RESUME_TEST */
307 
308 /**
309  * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
310  * @irq: irq coming from kernel
311  * @context: context
312  *
313  * Return: N/A
314  */
315 static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
316 {
317 	struct ce_tasklet_entry *tasklet_entry = context;
318 	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);
319 
320 	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
321 				     tasklet_entry);
322 }
323 
324 /**
325  * hif_ce_increment_interrupt_count() - update ce stats
326  * @hif_ce_state: ce state
327  * @ce_id: ce id
328  *
329  * Return: none
330  */
331 static inline void
332 hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
333 {
334 	int cpu_id = qdf_get_cpu();
335 
336 	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
337 }
338 
339 /**
340  * hif_display_ce_stats() - display ce stats
341  * @hif_ce_state: ce state
342  *
343  * Return: none
344  */
345 void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
346 {
347 #define STR_SIZE 128
348 	uint8_t i, j, pos;
349 	char str_buffer[STR_SIZE];
350 	int size, ret;
351 
352 	qdf_debug("CE interrupt statistics:");
353 	for (i = 0; i < CE_COUNT_MAX; i++) {
354 		size = STR_SIZE;
355 		pos = 0;
356 		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
357 			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
358 				j, hif_ce_state->stats.ce_per_cpu[i][j]);
359 			if (ret <= 0 || ret >= size)
360 				break;
361 			size -= ret;
362 			pos += ret;
363 		}
364 		qdf_debug("CE id[%2d] - %s", i, str_buffer);
365 	}
366 #undef STR_SIZE
367 }
368 
369 /**
370  * hif_clear_ce_stats() - clear ce stats
371  * @hif_ce_state: ce state
372  *
373  * Return: none
374  */
375 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
376 {
377 	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
378 }
379 
/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: copy engine id derived from the irq source
 * @tasklet_entry: per-CE context registered with the irq
 *
 * Validates the CE id, masks the CE irq, accounts the interrupt and then
 * hands the work to either NAPI or the CE tasklet.  The irq is disabled
 * BEFORE the register-access check so it stays masked even on the early
 * IRQ_HANDLED return; active_tasklet_cnt incremented here is decremented
 * by ce_tasklet() on completion (the NAPI path presumably does its own
 * balancing elsewhere — not visible in this file).
 *
 * Return: IRQ_NONE on a mismatched/out-of-range ce_id, IRQ_HANDLED
 *	otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	/* the irq must have been registered for exactly this CE */
	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	/* target may be down (e.g. recovery); irq stays masked */
	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	/* unit-test suspend: a wake-CE irq resumes instead of servicing */
	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}
429 
/*
 * ce_name - printable name per copy engine, indexed by CE id; used as
 * the irq name when registering per-CE handlers in ce_register_irq().
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
449 /**
450  * ce_unregister_irq() - ce_unregister_irq
451  * @hif_ce_state: hif_ce_state copy engine device handle
452  * @mask: which coppy engines to unregister for.
453  *
454  * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
455  * unregister for copy engine x.
456  *
457  * Return: QDF_STATUS
458  */
459 QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
460 {
461 	int id;
462 	int ce_count;
463 	int ret;
464 	struct hif_softc *scn;
465 
466 	if (hif_ce_state == NULL) {
467 		HIF_WARN("%s: hif_ce_state = NULL", __func__);
468 		return QDF_STATUS_SUCCESS;
469 	}
470 
471 	scn = HIF_GET_SOFTC(hif_ce_state);
472 	ce_count = scn->ce_count;
473 	/* we are removing interrupts, so better stop NAPI */
474 	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
475 			     NAPI_EVT_INT_STATE, (void *)0);
476 	if (ret != 0)
477 		HIF_ERROR("%s: napi_event INT_STATE returned %d",
478 			  __func__, ret);
479 	/* this is not fatal, continue */
480 
481 	/* filter mask to free only for ce's with irq registered */
482 	mask &= hif_ce_state->ce_register_irq_done;
483 	for (id = 0; id < ce_count; id++) {
484 		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
485 			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
486 					&hif_ce_state->tasklets[id]);
487 			if (ret < 0)
488 				HIF_ERROR(
489 					"%s: pld_unregister_irq error - ce_id = %d, ret = %d",
490 					__func__, id, ret);
491 		}
492 	}
493 	hif_ce_state->ce_register_irq_done &= ~mask;
494 
495 	return QDF_STATUS_SUCCESS;
496 }
497 /**
498  * ce_register_irq() - ce_register_irq
499  * @hif_ce_state: hif_ce_state
500  * @mask: which coppy engines to unregister for.
501  *
502  * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
503  * Register for copy engine x.
504  *
505  * Return: QDF_STATUS
506  */
507 QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
508 {
509 	int id;
510 	int ce_count;
511 	int ret;
512 	unsigned long irqflags = IRQF_TRIGGER_RISING;
513 	uint32_t done_mask = 0;
514 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
515 
516 	ce_count = scn->ce_count;
517 
518 	for (id = 0; id < ce_count; id++) {
519 		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
520 			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
521 				hif_snoc_interrupt_handler,
522 				irqflags, ce_name[id],
523 				&hif_ce_state->tasklets[id]);
524 			if (ret) {
525 				HIF_ERROR(
526 					"%s: cannot register CE %d irq handler, ret = %d",
527 					__func__, id, ret);
528 				ce_unregister_irq(hif_ce_state, done_mask);
529 				return QDF_STATUS_E_FAULT;
530 			}
531 			done_mask |= 1 << id;
532 		}
533 	}
534 	hif_ce_state->ce_register_irq_done |= done_mask;
535 
536 	return QDF_STATUS_SUCCESS;
537 }
538