xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
/*
 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"


/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 *
 * @id: id of the copy engine this work item re-arms
 * @data: opaque pointer to the hif_softc the copy engine belongs to
 * @work: work queued to reschedule_ce_tasklet_work_handler()
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
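
/*
 * A static pool of these work items (tasklet_workers[] below) allows a CE
 * tasklet to be re-armed from process context: queuing an entry runs
 * reschedule_ce_tasklet_work_handler(), which schedules the tasklet again
 * once the driver is initialized. The code that queues these work items
 * lives outside this file.
 */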

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet-reschedule work item
 * @work: struct work_struct to initialize
 * @work_handler: handler the work item should run
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize one reschedule work item per CE
 * @scn: HIF Context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - cancel all CE reschedule work items
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * ce_tasklet() - bottom half handler for a copy engine interrupt
 * @data: struct ce_tasklet_entry for the copy engine, cast to unsigned long
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so reschedule the tasklet to
		 * process them. The interrupt is re-enabled only once no
		 * frames are pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0, 0);

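	/*
	 * Drop the active-tasklet reference taken in ce_dispatch_interrupt()
	 * so hif_drain_tasklets() can observe when all bottom halves have
	 * completed.
	 */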
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the selected copy engines
 * @hif_ce_state: HIF CE state
 * @mask: bit mask; bit x set means initialize the tasklet for copy engine x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - destroy all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the pending reschedule work before killing
			 * the tasklet to avoid a race between
			 * tasklet_schedule() and tasklet_kill().
			 * cancel_work_sync() does not return until
			 * reschedule_ce_tasklet_work_handler() has completed,
			 * and any tasklet it may still have scheduled is then
			 * flushed by tasklet_kill().
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

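/*
 * hif_drain_tasklets() below polls in 10 ms steps; combined with this limit,
 * the wait for outstanding bottom halves is bounded to roughly 200 ms.
 */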
#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets(): wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine
 *	should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half handler for SNOC CE interrupts
 * @irq: irq number assigned by the kernel
 * @context: struct ce_tasklet_entry registered for this irq
 *
 * Looks up the copy engine id for the irq via pld_get_ce_id() and hands the
 * interrupt to ce_dispatch_interrupt().
 *
 * Return: irqreturn_t from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
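		/*
		 * Undo the active_tasklet_cnt increment done by the caller
		 * (ce_dispatch_interrupt()); the already-pending tasklet run
		 * will service this interrupt.
		 */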
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	return true;
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: copy engine id the interrupt was raised for
 * @tasklet_entry: ce_tasklet_entry registered for that copy engine
 *
 * Return: IRQ_NONE if the interrupt does not belong to this copy engine,
 *	IRQ_HANDLED otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

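	/*
	 * Account for the bottom half about to be scheduled; the tasklet path
	 * releases this in ce_tasklet(), or in hif_tasklet_schedule() if the
	 * tasklet was already pending.
	 */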
	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/**
 * ce_name - irq names for the copy engines, indexed by copy engine id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
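
/*
 * ce_register_irq() / ce_unregister_irq() below take a bit mask: bit x
 * selects copy engine x. Only copy engines whose tasklets are initialized
 * are touched, and hif_ce_state->ce_register_irq_done tracks which of them
 * currently have an irq registered, so that unregister only frees irqs that
 * were actually requested.
 */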
/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_unregister_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
558