xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 * @id: copy engine id whose tasklet is rescheduled
 * @data: opaque HIF context passed at init time
 * @work: work item queued from the tasklet when a reschedule is needed
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule the CE tasklet from
 *	process context
 * @work: work item embedded in struct tasklet_work
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet_work work item
 * @work: work item to initialize
 * @work_handler: handler the work item runs when scheduled
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize one reschedule work item per CE
 * @scn: HIF Context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - cancel any pending reschedule work items
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: tasklet entry for the CE to service
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
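	/*
	 * Note: on SLUB debug builds the tasklet is not re-raised directly;
	 * bouncing the reschedule through a worker thread presumably keeps a
	 * busy CE from pinning the CPU in softirq context on debug kernels.
	 */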
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			__func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: tasklet entry for the CE to service
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif

/**
 * ce_tasklet() - bottom half that services a copy engine
 * @data: pointer to the CE's struct ce_tasklet_entry
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

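	/* process all pending send/receive completions for this CE */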
	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending; reschedule the tasklet to process
		 * them. The interrupt is re-enabled only once no frames are
		 * pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the masked copy engines
 * @hif_ce_state: HIF CE state
 * @mask: bitmask; a 1 at bit i initializes the tasklet for CE i
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - disable and kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

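	/*
	 * Disable each inited tasklet, clear its inited flag so it will not
	 * reschedule itself, then kill it to flush any pending run.
	 */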
	for (i = 0; i < CE_COUNT_MAX; i++)
		if (hif_ce_state->tasklets[i].inited) {
			tasklet_disable(&hif_ce_state->tasklets[i].intr_tq);
			hif_ce_state->tasklets[i].inited = false;
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets(): wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

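	/* poll the active count up to HIF_CE_DRAIN_WAIT_CNT times, 10 ms
	 * apart, i.e. roughly a 200 ms budget before giving up
	 */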
	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine
 *	should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - dispatch an snoc CE interrupt
 * @irq: irq number raised by the kernel
 * @context: registered tasklet entry for this CE
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, else IRQ_NONE
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

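	/* map the kernel irq number back to its CE id before dispatching */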
	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
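		/* append per-CPU counters until the line buffer runs out */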
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

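	/*
	 * If the tasklet is already pending, drop the reference taken in
	 * ce_dispatch_interrupt() so the active tasklet count stays balanced.
	 */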
	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	return true;
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, else IRQ_NONE
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d >= CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

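	/* hand off to NAPI when it is enabled for this CE, else to the
	 * tasklet
	 */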
	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/**
 * ce_name - human-readable CE names used when requesting irqs,
 *	indexed by copy engine id
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - unregister irqs for the masked copy engines
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter the mask to free irqs only for CEs that registered one */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_unregister_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register irqs for the masked copy engines
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

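	/*
	 * Track each successfully registered CE in done_mask so a failure
	 * part way through can unwind exactly the irqs registered so far.
	 */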
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
562