/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"


/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 * @id: id of the copy engine this work item reschedules
 * @data: opaque HIF context (struct hif_softc) the work operates on
 * @work: work struct queued to reschedule the tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};


/**
 * reschedule_ce_tasklet_work_handler() - reschedule the CE tasklet
 * @work: work item embedded in a struct tasklet_work
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet rescheduling work item
 * @work: work item to initialize
 * @work_handler: handler the work item runs
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE rescheduling workers
 * @scn: HIF context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - cancel the per-CE rescheduling workers
 * @scn: HIF context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}
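
/*
 * Illustrative pairing (hypothetical caller names, not from this file):
 * the worker pool is expected to bracket the HIF lifetime, e.g.
 *
 *	init_tasklet_workers(hif_hdl);		// during HIF attach
 *	deinit_tasklet_workers(hif_hdl);	// during HIF detach
 */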

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			__func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif
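
/*
 * With HIF_CONFIG_SLUB_DEBUG_ON the reschedule is bounced through a
 * workqueue (process context) rather than re-queued directly from
 * softirq context; presumably this lets debug-heavy CE processing
 * yield the CPU between passes instead of monopolizing softirqs.
 */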

/**
 * ce_tasklet() - bottom half that services a copy engine
 * @data: context cast to the struct ce_tasklet_entry being serviced
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending; reschedule the tasklet to
		 * process them. The interrupt is re-enabled only when no
		 * frames are pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
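
/*
 * Rough sketch of the deferral chain above, assuming the interrupt was
 * dispatched through ce_dispatch_interrupt():
 *
 *	hard irq -> hif_irq_disable() + active_tasklet_cnt++ + schedule
 *	ce_tasklet() -> ce_per_engine_service()
 *	    rx pending -> ce_schedule_tasklet() and run again
 *	    otherwise  -> hif_irq_enable() + active_tasklet_cnt--
 */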

/**
 * ce_tasklet_init() - initialize tasklets for the masked copy engines
 * @hif_ce_state: HIF CE state
 * @mask: bitmask of copy engine ids to initialize tasklets for
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}
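
/*
 * Illustrative usage (hypothetical attach path): initialize tasklets
 * for copy engines 0 and 2 only, by setting the matching mask bits:
 *
 *	ce_tasklet_init(hif_ce_state, (1 << 0) | (1 << 2));
 */
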
/**
 * ce_tasklet_kill() - disable and kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++)
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_disable
			 * to avoid a race between tasklet_schedule and
			 * tasklet_kill. Here cancel_work_sync() won't
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes. Even if tasklet_schedule() happens,
			 * tasklet_disable() will take care of that.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_disable(&hif_ce_state->tasklets[i].intr_tq);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no CE tasklet is pending
 * @scn: HIF context
 *
 * Lets running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns,
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
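
/*
 * Illustrative usage (hypothetical suspend path): give the bottom
 * halves up to ~200 ms (20 x 10 ms polls) to drain before giving up:
 *
 *	if (hif_drain_tasklets(scn))
 *		return -EBUSY;
 */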

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - tests if an irq on the given copy engine
 *	should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - irq handler for SNOC copy engine interrupts
 * @irq: irq number from the kernel
 * @context: the struct ce_tasklet_entry registered with this irq
 *
 * Return: irqreturn_t from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	return true;
}
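
/*
 * Note: ce_dispatch_interrupt() increments active_tasklet_cnt before
 * calling hif_tasklet_schedule(), so when the tasklet is already queued
 * the count is dropped again here; otherwise hif_drain_tasklets() would
 * wait for a tasklet run that will never happen.
 */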

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_NONE if the interrupt does not belong to this copy engine,
 *   otherwise IRQ_HANDLED
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d >= CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/* Human-readable names used when requesting the per-CE irqs */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
/**
 * ce_unregister_irq() - unregister copy engine irqs matching mask
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_unregister_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}
/**
 * ce_register_irq() - register copy engine irqs matching mask
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
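/*
 * Illustrative usage (hypothetical attach/detach paths, mask value is
 * an assumption covering the twelve named copy engines): request irqs
 * on attach, release the same set on teardown; ce_register_irq() itself
 * unwinds partial registrations on failure:
 *
 *	if (ce_register_irq(hif_ce_state, 0xFFF) != QDF_STATUS_SUCCESS)
 *		goto err;
 *	...
 *	ce_unregister_irq(hif_ce_state, 0xFFF);
 */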
575