xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision d78dedc9dd8c4ee677ac1649d1d42f2a7c3cc1b7)
/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"


/**
 * struct tasklet_work - work context used to reschedule a CE tasklet
 * @id: copy engine id serviced by this work item
 * @data: opaque HIF context (struct hif_softc *)
 * @work: work item queued on the shared system workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};


/**
 * reschedule_ce_tasklet_work_handler() - reschedule the CE tasklet from work
 * @work: work item embedded in struct tasklet_work
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (NULL == scn) {
		HIF_ERROR("%s: tasklet scn is null", __func__);
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}
	tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a CE reschedule work item
 * @work: work item to initialize
 * @work_handler: handler to attach to the work item
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE reschedule work items
 * @scn: HIF context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}
	work_initialized = true;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_schedule_tasklet() - reschedule CE processing through a work item
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * On SLUB-debug builds the reschedule is bounced through the shared
 * workqueue, so the tasklet is re-armed from process context instead of
 * re-firing immediately in softirq context.
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
	else
		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
			__func__, work_initialized, tasklet_entry->ce_id);
}
#else
/**
 * ce_schedule_tasklet() - schedule the CE tasklet directly
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}
#endif

/**
 * ce_tasklet() - tasklet entry point for copy engine processing
 * @data: pointer to the struct ce_tasklet_entry, passed as unsigned long
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
				__func__, tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state)) {
		/*
		 * Frames are still pending; reschedule the tasklet to
		 * process them. The interrupt is re-enabled only when no
		 * frames are pending in any of the copy engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);

		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, 0, 0);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize the tasklets for the enabled copy engines
 * @hif_ce_state: HIF CE state
 * @mask: bitmask of copy engines; bit i set initializes the tasklet for CE i
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}
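
/*
 * Illustrative only (hypothetical values): with the bit-per-engine mask
 * convention above, the following call would initialize the tasklets for
 * copy engines 0 and 1:
 *
 *	ce_tasklet_init(hif_ce_state, (1 << 0) | (1 << 1));
 */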

/**
 * ce_tasklet_kill() - kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++)
		if (hif_ce_state->tasklets[i].inited) {
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
			hif_ce_state->tasklets[i].inited = false;
		}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no CE tasklet is pending
 * @scn: HIF context
 *
 * Lets the running tasklets clear any pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns,
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			HIF_ERROR("%s: CE still not done with access: %d",
				  __func__, tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
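
/*
 * Illustrative caller (hypothetical, not part of this file): a bus
 * suspend path would typically stop new CE interrupts before draining,
 * along the lines of:
 *
 *	for (ce_id = 0; ce_id < scn->ce_count; ce_id++)
 *		hif_irq_disable(scn, ce_id);
 *	if (hif_drain_tasklets(scn))
 *		return -EBUSY;
 *
 * where the -EBUSY (caller's choice) aborts the suspend while bottom
 * halves are still running.
 */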

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - test if an irq on the given copy engine
 *	should trigger a unit-test resume
 * @scn: the HIF context to operate on
 * @ce_id: the copy engine id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half handler for a SNOC CE interrupt
 * @irq: irq number raised by the kernel
 * @context: the registered struct ce_tasklet_entry
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}
#undef STR_SIZE
}
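
/*
 * Illustrative output of hif_display_ce_stats() (the counts are made up):
 * one line per copy engine, one [cpu]:count pair per CPU:
 *
 *	CE interrupt statistics:
 *	CE id[ 0] - [0]:12 [1]:0 [2]:3 [3]:0
 *	CE id[ 1] - [0]:847 [1]:5 [2]:0 [3]:0
 */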

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: copy engine id resolved from the originating interrupt
 * @tasklet_entry: the registered tasklet entry for this copy engine
 *
 * Return: IRQ_NONE if the interrupt is not for this copy engine,
 *	IRQ_HANDLED otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d >= CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}
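
/*
 * For reference, the typical path of a CE interrupt serviced in tasklet
 * (non-NAPI) mode through this file:
 *
 *	hif_snoc_interrupt_handler()
 *	  -> ce_dispatch_interrupt()   irq disabled, tasklet scheduled
 *	     -> ce_tasklet()           ring serviced by ce_per_engine_service()
 *	        -> hif_irq_enable()    once no rx frames remain pending
 */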

/**
 * ce_name - human-readable per-CE names used when registering irqs
 */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (hif_ce_state == NULL) {
		HIF_WARN("%s: hif_ce_state = NULL", __func__);
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		HIF_ERROR("%s: napi_event INT_STATE returned %d",
			  __func__, ret);
	/* this is not fatal, continue */

	/* filter the mask to free irqs only for ce's that registered one */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				HIF_ERROR(
					"%s: pld_ce_free_irq error - ce_id = %d, ret = %d",
					__func__, id, ret);
		}
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				HIF_ERROR(
					"%s: cannot register CE %d irq handler, ret = %d",
					__func__, id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
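
/*
 * Illustrative pairing (hypothetical caller, not part of this file):
 * request irqs for the first eight copy engines, then release the same
 * set at teardown; on failure ce_register_irq() unwinds any partial
 * registration itself before returning:
 *
 *	if (ce_register_irq(hif_ce_state, 0xff) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	ce_unregister_irq(hif_ce_state, 0xff);
 */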