xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include <linux/pci.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/if_arp.h>
32 #include "qdf_lock.h"
33 #include "qdf_types.h"
34 #include "qdf_status.h"
35 #include "regtable.h"
36 #include "hif.h"
37 #include "hif_io32.h"
38 #include "ce_main.h"
39 #include "ce_api.h"
40 #include "ce_reg.h"
41 #include "ce_internal.h"
42 #include "ce_tasklet.h"
43 #include "pld_common.h"
44 #include "hif_debug.h"
45 #include "hif_napi.h"
46 
47 
/**
 * struct tasklet_work - per-CE shim used to re-arm a CE tasklet from
 *	process context via the shared system workqueue
 *
 * @id: copy engine id this worker reschedules (index into tasklet_workers)
 * @data: opaque HIF context (struct hif_softc *) recorded at init time
 * @work: work item handed to the kernel workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};
59 
60 
61 /**
62  * reschedule_ce_tasklet_work_handler() - reschedule work
63  * @work: struct work_struct
64  *
65  * Return: N/A
66  */
67 static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
68 {
69 	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
70 						    work);
71 	struct hif_softc *scn = ce_work->data;
72 	struct HIF_CE_state *hif_ce_state;
73 
74 	if (NULL == scn) {
75 		HIF_ERROR("%s: tasklet scn is null", __func__);
76 		return;
77 	}
78 
79 	hif_ce_state = HIF_GET_CE_STATE(scn);
80 
81 	if (scn->hif_init_done == false) {
82 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
83 		return;
84 	}
85 	tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
86 }
87 
/* one deferred-work shim per copy engine; used by ce_schedule_tasklet() */
static struct tasklet_work tasklet_workers[CE_ID_MAX];
/* set once init_tasklet_workers() has populated tasklet_workers[] */
static bool work_initialized;
90 
/**
 * init_tasklet_work() - initialize a work item with the given handler
 * @work: work item to initialize
 * @work_handler: callback the workqueue will invoke for @work
 *
 * Thin wrapper around INIT_WORK() so callers stay free of the kernel
 * workqueue macro.
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}
103 
104 /**
105  * init_tasklet_workers() - init_tasklet_workers
106  * @scn: HIF Context
107  *
108  * Return: N/A
109  */
110 void init_tasklet_workers(struct hif_opaque_softc *scn)
111 {
112 	uint32_t id;
113 
114 	for (id = 0; id < CE_ID_MAX; id++) {
115 		tasklet_workers[id].id = id;
116 		tasklet_workers[id].data = scn;
117 		init_tasklet_work(&tasklet_workers[id].work,
118 				  reschedule_ce_tasklet_work_handler);
119 	}
120 	work_initialized = true;
121 }
122 
123 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
124 /**
125  * ce_schedule_tasklet() - schedule ce tasklet
126  * @tasklet_entry: struct ce_tasklet_entry
127  *
128  * Return: N/A
129  */
130 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
131 {
132 	if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX))
133 		schedule_work(&tasklet_workers[tasklet_entry->ce_id].work);
134 	else
135 		HIF_ERROR("%s: work_initialized = %d, ce_id = %d",
136 			__func__, work_initialized, tasklet_entry->ce_id);
137 }
138 #else
139 /**
140  * ce_schedule_tasklet() - schedule ce tasklet
141  * @tasklet_entry: struct ce_tasklet_entry
142  *
143  * Return: N/A
144  */
145 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
146 {
147 	tasklet_schedule(&tasklet_entry->intr_tq);
148 }
149 #endif
150 
151 /**
152  * ce_tasklet() - ce_tasklet
153  * @data: data
154  *
155  * Return: N/A
156  */
157 static void ce_tasklet(unsigned long data)
158 {
159 	struct ce_tasklet_entry *tasklet_entry =
160 		(struct ce_tasklet_entry *)data;
161 	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
162 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
163 	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];
164 
165 	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
166 			HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0);
167 
168 	if (qdf_atomic_read(&scn->link_suspended)) {
169 		HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
170 				__func__, tasklet_entry->ce_id);
171 		QDF_BUG(0);
172 	}
173 
174 	ce_per_engine_service(scn, tasklet_entry->ce_id);
175 
176 	qdf_lro_flush(CE_state->lro_data);
177 
178 	if (ce_check_rx_pending(CE_state)) {
179 		/*
180 		 * There are frames pending, schedule tasklet to process them.
181 		 * Enable the interrupt only when there is no pending frames in
182 		 * any of the Copy Engine pipes.
183 		 */
184 		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
185 				HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0);
186 
187 		ce_schedule_tasklet(tasklet_entry);
188 		return;
189 	}
190 
191 	if (scn->target_status != TARGET_STATUS_RESET)
192 		hif_irq_enable(scn, tasklet_entry->ce_id);
193 
194 	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
195 				 NULL, NULL, 0, 0);
196 
197 	qdf_atomic_dec(&scn->active_tasklet_cnt);
198 }
199 
200 /**
201  * ce_tasklet_init() - ce_tasklet_init
202  * @hif_ce_state: hif_ce_state
203  * @mask: mask
204  *
205  * Return: N/A
206  */
207 void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
208 {
209 	int i;
210 
211 	for (i = 0; i < CE_COUNT_MAX; i++) {
212 		if (mask & (1 << i)) {
213 			hif_ce_state->tasklets[i].ce_id = i;
214 			hif_ce_state->tasklets[i].inited = true;
215 			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
216 			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
217 				ce_tasklet,
218 				(unsigned long)&hif_ce_state->tasklets[i]);
219 		}
220 	}
221 }
222 /**
223  * ce_tasklet_kill() - ce_tasklet_kill
224  * @hif_ce_state: hif_ce_state
225  *
226  * Return: N/A
227  */
228 void ce_tasklet_kill(struct hif_softc *scn)
229 {
230 	int i;
231 	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);
232 
233 	for (i = 0; i < CE_COUNT_MAX; i++)
234 		if (hif_ce_state->tasklets[i].inited) {
235 			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
236 			hif_ce_state->tasklets[i].inited = false;
237 		}
238 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
239 }
240 
241 #define HIF_CE_DRAIN_WAIT_CNT          20
242 /**
243  * hif_drain_tasklets(): wait untill no tasklet is pending
244  * @scn: hif context
245  *
246  * Let running tasklets clear pending trafic.
247  *
248  * Return: 0 if no bottom half is in progress when it returns.
249  *   -EFAULT if it times out.
250  */
251 int hif_drain_tasklets(struct hif_softc *scn)
252 {
253 	uint32_t ce_drain_wait_cnt = 0;
254 	int32_t tasklet_cnt;
255 
256 	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
257 		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
258 			HIF_ERROR("%s: CE still not done with access: %d",
259 				  __func__, tasklet_cnt);
260 
261 			return -EFAULT;
262 		}
263 		HIF_INFO("%s: Waiting for CE to finish access", __func__);
264 		msleep(10);
265 	}
266 	return 0;
267 }
268 
269 #ifdef WLAN_SUSPEND_RESUME_TEST
270 /**
271  * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should
272  *	trigger a unit-test resume.
273  * @scn: The HIF context to operate on
274  * @ce_id: The copy engine Id from the originating interrupt
275  *
276  * Return: true if the raised irq should trigger a unit-test resume
277  */
278 static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
279 {
280 	int errno;
281 	uint8_t wake_ce_id;
282 
283 	if (!hif_is_ut_suspended(scn))
284 		return false;
285 
286 	/* ensure passed ce_id matches wake ce_id */
287 	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
288 	if (errno) {
289 		HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno);
290 		return false;
291 	}
292 
293 	return ce_id == wake_ce_id;
294 }
295 #else
296 static inline bool
297 hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
298 {
299 	return false;
300 }
301 #endif /* WLAN_SUSPEND_RESUME_TEST */
302 
303 /**
304  * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
305  * @irq: irq coming from kernel
306  * @context: context
307  *
308  * Return: N/A
309  */
310 static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
311 {
312 	struct ce_tasklet_entry *tasklet_entry = context;
313 	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);
314 
315 	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
316 				     tasklet_entry);
317 }
318 
/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Bumps the per-CPU interrupt counter for @ce_id; qdf_get_cpu()
 * identifies the CPU taking the interrupt.
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}
333 
334 /**
335  * hif_display_ce_stats() - display ce stats
336  * @hif_ce_state: ce state
337  *
338  * Return: none
339  */
340 void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
341 {
342 #define STR_SIZE 128
343 	uint8_t i, j, pos;
344 	char str_buffer[STR_SIZE];
345 	int size, ret;
346 
347 	qdf_debug("CE interrupt statistics:");
348 	for (i = 0; i < CE_COUNT_MAX; i++) {
349 		size = STR_SIZE;
350 		pos = 0;
351 		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
352 			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
353 				j, hif_ce_state->stats.ce_per_cpu[i][j]);
354 			if (ret <= 0 || ret >= size)
355 				break;
356 			size -= ret;
357 			pos += ret;
358 		}
359 		qdf_debug("CE id[%2d] - %s", i, str_buffer);
360 	}
361 #undef STR_SIZE
362 }
363 
364 /**
365  * hif_clear_ce_stats() - clear ce stats
366  * @hif_ce_state: ce state
367  *
368  * Return: none
369  */
370 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
371 {
372 	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
373 }
374 
/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_NONE when the irq does not belong to @tasklet_entry or
 *	@ce_id is out of range; IRQ_HANDLED otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		HIF_ERROR("%s: ce_id (expect %d, received %d) does not match",
			  __func__, tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d",
			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	/* keep the CE quiet until the bottom half re-enables it */
	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	/* unit-test wake irq: resume fw and re-arm without scheduling */
	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	/* decremented by the bottom half when it finishes (ce_tasklet) */
	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);

	return IRQ_HANDLED;
}
424 
/* irq names passed to pld_ce_request_irq(), indexed by copy engine id */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
444 /**
445  * ce_unregister_irq() - ce_unregister_irq
446  * @hif_ce_state: hif_ce_state copy engine device handle
447  * @mask: which coppy engines to unregister for.
448  *
449  * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
450  * unregister for copy engine x.
451  *
452  * Return: QDF_STATUS
453  */
454 QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
455 {
456 	int id;
457 	int ce_count;
458 	int ret;
459 	struct hif_softc *scn;
460 
461 	if (hif_ce_state == NULL) {
462 		HIF_WARN("%s: hif_ce_state = NULL", __func__);
463 		return QDF_STATUS_SUCCESS;
464 	}
465 
466 	scn = HIF_GET_SOFTC(hif_ce_state);
467 	ce_count = scn->ce_count;
468 	/* we are removing interrupts, so better stop NAPI */
469 	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
470 			     NAPI_EVT_INT_STATE, (void *)0);
471 	if (ret != 0)
472 		HIF_ERROR("%s: napi_event INT_STATE returned %d",
473 			  __func__, ret);
474 	/* this is not fatal, continue */
475 
476 	/* filter mask to free only for ce's with irq registered */
477 	mask &= hif_ce_state->ce_register_irq_done;
478 	for (id = 0; id < ce_count; id++) {
479 		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
480 			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
481 					&hif_ce_state->tasklets[id]);
482 			if (ret < 0)
483 				HIF_ERROR(
484 					"%s: pld_unregister_irq error - ce_id = %d, ret = %d",
485 					__func__, id, ret);
486 		}
487 	}
488 	hif_ce_state->ce_register_irq_done &= ~mask;
489 
490 	return QDF_STATUS_SUCCESS;
491 }
492 /**
493  * ce_register_irq() - ce_register_irq
494  * @hif_ce_state: hif_ce_state
495  * @mask: which coppy engines to unregister for.
496  *
497  * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
498  * Register for copy engine x.
499  *
500  * Return: QDF_STATUS
501  */
502 QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
503 {
504 	int id;
505 	int ce_count;
506 	int ret;
507 	unsigned long irqflags = IRQF_TRIGGER_RISING;
508 	uint32_t done_mask = 0;
509 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
510 
511 	ce_count = scn->ce_count;
512 
513 	for (id = 0; id < ce_count; id++) {
514 		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
515 			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
516 				hif_snoc_interrupt_handler,
517 				irqflags, ce_name[id],
518 				&hif_ce_state->tasklets[id]);
519 			if (ret) {
520 				HIF_ERROR(
521 					"%s: cannot register CE %d irq handler, ret = %d",
522 					__func__, id, ret);
523 				ce_unregister_irq(hif_ce_state, done_mask);
524 				return QDF_STATUS_E_FAULT;
525 			}
526 			done_mask |= 1 << id;
527 		}
528 	}
529 	hif_ce_state->ce_register_irq_done |= done_mask;
530 
531 	return QDF_STATUS_SUCCESS;
532 }
533