/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#define ATH_MODULE_NAME hif
#include "a_debug.h"

#include <qdf_types.h>
#include <qdf_status.h>
#include <qdf_timer.h>
#include <qdf_time.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_util.h>
#include <qdf_defer.h>
#include <qdf_atomic.h>
#include <qdf_nbuf.h>
#include <athdefs.h>
#include <qdf_net_types.h>
#include <a_types.h>
#include <a_osapi.h>
#include <hif.h>
#include <htc_services.h>
#include "hif_sdio_internal.h"
#include "if_sdio.h"
#include "regtable_sdio.h"

/**
 * hif_dev_alloc_rx_buffer() - allocate rx buffer.
 * @pdev: sdio device context
 *
 * Return: pointer to the allocated HTC_PACKET, or NULL on failure
 */
HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev)
{
	HTC_PACKET *packet;
	qdf_nbuf_t netbuf;
	uint32_t bufsize = 0, headsize = 0;

	bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET;
	headsize = sizeof(HTC_PACKET);
	netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false);
	if (!netbuf) {
		hif_err_rl("Failed to allocate netbuf");
		return NULL;
	}
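	/* The HTC_PACKET bookkeeping header is carved out of the front of
	 * the allocation; qdf_nbuf_reserve() then moves the data pointer
	 * past it, leaving bufsize bytes for the actual receive payload.
	 */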
	packet = (HTC_PACKET *) qdf_nbuf_data(netbuf);
	qdf_nbuf_reserve(netbuf, headsize);

	SET_HTC_PACKET_INFO_RX_REFILL(packet,
				      pdev,
				      qdf_nbuf_data(netbuf),
				      bufsize, ENDPOINT_0);
	SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf);
	return packet;
}
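
/*
 * Usage sketch (illustrative only, not called from this file): an RX
 * refill path could allocate a packet and queue it for the target,
 * where the queue name below is hypothetical:
 *
 *	HTC_PACKET *packet = hif_dev_alloc_rx_buffer(pdev);
 *
 *	if (packet)
 *		HTC_PACKET_ENQUEUE(&rx_refill_queue, packet);
 */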

/**
 * hif_dev_create() - create hif device after probe.
 * @hif_device: HIF context
 * @callbacks: htc callbacks
 * @target: HIF target
 *
 * Return: pointer to the new hif_sdio_device, or NULL on failure
 */
struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
			struct hif_msg_callbacks *callbacks, void *target)
{
	QDF_STATUS status;
	struct hif_sdio_device *pdev;

	HIF_ENTER();
	pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device));
	if (!pdev) {
		A_ASSERT(false);
		return NULL;
	}

	qdf_spinlock_create(&pdev->Lock);
	qdf_spinlock_create(&pdev->TxLock);
	qdf_spinlock_create(&pdev->RxLock);

	pdev->HIFDevice = hif_device;
	pdev->pTarget = target;
	status = hif_configure_device(NULL, hif_device,
				      HIF_DEVICE_SET_HTC_CONTEXT,
				      (void *)pdev, sizeof(pdev));
	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: set context failed", __func__);

	A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks));

	HIF_EXIT();
	return pdev;
}
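
/*
 * Note: the pdev pointer registered above with HIF_DEVICE_SET_HTC_CONTEXT
 * is what hif_dev_from_hif() later retrieves via
 * HIF_DEVICE_GET_HTC_CONTEXT; the mailbox swap helpers below rely on
 * that round trip to map a struct hif_sdio_dev back to its
 * struct hif_sdio_device.
 */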

/**
 * hif_dev_destroy() - destroy hif device.
 * @pdev: sdio device context
 *
 * Return: none
 */
void hif_dev_destroy(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_SET_HTC_CONTEXT,
				      (void *)NULL, 0);
	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: set context failed", __func__);

	qdf_mem_free(pdev);
}

/**
 * hif_dev_from_hif() - get sdio device from hif device.
 * @hif_device: hif device context
 *
 * Return: hif sdio device context
 */
struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device)
{
	struct hif_sdio_device *pdev = NULL;
	QDF_STATUS status;

	status = hif_configure_device(NULL, hif_device,
				      HIF_DEVICE_GET_HTC_CONTEXT,
				      (void **)&pdev,
				      sizeof(struct hif_sdio_device));
	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: get context failed", __func__);

	return pdev;
}

/**
 * hif_dev_disable_interrupts() - disable hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	hif_dev_mask_interrupts(pdev);

	/* Mask the host controller interrupts */
	hif_mask_interrupt(pdev->HIFDevice);

	HIF_EXIT();
	return status;
}

/**
 * hif_dev_enable_interrupts() - enable hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;

	HIF_ENTER();

	/* for good measure, make sure interrupts are disabled
	 * before unmasking at the HIF layer.
	 * The rationale here is that between device insertion
	 * (where we clear the interrupts the first time)
	 * and when HTC is finally ready to handle interrupts,
	 * other software can perform target "soft" resets.
	 */
	status = hif_dev_disable_interrupts(pdev);

	/* Unmask the host controller interrupts */
	hif_un_mask_interrupt(pdev->HIFDevice);

	hif_dev_unmask_interrupts(pdev);

	HIF_EXIT();

	return status;
}

#define DEV_CHECK_RECV_YIELD(pdev) \
	((pdev)->CurrentDSRRecvCount >= \
	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
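
/*
 * Example (numbers illustrative): if the HIF layer sets
 * recv_packet_yield_count to 50, DEV_CHECK_RECV_YIELD() becomes true
 * once a single DSR pass has pulled 50 receive packets; the handler
 * below then stops without acking the interrupt, and the HIF layer
 * acks it on behalf of HTC.
 */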

/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 * @context: hif device context (struct hif_sdio_device)
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool done = false;
	bool async_proc = false;

	HIF_ENTER();

	/* reset the recv counter that tracks when we need
	 * to yield from the DSR
	 */
	pdev->CurrentDSRRecvCount = 0;
	/* reset counter used to flag a re-scan of IRQ
	 * status registers on the target
	 */
	pdev->RecheckIRQStatusCnt = 0;

	while (!done) {
		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
			/* the HIF layer does not allow async IRQ processing,
			 * override the asyncProc flag
			 */
			async_proc = false;
			/* this will cause us to re-enter ProcessPendingIRQ()
			 * and re-read interrupt status registers.
			 * This has a nice side effect of blocking us until all
			 * async read requests are completed. This behavior is
			 * required as we do not allow ASYNC processing
			 * in interrupt handlers (like Windows CE)
			 */

			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
				/* ProcessPendingIRQs() pulled enough recv
				 * messages to satisfy the yield count, stop
				 * checking for more messages and return
				 */
				break;
		}

		if (async_proc) {
			/* the function does some async I/O for performance,
			 * we need to exit the ISR immediately, the check below
			 * will prevent the interrupt from being
			 * Ack'd while we handle it asynchronously
			 */
			break;
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
		/* Ack the interrupt only if:
		 *  1. we did not get any errors in processing interrupts
		 *  2. there are no outstanding async processing requests
		 */
		if (pdev->DSRCanYield) {
			/* if the DSR can yield do not ACK the interrupt, there
			 * could be more pending messages. The HIF layer
			 * must ACK the interrupt on behalf of HTC
			 */
			HIF_INFO("%s: Yield (RX count: %d)",
				 __func__, pdev->CurrentDSRRecvCount);
		} else {
			HIF_INFO("%s: Ack interrupt", __func__);
			hif_ack_interrupt(pdev->HIFDevice);
		}
	}

	HIF_EXIT();
	return status;
}

/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);

	HIF_ENTER();

	hif_device->swap_mailbox = true;

	HIF_EXIT();
}

/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: true if the mailboxes are swapped, false otherwise
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device;

	HIF_ENTER();

	hif_device = hif_dev_from_hif(pdev);

	HIF_EXIT();

	return hif_device->swap_mailbox;
}

/**
 * hif_dev_setup() - set up sdio device.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	struct htc_callbacks htc_cbs;
	struct hif_sdio_dev *hif_device = pdev->HIFDevice;

	HIF_ENTER();

	status = hif_dev_setup_device(pdev);

	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: device specific setup failed", __func__);
		return QDF_STATUS_E_INVAL;
	}

	pdev->BlockMask = pdev->BlockSize - 1;
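	/* The mask derivation above is only valid when BlockSize is a
	 * power of two, which is what the assert below verifies.
	 */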
	A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0);

	/* assume we can process HIF interrupt events asynchronously */
	pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

	/* see if the HIF layer overrides this assumption */
	hif_configure_device(NULL, hif_device,
			     HIF_DEVICE_GET_IRQ_PROC_MODE,
			     &pdev->HifIRQProcessingMode,
			     sizeof(pdev->HifIRQProcessingMode));

	switch (pdev->HifIRQProcessingMode) {
	case HIF_DEVICE_IRQ_SYNC_ONLY:
		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
			("HIF Interrupt processing is SYNC ONLY\n"));
		/* see if HIF layer wants HTC to yield */
		hif_configure_device(NULL, hif_device,
				     HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
				     &pdev->HifIRQYieldParams,
				     sizeof(pdev->HifIRQYieldParams));

		if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) {
			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
				("HIF req of DSR yield per %d RECV packets\n",
				 pdev->HifIRQYieldParams.
				 recv_packet_yield_count));
			pdev->DSRCanYield = true;
		}
		break;
	case HIF_DEVICE_IRQ_ASYNC_SYNC:
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("HIF Interrupt processing is ASYNC and SYNC\n"));
		break;
	default:
		A_ASSERT(false);
		break;
	}

	pdev->HifMaskUmaskRecvEvent = NULL;

	/* see if the HIF layer implements the mask/unmask recv
	 * events function
	 */
	hif_configure_device(NULL, hif_device,
			     HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
			     &pdev->HifMaskUmaskRecvEvent,
			     sizeof(pdev->HifMaskUmaskRecvEvent));

	status = hif_dev_disable_interrupts(pdev);

	qdf_mem_zero(&htc_cbs, sizeof(struct htc_callbacks));
	/* the device layer handles these */
	htc_cbs.rw_compl_handler = hif_dev_rw_completion_handler;
	htc_cbs.dsr_handler = hif_dev_dsr_handler;
	htc_cbs.context = pdev;
	status = hif_attach_htc(pdev->HIFDevice, &htc_cbs);

	HIF_EXIT();
	return status;
}
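
/*
 * Typical bring-up order (sketch only; the callers live outside this
 * file and error handling is elided):
 *
 *	pdev = hif_dev_create(hif_device, &htc_callbacks, target);
 *	if (!pdev)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	status = hif_dev_setup(pdev);
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		status = hif_dev_enable_interrupts(pdev);
 *
 * hif_dev_setup() leaves device interrupts masked, so they are only
 * unmasked once HTC is ready, via hif_dev_enable_interrupts().
 */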