xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  * Permission to use, copy, modify, and/or distribute this software for
7  * any purpose with or without fee is hereby granted, provided that the
8  * above copyright notice and this permission notice appear in all
9  * copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
12  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
13  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
14  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
15  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
16  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
17  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
18  * PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * This file was originally distributed by Qualcomm Atheros, Inc.
23  * under proprietary terms before Copyright ownership was assigned
24  * to the Linux Foundation.
25  */
26 
27 #define ATH_MODULE_NAME hif
28 #include "a_debug.h"
29 
30 #include <qdf_types.h>
31 #include <qdf_status.h>
32 #include <qdf_timer.h>
33 #include <qdf_time.h>
34 #include <qdf_lock.h>
35 #include <qdf_mem.h>
36 #include <qdf_util.h>
37 #include <qdf_defer.h>
38 #include <qdf_atomic.h>
39 #include <qdf_nbuf.h>
40 #include <athdefs.h>
41 #include <qdf_net_types.h>
42 #include <a_types.h>
43 #include <athdefs.h>
44 #include <a_osapi.h>
45 #include <hif.h>
46 #include <htc_services.h>
47 #include "hif_sdio_internal.h"
48 #include "if_sdio.h"
49 #include "regtable_sdio.h"
50 
51 /* under HL SDIO, with Interface Memory support, we have
52  * the following reasons to support 2 mboxs:
53  * a) we need place different buffers in different
54  * mempool, for example, data using Interface Memory,
55  * desc and other using DRAM, they need different SDIO
56  * mbox channels.
 * b) currently, tx mempool in LL case is separated from
58  * main mempool, the structure (descs at the beginning
59  * of every pool buffer) is different, because they only
60  * need store tx desc from host. To align with LL case,
61  * we also need 2 mbox support just as PCIe LL cases.
62  */
63 
64 /**
65  * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
66  * @pdev: sdio device context
67  * @pipeid: pipe index
68  *
69  *
70  * Return: mailbox index
71  */
72 uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
73 			uint8_t pipeid)
74 {
75 	/* TODO: temp version, should not hardcoded here, will be
76 	 * updated after HIF design
77 	 */
78 	if (2 == pipeid || 3 == pipeid)
79 		return 1;
80 	else if (0 == pipeid || 1 == pipeid)
81 		return 0;
82 	AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: pipeid=%d,should not happen\n",
83 					__func__, pipeid));
84 	qdf_assert(0);
85 	return INVALID_MAILBOX_NUMBER;
86 }
87 
88 /**
89  * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
90  * @pdev: sdio device
91  * @mboxIndex: mailbox index
92  * @upload: boolean to decide mailbox index
93  *
94  * Disable hif device interrupts and destroy hif context
95  *
96  * Return: none
97  */
98 uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
99 			uint8_t mbox_index,
100 				     bool upload)
101 {
102 	/* TODO: temp version, should not hardcoded here, will be
103 	 * updated after HIF design
104 	 */
105 	if (mbox_index == 0)
106 		return upload ? 1 : 0;
107 	else if (mbox_index == 1)
108 		return upload ? 3 : 2;
109 	AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
110 			("%s:-----mboxIndex=%d,upload=%d, should not happen\n",
111 			__func__, mbox_index, upload));
112 	qdf_assert(0);
113 	return 0xff;
114 }
115 
116 /**
117  * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
118  * @pDev: sdio device context
119  * @ServiceId: sevice index
120  * @ULPipe: uplink pipe id
121  * @DLPipe: down-linklink pipe id
122  * @SwapMapping: mailbox swap mapping
123  *
124  * Return: int
125  */
126 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev,
127 				     uint16_t service_id,
128 				     uint8_t *ul_pipe, uint8_t *dl_pipe,
129 				     bool swap_mapping)
130 {
131 	QDF_STATUS status = QDF_STATUS_SUCCESS;
132 
133 	switch (service_id) {
134 	case HTT_DATA_MSG_SVC:
135 		if (swap_mapping) {
136 			*ul_pipe = 1;
137 			*dl_pipe = 0;
138 		} else {
139 			*ul_pipe = 3;
140 			*dl_pipe = 2;
141 		}
142 		break;
143 
144 	case HTC_CTRL_RSVD_SVC:
145 	case HTC_RAW_STREAMS_SVC:
146 		*ul_pipe = 1;
147 		*dl_pipe = 0;
148 		break;
149 
150 	case WMI_DATA_BE_SVC:
151 	case WMI_DATA_BK_SVC:
152 	case WMI_DATA_VI_SVC:
153 	case WMI_DATA_VO_SVC:
154 		*ul_pipe = 1;
155 		*dl_pipe = 0;
156 		break;
157 
158 	case WMI_CONTROL_SVC:
159 		if (swap_mapping) {
160 			*ul_pipe = 3;
161 			*dl_pipe = 2;
162 		} else {
163 			*ul_pipe = 1;
164 			*dl_pipe = 0;
165 		}
166 		break;
167 
168 	default:
169 		status = !QDF_STATUS_SUCCESS;
170 		break;
171 	}
172 	return status;
173 }
174 
175 /**
176  * hif_dev_alloc_rx_buffer() - allocate rx buffer.
177  * @pDev: sdio device context
178  *
179  *
180  * Return: htc buffer pointer
181  */
182 HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev)
183 {
184 	HTC_PACKET *packet;
185 	qdf_nbuf_t netbuf;
186 	uint32_t bufsize = 0, headsize = 0;
187 
188 	bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET;
189 	headsize = sizeof(HTC_PACKET);
190 	netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false);
191 	if (netbuf == NULL) {
192 		hif_err_rl("Allocate netbuf failed");
193 		return NULL;
194 	}
195 	packet = (HTC_PACKET *) qdf_nbuf_data(netbuf);
196 	qdf_nbuf_reserve(netbuf, headsize);
197 
198 	SET_HTC_PACKET_INFO_RX_REFILL(packet,
199 				      pdev,
200 				      qdf_nbuf_data(netbuf),
201 				      bufsize, ENDPOINT_0);
202 	SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf);
203 	return packet;
204 }
205 
206 /**
207  * hif_dev_create() - create hif device after probe.
208  * @scn: HIF context
209  * @callbacks: htc callbacks
210  * @target: HIF target
211  *
212  *
213  * Return: int
214  */
215 struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
216 			struct hif_msg_callbacks *callbacks, void *target)
217 {
218 
219 	QDF_STATUS status;
220 	struct hif_sdio_device *pdev;
221 
222 	pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device));
223 	if (!pdev) {
224 		A_ASSERT(false);
225 		return NULL;
226 	}
227 
228 	qdf_spinlock_create(&pdev->Lock);
229 	qdf_spinlock_create(&pdev->TxLock);
230 	qdf_spinlock_create(&pdev->RxLock);
231 
232 	pdev->HIFDevice = hif_device;
233 	pdev->pTarget = target;
234 	status = hif_configure_device(hif_device,
235 				      HIF_DEVICE_SET_HTC_CONTEXT,
236 				      (void *)pdev, sizeof(pdev));
237 	if (status != QDF_STATUS_SUCCESS) {
238 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
239 				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
240 				 __func__));
241 	}
242 
243 	A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks));
244 
245 	return pdev;
246 }
247 
248 /**
249  * hif_dev_destroy() - destroy hif device.
250  * @pDev: sdio device context
251  *
252  *
253  * Return: none
254  */
255 void hif_dev_destroy(struct hif_sdio_device *pdev)
256 {
257 	QDF_STATUS status;
258 
259 	status = hif_configure_device(pdev->HIFDevice,
260 				      HIF_DEVICE_SET_HTC_CONTEXT,
261 				      (void *)NULL, 0);
262 	if (status != QDF_STATUS_SUCCESS) {
263 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
264 				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
265 				 __func__));
266 	}
267 	qdf_mem_free(pdev);
268 }
269 
270 /**
271  * hif_dev_from_hif() - get sdio device from hif device.
272  * @pDev: hif device context
273  *
274  *
275  * Return: hif sdio device context
276  */
277 struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device)
278 {
279 	struct hif_sdio_device *pdev = NULL;
280 	QDF_STATUS status;
281 
282 	status = hif_configure_device(hif_device,
283 				HIF_DEVICE_GET_HTC_CONTEXT,
284 				(void **)&pdev, sizeof(struct hif_sdio_device));
285 	if (status != QDF_STATUS_SUCCESS) {
286 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
287 				("(%s)HTC_SDIO_CONTEXT is NULL!!!\n",
288 				 __func__));
289 	}
290 	return pdev;
291 }
292 
293 /**
294  * hif_dev_disable_interrupts() - disable hif device interrupts.
295  * @pDev: sdio device context
296  *
297  *
298  * Return: int
299  */
300 QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev)
301 {
302 	struct MBOX_IRQ_ENABLE_REGISTERS regs;
303 	QDF_STATUS status = QDF_STATUS_SUCCESS;
304 
305 	HIF_ENTER();
306 
307 	LOCK_HIF_DEV(pdev);
308 	/* Disable all interrupts */
309 	pdev->IrqEnableRegisters.int_status_enable = 0;
310 	pdev->IrqEnableRegisters.cpu_int_status_enable = 0;
311 	pdev->IrqEnableRegisters.error_status_enable = 0;
312 	pdev->IrqEnableRegisters.counter_int_status_enable = 0;
313 	/* copy into our temp area */
314 	A_MEMCPY(&regs,
315 		 &pdev->IrqEnableRegisters, sizeof(pdev->IrqEnableRegisters));
316 
317 	UNLOCK_HIF_DEV(pdev);
318 
319 	/* always synchronous */
320 	status = hif_read_write(pdev->HIFDevice,
321 				INT_STATUS_ENABLE_ADDRESS,
322 				(char *) &regs,
323 				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
324 				HIF_WR_SYNC_BYTE_INC, NULL);
325 
326 	if (status != QDF_STATUS_SUCCESS) {
327 		/* Can't write it for some reason */
328 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
329 			("Failed to update interrupt control registers err: %d",
330 			 status));
331 	}
332 
333 	/* To Do mask the host controller interrupts */
334 	hif_mask_interrupt(pdev->HIFDevice);
335 	HIF_EXIT("status :%d", status);
336 	return status;
337 }
338 
/**
 * hif_dev_enable_interrupts() - enable hif device interrupts.
 * @pdev: sdio device context
 *
 * Disables everything first (see rationale below), unmasks the host
 * controller interrupt, builds the interrupt-enable register image
 * (mailbox data, CPU, error and counter sources) under the device lock,
 * and writes the image to the target synchronously.
 *
 * Return: QDF_STATUS_SUCCESS on success, error from hif_read_write()
 *         otherwise
 */
QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	struct MBOX_IRQ_ENABLE_REGISTERS regs;

	HIF_ENTER();

	/* for good measure, make sure interrupts are disabled
	 * before unmasking at the HIF layer.
	 * The rationale here is that between device insertion
	 * (where we clear the interrupts the first time)
	 * and when HTC is finally ready to handle interrupts,
	 * other software can perform target "soft" resets.
	 * The AR6K interrupt enables reset back to an "enabled"
	 * state when this happens.
	 */
	hif_dev_disable_interrupts(pdev);

	/* Unmask the host controller interrupts */
	hif_un_mask_interrupt(pdev->HIFDevice);

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	pdev->IrqEnableRegisters.int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
			INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable data interrupts for both mailboxes (bits 0 and 1) */
	pdev->IrqEnableRegisters.int_status_enable |=
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register.
	 * NOTE(review): the >> 16 appears to reposition the SET macro's
	 * 32-bit value into this byte-wide register field — confirm
	 * against regtable_sdio.h
	 */
	pdev->IrqEnableRegisters.error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors);
	 * >> 24 aligns the field like the >> 16 above
	 */
	pdev->IrqEnableRegisters.counter_int_status_enable =
	   (COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >>
		24;

	/* copy into our temp area so the bus write runs unlocked */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters,
		 sizeof(struct MBOX_IRQ_ENABLE_REGISTERS));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
		  ("Failed to update interrupt control registers err: %d\n",
				 status));

	}
	HIF_EXIT();
	return status;
}
425 
/**
 * hif_dev_setup() - query hif layer capabilities and attach HTC callbacks.
 * @pdev: sdio device context
 *
 * Reads the mailbox addresses and block sizes from the hif layer,
 * determines the IRQ processing mode (sync-only vs async+sync) and its
 * optional yield parameters, picks up the optional recv-event
 * mask/unmask hook, disables device interrupts, and finally registers
 * the rw-completion and DSR handlers via hif_attach_htc().
 *
 * Return: status of hif_attach_htc()
 */
QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint32_t blocksizes[MAILBOX_COUNT];
	struct htc_callbacks htc_cbs;
	struct hif_sdio_dev *hif_device = pdev->HIFDevice;

	HIF_ENTER();

	/* cache the per-mailbox addresses in the device context */
	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_GET_MBOX_ADDR failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("(%s)HIF_DEVICE_GET_MBOX_BLOCK_SIZE failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	/* block size must be a power of two for the mask to be valid */
	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
	pdev->BlockMask = pdev->BlockSize - 1;
	A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0);

	/* assume we can process HIF interrupt events asynchronously */
	pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

	/* see if the HIF layer overrides this assumption */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_IRQ_PROC_MODE,
			     &pdev->HifIRQProcessingMode,
			     sizeof(pdev->HifIRQProcessingMode));

	switch (pdev->HifIRQProcessingMode) {
	case HIF_DEVICE_IRQ_SYNC_ONLY:
		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
			("HIF Interrupt processing is SYNC ONLY\n"));
		/* see if HIF layer wants HTC to yield */
		hif_configure_device(hif_device,
				     HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
				     &pdev->HifIRQYieldParams,
				     sizeof(pdev->HifIRQYieldParams));

		if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) {
			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
				("HIF req of DSR yield per %d RECV packets\n",
				 pdev->HifIRQYieldParams.
				 recv_packet_yield_count));
			pdev->DSRCanYield = true;
		}
		break;
	case HIF_DEVICE_IRQ_ASYNC_SYNC:
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("HIF Interrupt processing is ASYNC and SYNC\n"));
		break;
	default:
		A_ASSERT(false);
		break;
	}

	pdev->HifMaskUmaskRecvEvent = NULL;

	/* see if the HIF layer implements the mask/unmask recv
	 * events function
	 */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
			     &pdev->HifMaskUmaskRecvEvent,
			     sizeof(pdev->HifMaskUmaskRecvEvent));

	/* NOTE(review): this status is overwritten by hif_attach_htc()
	 * below without being checked — confirm intentional
	 */
	status = hif_dev_disable_interrupts(pdev);

	qdf_mem_zero(&htc_cbs, sizeof(struct htc_callbacks));
	/* the device layer handles these */
	htc_cbs.rwCompletionHandler = hif_dev_rw_completion_handler;
	htc_cbs.dsrHandler = hif_dev_dsr_handler;
	htc_cbs.context = pdev;
	status = hif_attach_htc(pdev->HIFDevice, &htc_cbs);

	HIF_EXIT();
	return status;
}
526