xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #define ATH_MODULE_NAME hif
20 #include "a_debug.h"
21 
22 #include <qdf_types.h>
23 #include <qdf_status.h>
24 #include <qdf_timer.h>
25 #include <qdf_time.h>
26 #include <qdf_lock.h>
27 #include <qdf_mem.h>
28 #include <qdf_util.h>
29 #include <qdf_defer.h>
30 #include <qdf_atomic.h>
31 #include <qdf_nbuf.h>
32 #include <athdefs.h>
33 #include <qdf_net_types.h>
34 #include <a_types.h>
35 #include <athdefs.h>
36 #include <a_osapi.h>
37 #include <hif.h>
38 #include <htc_services.h>
39 #include "hif_sdio_internal.h"
40 #include "if_sdio.h"
41 #include "regtable_sdio.h"
42 
43 /* under HL SDIO, with Interface Memory support, we have
44  * the following reasons to support 2 mboxs:
45  * a) we need place different buffers in different
46  * mempool, for example, data using Interface Memory,
47  * desc and other using DRAM, they need different SDIO
48  * mbox channels.
49  * b) currently, tx mempool in LL case is separated from
50  * main mempool, the structure (descs at the beginning
51  * of every pool buffer) is different, because they only
52  * need store tx desc from host. To align with LL case,
53  * we also need 2 mbox support just as PCIe LL cases.
54  */
55 
56 /**
57  * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
58  * @pdev: sdio device context
59  * @pipeid: pipe index
60  *
61  *
62  * Return: mailbox index
63  */
64 uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
65 			uint8_t pipeid)
66 {
67 	/* TODO: temp version, should not hardcoded here, will be
68 	 * updated after HIF design
69 	 */
70 	if (2 == pipeid || 3 == pipeid)
71 		return 1;
72 	else if (0 == pipeid || 1 == pipeid)
73 		return 0;
74 	AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: pipeid=%d,should not happen\n",
75 					__func__, pipeid));
76 	qdf_assert(0);
77 	return INVALID_MAILBOX_NUMBER;
78 }
79 
/**
 * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
 * @pdev: sdio device context
 * @mbox_index: mailbox index
 * @upload: true to select the upload (target-to-host) pipe
 *
 * Return: htc pipe id for the given mailbox and direction
 */
90 uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
91 			uint8_t mbox_index,
92 				     bool upload)
93 {
94 	/* TODO: temp version, should not hardcoded here, will be
95 	 * updated after HIF design
96 	 */
97 	if (mbox_index == 0)
98 		return upload ? 1 : 0;
99 	else if (mbox_index == 1)
100 		return upload ? 3 : 2;
101 	AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
102 			("%s:-----mboxIndex=%d,upload=%d, should not happen\n",
103 			__func__, mbox_index, upload));
104 	qdf_assert(0);
105 	return 0xff;
106 }
107 
/**
 * hif_dev_map_service_to_pipe() - maps a service id to its ul/dl pipes.
 * @pdev: sdio device context
 * @service_id: service index
 * @ul_pipe: [out] uplink pipe id
 * @dl_pipe: [out] downlink pipe id
 * @swap_mapping: swap the default mailbox mapping
 *
 * Return: QDF_STATUS_SUCCESS, or an error code for an unknown service
 */
118 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev,
119 				     uint16_t service_id,
120 				     uint8_t *ul_pipe, uint8_t *dl_pipe,
121 				     bool swap_mapping)
122 {
123 	QDF_STATUS status = QDF_STATUS_SUCCESS;
124 
125 	switch (service_id) {
126 	case HTT_DATA_MSG_SVC:
127 		if (swap_mapping) {
128 			*ul_pipe = 1;
129 			*dl_pipe = 0;
130 		} else {
131 			*ul_pipe = 3;
132 			*dl_pipe = 2;
133 		}
134 		break;
135 
136 	case HTC_CTRL_RSVD_SVC:
137 	case HTC_RAW_STREAMS_SVC:
138 		*ul_pipe = 1;
139 		*dl_pipe = 0;
140 		break;
141 
142 	case WMI_DATA_BE_SVC:
143 	case WMI_DATA_BK_SVC:
144 	case WMI_DATA_VI_SVC:
145 	case WMI_DATA_VO_SVC:
146 		*ul_pipe = 1;
147 		*dl_pipe = 0;
148 		break;
149 
150 	case WMI_CONTROL_SVC:
151 		if (swap_mapping) {
152 			*ul_pipe = 3;
153 			*dl_pipe = 2;
154 		} else {
155 			*ul_pipe = 1;
156 			*dl_pipe = 0;
157 		}
158 		break;
159 
160 	default:
161 		status = !QDF_STATUS_SUCCESS;
162 		break;
163 	}
164 	return status;
165 }
166 
/**
 * hif_dev_alloc_rx_buffer() - allocate an rx buffer wrapped in an HTC packet.
 * @pdev: sdio device context
 *
 * Return: htc packet pointer, or NULL if the netbuf allocation failed
 */
174 HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev)
175 {
176 	HTC_PACKET *packet;
177 	qdf_nbuf_t netbuf;
178 	uint32_t bufsize = 0, headsize = 0;
179 
180 	bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET;
181 	headsize = sizeof(HTC_PACKET);
182 	netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false);
183 	if (netbuf == NULL) {
184 		hif_err_rl("Allocate netbuf failed");
185 		return NULL;
186 	}
187 	packet = (HTC_PACKET *) qdf_nbuf_data(netbuf);
188 	qdf_nbuf_reserve(netbuf, headsize);
189 
190 	SET_HTC_PACKET_INFO_RX_REFILL(packet,
191 				      pdev,
192 				      qdf_nbuf_data(netbuf),
193 				      bufsize, ENDPOINT_0);
194 	SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf);
195 	return packet;
196 }
197 
/**
 * hif_dev_create() - create hif device after probe.
 * @hif_device: HIF SDIO device handle
 * @callbacks: htc message callbacks to copy into the new context
 * @target: HIF target context
 *
 * Return: new hif_sdio_device context, or NULL on allocation failure
 */
207 struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
208 			struct hif_msg_callbacks *callbacks, void *target)
209 {
210 
211 	QDF_STATUS status;
212 	struct hif_sdio_device *pdev;
213 
214 	pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device));
215 	if (!pdev) {
216 		A_ASSERT(false);
217 		return NULL;
218 	}
219 
220 	qdf_spinlock_create(&pdev->Lock);
221 	qdf_spinlock_create(&pdev->TxLock);
222 	qdf_spinlock_create(&pdev->RxLock);
223 
224 	pdev->HIFDevice = hif_device;
225 	pdev->pTarget = target;
226 	status = hif_configure_device(hif_device,
227 				      HIF_DEVICE_SET_HTC_CONTEXT,
228 				      (void *)pdev, sizeof(pdev));
229 	if (status != QDF_STATUS_SUCCESS) {
230 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
231 				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
232 				 __func__));
233 	}
234 
235 	A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks));
236 
237 	return pdev;
238 }
239 
/**
 * hif_dev_destroy() - destroy hif device.
 * @pdev: sdio device context to tear down and free
 *
 * Return: none
 */
247 void hif_dev_destroy(struct hif_sdio_device *pdev)
248 {
249 	QDF_STATUS status;
250 
251 	status = hif_configure_device(pdev->HIFDevice,
252 				      HIF_DEVICE_SET_HTC_CONTEXT,
253 				      (void *)NULL, 0);
254 	if (status != QDF_STATUS_SUCCESS) {
255 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
256 				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
257 				 __func__));
258 	}
259 	qdf_mem_free(pdev);
260 }
261 
/**
 * hif_dev_from_hif() - get sdio device context from hif device.
 * @hif_device: hif device handle
 *
 * Return: hif sdio device context, or NULL if none was registered
 */
269 struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device)
270 {
271 	struct hif_sdio_device *pdev = NULL;
272 	QDF_STATUS status;
273 
274 	status = hif_configure_device(hif_device,
275 				HIF_DEVICE_GET_HTC_CONTEXT,
276 				(void **)&pdev, sizeof(struct hif_sdio_device));
277 	if (status != QDF_STATUS_SUCCESS) {
278 		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
279 				("(%s)HTC_SDIO_CONTEXT is NULL!!!\n",
280 				 __func__));
281 	}
282 	return pdev;
283 }
284 
/**
 * hif_dev_disable_interrupts() - disable hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS of the synchronous register write
 */
QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev)
{
	struct MBOX_IRQ_ENABLE_REGISTERS regs;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	LOCK_HIF_DEV(pdev);
	/* Disable all interrupts */
	pdev->IrqEnableRegisters.int_status_enable = 0;
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0;
	pdev->IrqEnableRegisters.error_status_enable = 0;
	pdev->IrqEnableRegisters.counter_int_status_enable = 0;
	/* copy into our temp area so the bus write below can happen
	 * without holding the device lock
	 */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters, sizeof(pdev->IrqEnableRegisters));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous: push the zeroed enable registers to the
	 * target so it stops raising interrupts
	 */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("Failed to update interrupt control registers err: %d",
			 status));
	}

	/* To Do mask the host controller interrupts */
	hif_mask_interrupt(pdev->HIFDevice);
	HIF_EXIT("status :%d", status);
	return status;
}
330 
/**
 * hif_dev_enable_interrupts() - enables hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS of the synchronous register write
 */
QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	struct MBOX_IRQ_ENABLE_REGISTERS regs;

	HIF_ENTER();

	/* for good measure, make sure interrupt are disabled
	 * before unmasking at the HIF layer.
	 * The rationale here is that between device insertion
	 * (where we clear the interrupts the first time)
	 * and when HTC is finally ready to handle interrupts,
	 * other software can perform target "soft" resets.
	 * The AR6K interrupt enables reset back to an "enabled"
	 * state when this happens.
	 */
	hif_dev_disable_interrupts(pdev);

	/* Unmask the host controller interrupts */
	hif_un_mask_interrupt(pdev->HIFDevice);

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	pdev->IrqEnableRegisters.int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
			INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

		/* enable 2 mboxs INT */
	pdev->IrqEnableRegisters.int_status_enable |=
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	pdev->IrqEnableRegisters.error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	pdev->IrqEnableRegisters.counter_int_status_enable =
	   (COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >>
		24;

	/* copy into our temp area so the bus write below can happen
	 * without holding the device lock
	 */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters,
		 sizeof(struct MBOX_IRQ_ENABLE_REGISTERS));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous: push the enable registers to the target */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
		  ("Failed to update interrupt control registers err: %d\n",
				 status));

	}
	HIF_EXIT();
	return status;
}
417 
/**
 * hif_dev_setup() - set up the sdio device (mailboxes, block size,
 * IRQ mode, and HTC attach).
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS of the final hif_attach_htc() call
 */
QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint32_t blocksizes[MAILBOX_COUNT];
	struct htc_callbacks htc_cbs;
	struct hif_sdio_dev *hif_device = pdev->HIFDevice;

	HIF_ENTER();

	/* Learn the target's mailbox addresses from the HIF layer */
	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_GET_MBOX_ADDR failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	/* Learn the per-mailbox SDIO block sizes */
	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("(%s)HIF_DEVICE_GET_MBOX_BLOCK_SIZE failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	/* BlockMask assumes BlockSize is a power of two, which the
	 * assertion below verifies
	 */
	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
	pdev->BlockMask = pdev->BlockSize - 1;
	A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0);

	/* assume we can process HIF interrupt events asynchronously */
	pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

	/* see if the HIF layer overrides this assumption */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_IRQ_PROC_MODE,
			     &pdev->HifIRQProcessingMode,
			     sizeof(pdev->HifIRQProcessingMode));

	switch (pdev->HifIRQProcessingMode) {
	case HIF_DEVICE_IRQ_SYNC_ONLY:
		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
			("HIF Interrupt processing is SYNC ONLY\n"));
		/* see if HIF layer wants HTC to yield */
		hif_configure_device(hif_device,
				     HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
				     &pdev->HifIRQYieldParams,
				     sizeof(pdev->HifIRQYieldParams));

		if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) {
			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
				("HIF req of DSR yield per %d RECV packets\n",
				 pdev->HifIRQYieldParams.
				 recv_packet_yield_count));
			pdev->DSRCanYield = true;
		}
		break;
	case HIF_DEVICE_IRQ_ASYNC_SYNC:
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("HIF Interrupt processing is ASYNC and SYNC\n"));
		break;
	default:
		/* unknown IRQ processing mode reported by the HIF layer */
		A_ASSERT(false);
		break;
	}

	pdev->HifMaskUmaskRecvEvent = NULL;

	/* see if the HIF layer implements the mask/unmask recv
	 * events function
	 */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
			     &pdev->HifMaskUmaskRecvEvent,
			     sizeof(pdev->HifMaskUmaskRecvEvent));

	/* NOTE(review): this status is overwritten by hif_attach_htc()
	 * below, so a failure here is silently ignored — confirm intended
	 */
	status = hif_dev_disable_interrupts(pdev);

	qdf_mem_zero(&htc_cbs, sizeof(struct htc_callbacks));
	/* the device layer handles these */
	htc_cbs.rwCompletionHandler = hif_dev_rw_completion_handler;
	htc_cbs.dsrHandler = hif_dev_dsr_handler;
	htc_cbs.context = pdev;
	status = hif_attach_htc(pdev->HIFDevice, &htc_cbs);

	HIF_EXIT();
	return status;
}
518