/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef CONFIG_SDIO_TRANSFER_MAILBOX
#define ATH_MODULE_NAME hif
#include <linux/kthread.h>
#include <linux/version.h>
#include <qdf_types.h>
#include <qdf_status.h>
#include <qdf_timer.h>
#include <qdf_time.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_util.h>
#include <qdf_defer.h>
#include <qdf_atomic.h>
#include <qdf_nbuf.h>
#include <qdf_threads.h>
#include <athdefs.h>
#include <qdf_net_types.h>
#include <a_types.h>
#include <a_osapi.h>
#include <hif.h>
#include <htc_internal.h>
#include <htc_services.h>
#include <a_debug.h>
#include "hif_sdio_internal.h"
#include "if_sdio.h"
#include "regtable.h"
#include "transfer.h"

/*
 * The following commit was introduced in v5.17:
 * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
 * Use the old name for kernels before 5.17.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
#define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
#endif

/* By default, set up a bounce buffer for the data packets.
 * If the underlying host controller driver does not use DMA,
 * this step may be skipped to save the memory allocation and
 * copy time.
 */
#define HIF_USE_DMA_BOUNCE_BUFFER 1
#if HIF_USE_DMA_BOUNCE_BUFFER
/* Macro to check if a DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA-able and will
 * bug-check otherwise (i.e. buffers on the stack). The
 * virt_addr_valid() check fails on stack memory.
 */
#define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
					!virt_addr_valid((buffer)))
#else
#define BUFFER_NEEDS_BOUNCE(buffer)   (false)
#endif
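
/* Illustration (not part of the driver): a stack buffer or an unaligned
 * pointer must be bounced, while an aligned kmalloc() allocation can be
 * DMA'd directly. For example:
 *
 *	char stack_buf[8];			 // on the stack: bounce
 *	char *heap_buf = kmalloc(8, GFP_KERNEL); // kmalloc memory
 *	BUFFER_NEEDS_BOUNCE(stack_buf);		 // true (virt_addr_valid fails)
 *	BUFFER_NEEDS_BOUNCE(heap_buf + 1);	 // true (not 4-byte aligned)
 *	BUFFER_NEEDS_BOUNCE(heap_buf);		 // false
 */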

#ifdef SDIO_3_0
/**
 * set_extended_mbox_size() - set extended MBOX size
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
	pinfo->mbox_prop[1].extended_size =
		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
}

/**
 * set_extended_mbox_address() - set extended MBOX address
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[1].extended_address =
		pinfo->mbox_prop[0].extended_address +
		pinfo->mbox_prop[0].extended_size +
		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
}
#else
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}

static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
}
#endif

/**
 * set_extended_mbox_window_info() - set extended MBOX window
 * information for SDIO interconnects
 * @manf_id: manufacturer id
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_window_info(uint16_t manf_id,
					  struct hif_device_mbox_info *pinfo)
{
	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
	case MANUFACTURER_ID_AR6002_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
		break;
	case MANUFACTURER_ID_AR6003_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6004_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6320_BASE:
	{
		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;

		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		if (rev < 4)
			pinfo->mbox_prop[0].extended_size =
				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
		else
			set_extended_mbox_size(pinfo);
		set_extended_mbox_address(pinfo);
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	}
	case MANUFACTURER_ID_QCA9377_BASE:
	case MANUFACTURER_ID_QCA9379_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
		pinfo->mbox_prop[1].extended_address =
			pinfo->mbox_prop[0].extended_address +
			pinfo->mbox_prop[0].extended_size +
			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
		pinfo->mbox_prop[1].extended_size =
			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	default:
		A_ASSERT(false);
		break;
	}
}

/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);

	HIF_ENTER();

	hif_device->swap_mailbox = true;

	HIF_EXIT();
}

/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: true or false
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device;

	HIF_ENTER();

	hif_device = hif_dev_from_hif(pdev);

	HIF_EXIT();

	return hif_device->swap_mailbox;
}

/**
 * hif_dev_get_fifo_address() - get the fifo addresses for dma
 * @pdev:  SDIO HIF object
 * @config: mbox address config pointer
 * @config_len: config length
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
			     void *config,
			     uint32_t config_len)
{
	uint32_t count;
	struct hif_device_mbox_info *cfg =
				(struct hif_device_mbox_info *)config;

	for (count = 0; count < 4; count++)
		cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);

	if (config_len >= sizeof(struct hif_device_mbox_info)) {
		set_extended_mbox_window_info((uint16_t)pdev->func->device,
					      cfg);
		return 0;
	}

	return -EINVAL;
}

/**
 * hif_dev_get_block_size() - get the mbox block size for dma
 * @config: mbox size config pointer
 *
 * Return: none
 */
void hif_dev_get_block_size(void *config)
{
	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
}

/**
 * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
 * @pdev: SDIO HIF object
 * @svc: service index
 * @ul_pipe: uplink pipe id
 * @dl_pipe: downlink pipe id
 *
 * Return: 0 on success, error value on invalid map
 */
QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
				       uint8_t *ul_pipe, uint8_t *dl_pipe)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	switch (svc) {
	case HTT_DATA_MSG_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 1;
			*dl_pipe = 0;
		} else {
			*ul_pipe = 3;
			*dl_pipe = 2;
		}
		break;

	case HTC_CTRL_RSVD_SVC:
	case HTC_RAW_STREAMS_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_DATA_BE_SVC:
	case WMI_DATA_BK_SVC:
	case WMI_DATA_VI_SVC:
	case WMI_DATA_VO_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_CONTROL_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 3;
			*dl_pipe = 2;
		} else {
			*ul_pipe = 1;
			*dl_pipe = 0;
		}
		break;

	default:
		hif_err("Invalid service: %d", svc);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}

/**
 * hif_dev_setup_device() - Setup device specific stuff here required for hif
 * @pdev: HIF layer object
 *
 * Return: 0 on success, error otherwise
 */
int hif_dev_setup_device(struct hif_sdio_device *pdev)
{
	int status = 0;
	uint32_t blocksizes[MAILBOX_COUNT];

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_FIFO_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS)
		hif_err("HIF_DEVICE_GET_FIFO_ADDR failed");

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS)
		hif_err("HIF_DEVICE_GET_BLOCK_SIZE failed");

	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];

	return status;
}

/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	int status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
	/* Disable all interrupts */
	LOCK_HIF_DEV(pdev);
	mboxEnaRegs(pdev).int_status_enable = 0;
	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
	mboxEnaRegs(pdev).error_status_enable = 0;
	mboxEnaRegs(pdev).counter_int_status_enable = 0;
	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("Updating intr reg: %d", status);
}

/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01) |
		INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable the data interrupts for both mailboxes */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register; enable
	 * CPU sourced interrupts #0 and #1:
	 * #0 is used to report an assertion from the target
	 * #1 is used to inform the host that credits have arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for the debug interrupt, to catch fatal errors)
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("Updating intr reg: %d", status);
}

/**
 * hif_dev_dump_registers() - Dump the mailbox registers
 * @pdev: SDIO HIF object
 * @irq_proc: IRQ process registers
 * @irq_en: IRQ enable registers
 * @mbox_regs: mailbox counter registers
 *
 * Return: none
 */
void hif_dev_dump_registers(struct hif_sdio_device *pdev,
			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
{
	int i = 0;

	hif_debug("Mailbox registers:");

	if (irq_proc) {
		hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status);
		hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
		hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
		hif_debug("CounterIntStat: 0x%x ",
			  irq_proc->counter_int_status);
		hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame);
		hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
		hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
		hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
		hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
		hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);

		if (pdev->MailBoxInfo.gmbox_address != 0) {
			hif_debug("GMBOX-HostIntStatus2:  0x%x ",
				  irq_proc->host_int_status2);
			hif_debug("GMBOX-RX-Avail: 0x%x ",
				  irq_proc->gmbox_rx_avail);
		}
	}

	if (irq_en) {
		hif_debug("IntStatusEnable: 0x%x",
			  irq_en->int_status_enable);
		hif_debug("CounterIntStatus: 0x%x",
			  irq_en->counter_int_status_enable);
	}

	for (i = 0; mbox_regs && i < 4; i++)
		hif_debug("Counter[%d]: 0x%x", i, mbox_regs->counter[i]);
}

/* Under HL SDIO, with Interface Memory support, we have
 * the following reasons to support two mailboxes:
 * a) we need to place different buffers in different
 * mempools; for example, data uses Interface Memory while
 * descriptors and others use DRAM, so they need different
 * SDIO mailbox channels.
 * b) currently, the tx mempool in the LL case is separate from
 * the main mempool; the structure (descriptors at the beginning
 * of every pool buffer) is different, because they only need to
 * store tx descriptors from the host. To align with the LL case,
 * we also need two-mailbox support, just as in the PCIe LL cases.
 */

/**
 * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
 * @pdev: The pointer to the hif device object
 * @pipeid: pipe index
 *
 * Return: mailbox index, or INVALID_MAILBOX_NUMBER for an invalid pipe
 */
static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
					    uint8_t pipeid)
{
	if (2 == pipeid || 3 == pipeid)
		return 1;
	else if (0 == pipeid || 1 == pipeid)
		return 0;

	hif_err("pipeid=%d invalid", pipeid);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER;
}

/**
 * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
 * @pdev: The pointer to the hif device object
 * @mbox_index: mailbox index
 * @upload: true for the upload (tx) pipe, false for the download (rx) pipe
 *
 * Return: pipe index, or INVALID_MAILBOX_NUMBER for an invalid mailbox
 */
static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
					    uint8_t mbox_index, bool upload)
{
	if (mbox_index == 0)
		return upload ? 1 : 0;
	else if (mbox_index == 1)
		return upload ? 3 : 2;

	hif_err("mbox_index=%d, upload=%d invalid", mbox_index, upload);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
}
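
/* A sketch of the resulting default (non-swapped) mapping, derived from
 * the two helpers above:
 *
 *	mailbox 0: pipe 0 (download/rx), pipe 1 (upload/tx)
 *	mailbox 1: pipe 2 (download/rx), pipe 3 (upload/tx)
 *
 * With swap_mailbox set, hif_dev_map_service_to_pipe() above hands
 * WMI_CONTROL_SVC the mailbox-1 pipes and HTT_DATA_MSG_SVC the
 * mailbox-0 pipes, reversing the usual assignment.
 */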

/**
 * hif_get_send_address() - Get the transfer pipe address
 * @pdev: The pointer to the hif device object
 * @pipe: The pipe identifier
 * @addr: Filled in with the mailbox address of the pipe
 *
 * Return: 0 for success and non-zero for failure to map
 */
int hif_get_send_address(struct hif_sdio_device *pdev,
			 uint8_t pipe, unsigned long *addr)
{
	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;

	if (!addr)
		return -EINVAL;

	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);

	if (mbox_index == INVALID_MAILBOX_NUMBER)
		return -EINVAL;

	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;

	return 0;
}

/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: The request flags
 * @length: The length pointer
 * @addr: The addr pointer
 *
 * Return: none
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		hif_err("Invalid write addr: 0x%08x", taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			hif_err("Error (%u > %u)", *length, mboxlen);
			return;
		}

		taddr = taddr + (mboxlen - *length);
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
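
/* A worked example of the fixup above, with assumed illustrative values:
 * if a mailbox window starts at extended_address 0x1000 with
 * extended_size 0x800 and the caller writes 0x100 bytes, the write is
 * moved to 0x1000 + (0x800 - 0x100) = 0x1700 so that its last byte lands
 * on the final address of the window (the end-of-message boundary on
 * this mailbox design). Any dummy space carried in bits 16..31 of the
 * request flags is then added on top of that address.
 */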

/**
 * hif_dev_recv_packet() - Receive HTC packet/packet information from device
 * @pdev: HIF device object
 * @packet: The HTC packet pointer
 * @recv_length: The length of information to be received
 * @mbox_index: The mailbox that contains this information
 *
 * Return: 0 for success and non-zero for error
 */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	bool sync = (packet->Completion) ? false : true;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	if (padded_length > packet->BufferLength) {
		hif_err("No space for padlen:%d recvlen:%d bufferlen:%d",
			padded_length,
			recv_length, packet->BufferLength);
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in the Endpoint member */
	hif_debug("hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		  packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		  padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		hif_err("Failed %d", status);

	if (sync) {
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *)packet->pBuffer;

			hif_debug("EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X",
				  hdr->EndpointID, hdr->PayloadLen,
				  hdr->Flags, hdr->ControlBytes0,
				  hdr->ControlBytes1);
		}
	}

	return status;
}

/**
 * hif_dev_issue_recv_packet_bundle() - fetch a bundle of packets with a
 *                                      single mailbox read
 * @pdev: HIF device object
 * @recv_pkt_queue: queue of packets to fetch
 * @sync_completion_queue: queue for synchronously fetched packets, or
 *                         NULL for async processing
 * @mail_box_index: the mailbox to read from
 * @num_packets_fetched: filled in with the number of packets fetched
 * @partial_bundle: true if a partial bundle was already detected
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_issue_recv_packet_bundle(
	struct hif_sdio_device *pdev,
	HTC_PACKET_QUEUE *recv_pkt_queue,
	HTC_PACKET_QUEUE *sync_completion_queue,
	uint8_t mail_box_index,
	int *num_packets_fetched,
	bool partial_bundle)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		hif_warn("partial bundle detected num: %d, %d",
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		hif_err("packet_rx_bundle is NULL");
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_read_write failed, status: %d", status);
	} else {
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
				DEV_CALC_RECV_PADDED_LEN(pdev,
							 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length +=
						HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}
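
/* Bundle buffer layout (a sketch of what the function above produces):
 * each fetched packet occupies its block-padded length, back to back,
 * in the shared bundle buffer read from the mailbox in one transfer:
 *
 *	| pkt0 .. pad | pkt1 .. pad | ... | pktN .. pad |
 *
 * After the single hif_read_write() completes, the sync path copies each
 * padded segment back into the corresponding HTC packet buffer.
 */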

#define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t);

	hif_debug("NumLookAheads: %d", num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF, which may have
		 * restrictions on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to the caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packet queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			hif_err("Invalid Endpoint in lookahead: %d", id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets as indicated
		 * by the lookaheads; these packets are stored
		 * in the recv_q queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if (HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(
							&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) != 0) {
					/* we couldn't fetch all packets at one
					 * time; this creates a broken bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt);
				if (!pkt)
					break;

				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) > 0) {
					/* lookaheads in all packets except the
					 * last one must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (!pkt)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* sent synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment the count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled;
		 * we can process each packet, check lookaheads, then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload the sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt);
			if (!pkt)
				break;

			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(
						&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more lookaheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happens in the same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}

/**
 * hif_dev_service_cpu_interrupt() - service fatal interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t cpu_int_status;

	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
			 mboxEnaRegs(pdev).cpu_int_status_enable;

	hif_err("CPU intr status: 0x%x", (uint32_t)cpu_int_status);

	/* Clear the interrupt */
	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte aligned
	 * to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes.
	 * Set the W1C value to clear the interrupt; this hits the
	 * register first.
	 */
	reg_buffer[0] = cpu_int_status;
	/* the remaining 3 values are set to zero and have no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				CPU_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);

	/* The interrupt sent to the host is generated via bit0
	 * of the CPU INT register
	 */
	if (cpu_int_status & 0x1) {
		if (pdev->hif_callbacks.fwEventHandler)
			/* It calls into HTC which propagates this
			 * to ol_target_failure()
			 */
			pdev->hif_callbacks.fwEventHandler(
				pdev->hif_callbacks.Context,
				QDF_STATUS_E_FAILURE);
	} else {
		hif_err("Unrecognized CPU event");
	}

	return status;
}

/**
 * hif_dev_service_error_interrupt() - service error interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t error_int_status = 0;

	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
	hif_err("Err intr status: 0x%x", error_int_status);

	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
		hif_err("Error: Wakeup");

	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
		hif_err("Error: Rx Underflow");

	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
		hif_err("Error: Tx Overflow");

	/* Clear the interrupt */
	mboxProcRegs(pdev).error_int_status &= ~error_int_status;

	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte
	 * aligned to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes.
	 */

	/* set the W1C value to clear the interrupt */
	reg_buffer[0] = error_int_status;
	/* the remaining 3 values are set to zero and have no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				ERROR_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_debug_interrupt() - service debug interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
{
	uint32_t dummy;
	QDF_STATUS status;

	/* Send a target failure event to the application */
	hif_err("Target debug interrupt");

	/* clear the interrupt; the debug error interrupt is counter 0,
	 * so read the counter to clear the interrupt
	 */
	status = hif_read_write(pdev->HIFDevice,
				COUNT_DEC_ADDRESS,
				(uint8_t *)&dummy,
				4, HIF_RD_SYNC_BYTE_INC, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_counter_interrupt() - service counter interrupts
 *                                       synchronously
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static
QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
{
	uint8_t counter_int_status;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));

	counter_int_status = mboxProcRegs(pdev).counter_int_status &
			     mboxEnaRegs(pdev).counter_int_status_enable;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
			 counter_int_status));

	/* Check if the debug interrupt is pending
	 * NOTE: other modules like GMBOX may use the counter interrupt
	 * for credit flow control on other counters; we only need to
	 * check for the debug assertion counter interrupt
	 */
	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
		return hif_dev_service_debug_interrupt(pdev);

	return QDF_STATUS_SUCCESS;
}
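
/* Each mailbox exposes MAILBOX_LOOKAHEAD_SIZE_IN_WORD lookahead words in
 * the IRQ process registers. The macro below picks the first word for
 * mailbox i; that word carries a copy of the leading HTC_FRAME_HDR bytes
 * of the next pending message, which is how the recv handler knows what
 * to fetch before issuing the mailbox read.
 */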

#define RX_LOOKAHEAD_GET(pdev, i) \
	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)]

/**
 * hif_dev_process_pending_irqs() - process pending interrupts
 * @pdev: hif sdio device context
 * @done: pending irq completion status
 * @async_processing: sync/async processing flag
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
					bool *done,
					bool *async_processing)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t host_int_status = 0;
	uint32_t l_ahead[MAILBOX_USED_COUNT];
	int i;

	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
			 (unsigned long)pdev));

	/* NOTE: the HIF implementation guarantees that the context
	 * of this call allows us to perform SYNCHRONOUS I/O,
	 * that is we can block, sleep or call any API that
	 * can block or switch thread/task contexts.
	 * This is a fully schedulable context.
	 */
	do {
		if (mboxEnaRegs(pdev).int_status_enable == 0) {
			/* interrupt enables have been cleared, do not try
			 * to process any pending interrupts that
			 * may result in more bus transactions.
			 * The target may be unresponsive at this point.
			 */
			break;
		}
		status = hif_read_write(pdev->HIFDevice,
					HOST_INT_STATUS_ADDRESS,
					(uint8_t *)&mboxProcRegs(pdev),
					sizeof(mboxProcRegs(pdev)),
					HIF_RD_SYNC_BYTE_INC, NULL);

		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
			hif_dev_dump_registers(pdev,
					       &mboxProcRegs(pdev),
					       &mboxEnaRegs(pdev),
					       &mboxCountRegs(pdev));
		}

		/* Update only those registers that are enabled */
		host_int_status = mboxProcRegs(pdev).host_int_status
				  & mboxEnaRegs(pdev).int_status_enable;

		/* only look at the mailbox status if the HIF layer did not
		 * provide this function; on some HIF interfaces reading
		 * the RX lookahead is not valid to do
		 */
		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			l_ahead[i] = 0;
			if (host_int_status & (1 << i)) {
				/* mask out the pending mailbox value, we use
				 * "lookahead" as the real flag for
				 * mailbox processing below
				 */
				host_int_status &= ~(1 << i);
				if (mboxProcRegs(pdev).
				    rx_lookahead_valid & (1 << i)) {
					/* mailbox has a message and the
					 * lookahead is valid
					 */
					l_ahead[i] = RX_LOOKAHEAD_GET(pdev, i);
				}
			}
		} /* end of for loop */
	} while (false);

	do {
		bool bLookAheadValid = false;
		/* did the interrupt status fetches succeed? */
		if (QDF_IS_STATUS_ERROR(status))
			break;

		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			if (l_ahead[i] != 0) {
				bLookAheadValid = true;
				break;
			}
		}

		if ((host_int_status == 0) && !bLookAheadValid) {
			/* nothing to process, the caller can use this
			 * to break out of a loop
			 */
			*done = true;
			break;
		}

		if (bLookAheadValid) {
			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
				int fetched = 0;

				if (l_ahead[i] == 0)
					continue;
				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
						("mbox[%d],lookahead:0x%X\n",
						i, l_ahead[i]));
				/* Mailbox Interrupt, the HTC layer may issue
				 * async requests to empty the mailbox...
				 * When emptying the recv mailbox we use the
				 * async handler from the completion routine of
				 * the caller's read request. This can improve
				 * performance by reducing context switching
				 * when we rapidly pull packets.
				 */
				status = hif_dev_recv_message_pending_handler(
							pdev, i,
							&l_ahead[i], 1,
							async_processing,
							&fetched);
				if (QDF_IS_STATUS_ERROR(status))
					break;

				if (!fetched) {
					/* HTC could not pull any messages out
					 * due to lack of resources; force the
					 * DSR handler to ack the interrupt
					 */
					*async_processing = false;
					pdev->RecheckIRQStatusCnt = 0;
				}
			}
		}

		/* now handle the rest of them */
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Valid source for OTHER interrupts: 0x%x\n",
				host_int_status));

		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
			/* CPU Interrupt */
			status = hif_dev_service_cpu_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
			/* Error Interrupt */
			status = hif_dev_service_error_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
			/* Counter Interrupt */
			status = hif_dev_service_counter_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

	} while (false);

	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily, which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can
	 * rely on taking another interrupt rather than re-checking
	 * the status registers, which can re-wake the target.
	 *
	 * NOTE: for host interfaces that use the special
	 * GetPendingEventsFunc, this optimization cannot be used due to
	 * possible side effects. For example, SPI requires the host
	 * to drain all messages from the mailbox before exiting
	 * the ISR routine.
	 */
	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Bypass IRQ Status re-check, forcing done\n"));
		*done = true;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
			 *done, *async_processing, status));

	return status;
}

#define DEV_CHECK_RECV_YIELD(pdev) \
	((pdev)->CurrentDSRRecvCount >= \
	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)

/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 *
 * @context: hif sdio device context
 *
 * Return: 0 for success and non-zero for failure
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool done = false;
	bool async_proc = false;

	/* reset the recv counter that tracks when we need
	 * to yield from the DSR
	 */
	pdev->CurrentDSRRecvCount = 0;
	/* reset the counter used to flag a re-scan of IRQ
	 * status registers on the target
	 */
	pdev->RecheckIRQStatusCnt = 0;

	while (!done) {
		status = hif_dev_process_pending_irqs(pdev, &done,
						      &async_proc);
		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
			/* the HIF layer does not allow async IRQ processing,
			 * override the asyncProc flag
			 */
			async_proc = false;
			/* this will cause us to re-enter
			 * hif_dev_process_pending_irqs() and re-read the
			 * interrupt status registers.
			 * This has a nice side effect of blocking us until all
			 * async read requests are completed. This behavior is
			 * required as we do not allow ASYNC processing
			 * in interrupt handlers (like Windows CE)
			 */

			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
				/* hif_dev_process_pending_irqs() pulled enough
				 * recv messages to satisfy the yield count,
				 * stop checking for more messages and return
				 */
				break;
		}

		if (async_proc) {
			/* the function does some async I/O for performance;
			 * we need to exit the ISR immediately, and the check
			 * below will prevent the interrupt from being
			 * acked while we handle it asynchronously
			 */
			break;
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
		/* Ack the interrupt only if:
		 *  1. we did not get any errors in processing interrupts
		 *  2. there are no outstanding async processing requests
		 */
		if (pdev->DSRCanYield) {
			/* if the DSR can yield, do not ACK the interrupt;
			 * there could be more pending messages. The HIF layer
			 * must ACK the interrupt on behalf of HTC
			 */
			hif_info("Yield (RX count: %d)",
				 pdev->CurrentDSRRecvCount);
		} else {
			hif_ack_interrupt(pdev->HIFDevice);
		}
	}

	return status;
}

/**
 * hif_read_write() - queue a read/write request
 * @device: pointer to hif device structure
 * @address: address to read from or write to
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request flags
 * @context: pointer to hold calling context
 *
 * Return: 0 on success, error number otherwise.
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *device,
	       unsigned long address,
	       char *buffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct bus_request *busrequest;

	AR_DEBUG_ASSERT(device);
	AR_DEBUG_ASSERT(device->func);
	hif_debug("device 0x%pK addr 0x%lX buffer 0x%pK",
		  device, address, buffer);
	hif_debug("len %d req 0x%X context 0x%pK",
		  length, request, context);

	/* SDIO r/w is not needed during suspend, so just return */
	if ((device->is_suspend) &&
	    (device->power_config == HIF_DEVICE_POWER_CUT)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
		return QDF_STATUS_SUCCESS;
	}
	do {
		if ((request & HIF_ASYNCHRONOUS) ||
		    (request & HIF_SYNCHRONOUS)) {
			/* serialize all requests through the async thread */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Execution mode: %s\n", __func__,
					 (request & HIF_ASYNCHRONOUS) ? "Async"
					 : "Synch"));
			busrequest = hif_allocate_bus_request(device);
			if (!busrequest) {
				hif_err("bus requests unavailable");
				hif_err("%s, addr:0x%lX, len:%d",
					request & HIF_SDIO_READ ? "READ" :
					"WRITE", address, length);
				return QDF_STATUS_E_FAILURE;
			}
			busrequest->address = address;
			busrequest->buffer = buffer;
			busrequest->length = length;
			busrequest->request = request;
			busrequest->context = context;

			add_to_async_list(device, busrequest);

			if (request & HIF_SYNCHRONOUS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: queued sync req: 0x%lX\n",
						 __func__,
						 (unsigned long)busrequest));

				/* wait for completion */
				up(&device->sem_async);
				if (down_interruptible(&busrequest->sem_req) ==
				    0) {
					QDF_STATUS status = busrequest->status;

					hif_debug("sync freeing 0x%lX:0x%X",
						  (unsigned long)busrequest,
						  busrequest->status);
					hif_debug("freeing req: 0x%X",
						  (unsigned int)request);
					hif_free_bus_request(device,
							     busrequest);
					return status;
				} else {
					/* interrupted, exit */
					return QDF_STATUS_E_FAILURE;
				}
			} else {
				hif_debug("queued async req: 0x%lX",
					  (unsigned long)busrequest);
				up(&device->sem_async);
				return QDF_STATUS_E_PENDING;
			}
		} else {
			hif_err("Invalid execution mode: 0x%08x",
				(unsigned int)request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
	} while (0);

	return status;
}
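
/* A minimal usage sketch (illustrative, not part of the driver): a
 * synchronous request blocks on the bus request's semaphore until the
 * async task completes it, while an async request returns
 * QDF_STATUS_E_PENDING immediately and is completed via @context. For
 * example, a synchronous byte-incrementing register read:
 *
 *	uint32_t val;
 *	status = hif_read_write(device, COUNT_DEC_ADDRESS,
 *				(char *)&val, 4,
 *				HIF_RD_SYNC_BYTE_INC, NULL);
 */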

/**
 * hif_sdio_func_enable() - Handle device enabling as per device
 * @ol_sc: HIF device object
 * @func: SDIO function object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS hif_sdio_func_enable(struct hif_softc *ol_sc,
				       struct sdio_func *func)
{
	struct hif_sdio_dev *device = get_hif_device(ol_sc, func);

	if (device->is_disabled) {
		int ret = 0;

		sdio_claim_host(func);

		ret = hif_sdio_quirk_async_intr(ol_sc, func);
		if (ret) {
			hif_err("Error setting async intr: %d", ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		func->enable_timeout = 100;
		ret = sdio_enable_func(func);
		if (ret) {
			hif_err("Unable to enable function: %d", ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
		if (ret) {
			hif_err("Unable to set block size 0x%X : %d",
				HIF_BLOCK_SIZE, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = hif_sdio_quirk_mod_strength(ol_sc, func);
		if (ret) {
			hif_err("Error setting mod strength : %d", ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		sdio_release_host(func);
	}

	return QDF_STATUS_SUCCESS;
}
1616 
1617 /**
1618  * __hif_read_write() - sdio read/write wrapper
1619  * @device: pointer to hif device structure
1620  * @address: address to read
1621  * @buffer: buffer to hold read/write data
1622  * @length: length to read/write
1623  * @request: read/write/sync/async request
1624  * @context: pointer to hold calling context
1625  *
1626  * Return: 0 on success, error number otherwise.
1627  */
1628 static QDF_STATUS
__hif_read_write(struct hif_sdio_dev * device,uint32_t address,char * buffer,uint32_t length,uint32_t request,void * context)1629 __hif_read_write(struct hif_sdio_dev *device,
1630 		 uint32_t address, char *buffer,
1631 		 uint32_t length, uint32_t request, void *context)
1632 {
1633 	uint8_t opcode;
1634 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1635 	int ret = A_OK;
1636 	uint8_t *tbuffer;
1637 	bool bounced = false;
1638 
1639 	if (!device) {
1640 		hif_err("Device null!");
1641 		return QDF_STATUS_E_INVAL;
1642 	}
1643 
1644 	if (!device->func) {
1645 		hif_err("func null!");
1646 		return QDF_STATUS_E_INVAL;
1647 	}
1648 
1649 	hif_debug("addr:0X%06X, len:%08d, %s, %s",
1650 		  address, length,
1651 		  request & HIF_SDIO_READ ? "Read " : "Write",
1652 		  request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");
1653 
1654 	do {
1655 		if (request & HIF_EXTENDED_IO) {
1656 			//HIF_INFO_HI("%s: Command type: CMD53\n", __func__);
1657 		} else {
1658 			hif_err("Invalid command type: 0x%08x\n", request);
1659 			status = QDF_STATUS_E_INVAL;
1660 			break;
1661 		}
1662 
1663 		if (request & HIF_BLOCK_BASIS) {
1664 			/* round to whole block length size */
1665 			length =
1666 				(length / HIF_BLOCK_SIZE) *
1667 				HIF_BLOCK_SIZE;
1668 			hif_debug("Block mode (BlockLen: %d)", length);
1669 		} else if (request & HIF_BYTE_BASIS) {
1670 			hif_debug("Byte mode (BlockLen: %d)", length);
1671 		} else {
1672 			hif_err("Invalid data mode: 0x%08x", request);
1673 			status = QDF_STATUS_E_INVAL;
1674 			break;
1675 		}
1676 		if (request & HIF_SDIO_WRITE) {
1677 			hif_fixup_write_param(device, request,
1678 					      &length, &address);
1679 
1680 			hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X",
1681 				  address, length,
1682 				  (request & HIF_DUMMY_SPACE_MASK) >> 16);
1683 		}
1684 
1685 		if (request & HIF_FIXED_ADDRESS) {
1686 			opcode = CMD53_FIXED_ADDRESS;
1687 			hif_debug("Addr mode: fixed 0x%X", address);
1688 		} else if (request & HIF_INCREMENTAL_ADDRESS) {
1689 			opcode = CMD53_INCR_ADDRESS;
1690 			hif_debug("Address mode: Incremental 0x%X", address);
1691 		} else {
1692 			hif_err("Invalid address mode: 0x%08x", request);
1693 			status = QDF_STATUS_E_INVAL;
1694 			break;
1695 		}
1696 
		if (request & HIF_SDIO_WRITE) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				tbuffer = device->dma_buffer;
				/* copy the write data to the dma buffer */
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("Invalid write len: %d",
						length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				memcpy(tbuffer, buffer, length);
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_writesb(device->func, address,
						   tbuffer, length);
				hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
					  ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_toio(device->func, address,
						       tbuffer, length);
				hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
					  ret, address, length,
					  *(int *)tbuffer);
			}
		} else if (request & HIF_SDIO_READ) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("Invalid read len: %d", length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				tbuffer = device->dma_buffer;
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
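			/*
			 * For bounced reads the device DMAs into
			 * device->dma_buffer; the result is copied back to
			 * the caller's buffer after the transfer below
			 * completes.
			 */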
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_readsb(device->func, tbuffer,
						  address, length);
				hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
					  ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_fromio(device->func,
							 tbuffer, address,
							 length);
				hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
					  ret, address, length,
					  *(int *)tbuffer);
			}
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (bounced && tbuffer)
				memcpy(buffer, tbuffer, length);
#endif
		} else {
			hif_err("Invalid dir: 0x%08x", request);
			status = QDF_STATUS_E_INVAL;
			return status;
		}

		if (ret) {
			hif_err("SDIO bus operation failed!");
			hif_err("MMC stack returned: %d", ret);
			hif_err("addr:0X%06X, len:%08d, %s, %s",
				address, length,
				request & HIF_SDIO_READ ? "Read " : "Write",
				request & HIF_ASYNCHRONOUS ?
				"Async" : "Sync");
			status = QDF_STATUS_E_FAILURE;
		}
	} while (false);

	return status;
}
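
/*
 * Illustrative only: a synchronous, byte-basis register read through the
 * wrapper above could be composed roughly as follows (flag names taken
 * from this file; callers normally go through the higher-level request
 * path rather than calling __hif_read_write() directly). Note that a
 * stack variable such as "val" fails virt_addr_valid() and would be
 * routed through the DMA bounce buffer automatically:
 *
 *	uint32_t val;
 *	QDF_STATUS st = __hif_read_write(device, address, (char *)&val,
 *					 sizeof(val),
 *					 HIF_SDIO_READ | HIF_SYNCHRONOUS |
 *					 HIF_EXTENDED_IO | HIF_BYTE_BASIS |
 *					 HIF_INCREMENTAL_ADDRESS,
 *					 NULL);
 */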

/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * Kernel thread that serializes all bus requests, both sync and async.
 * It sleeps on device->sem_async until work is queued, claims the host
 * once per batch of requests, and posts completion either through the
 * registered rw_compl_handler() (async requests) or by upping the
 * request's sem_req on which the synchronous caller is blocked.
 *
 * Return: 0 on thread exit.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;
	bool claimed = false;

	device = (struct hif_sdio_dev *)param;
	set_current_state(TASK_INTERRUPTIBLE);
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq) {
			request = device->asyncreq;
			if (request->inusenext)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			qdf_spin_unlock_irqrestore(&device->asynclock);
			hif_debug("processing req: 0x%lX",
				  (unsigned long)request);

			if (!claimed) {
				sdio_claim_host(device->func);
				claimed = true;
			}
			if (request->scatter_req) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously, note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call __hif_read_write() in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					void *context = request->context;

					hif_free_bus_request(device, request);
					device->htc_callbacks.rw_compl_handler(
							context, status);
				} else {
					hif_debug("upping req: 0x%lX",
						  (unsigned long)request);
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		if (claimed) {
			sdio_release_host(device->func);
			claimed = false;
		}
	}

	kthread_complete_and_exit(&device->async_completion, 0);

	return 0;
}
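
/*
 * Producer-side sketch (illustrative; the actual queueing helper lives
 * elsewhere in this driver): a request is linked onto device->asyncreq
 * under asynclock and the worker thread is woken through sem_async:
 *
 *	request->inusenext = NULL;
 *	qdf_spin_lock_irqsave(&device->asynclock);
 *	if (!device->asyncreq) {
 *		device->asyncreq = request;
 *	} else {
 *		struct bus_request *cur = device->asyncreq;
 *
 *		while (cur->inusenext)
 *			cur = cur->inusenext;
 *		cur->inusenext = request;
 *	}
 *	qdf_spin_unlock_irqrestore(&device->asynclock);
 *	up(&device->sem_async);
 *
 * A synchronous caller then sleeps on request->sem_req until async_task()
 * posts the status and ups the semaphore.
 */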

/**
 * hif_disable_func() - Disable SDIO function
 *
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @reset: if set, reset the target as part of disabling the function
 *
 * Return: QDF_STATUS_SUCCESS on success, else error status
 */
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
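	/*
	 * Shutdown handshake with async_task(): set async_shutdown, wake
	 * the thread through sem_async, then wait for it to signal
	 * async_completion via kthread_complete_and_exit().
	 */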
	if (device->async_task && !IS_ERR(device->async_task)) {
		init_completion(&device->async_completion);
		device->async_shutdown = 1;
		up(&device->sem_async);
		wait_for_completion(&device->async_completion);
		device->async_task = NULL;
		sema_init(&device->sem_async, 0);
	}

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}

/**
 * hif_enable_func() - Enable SDIO function
 *
 * @ol_sc: HIF object pointer
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @resume: true if called from resume, false if called from probe
 *
 * Return: QDF_STATUS_SUCCESS on success, else error status
 */
QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
			   struct sdio_func *func, bool resume)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	if (!device) {
		hif_err("HIF device is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (hif_sdio_func_enable(ol_sc, func))
		return QDF_STATUS_E_FAILURE;

	/* create async I/O thread */
	if (!device->async_task && device->is_disabled) {
		device->async_shutdown = 0;
		device->async_task = kthread_create(async_task,
						    (void *)device,
						    "AR6K Async");
		if (IS_ERR(device->async_task)) {
			hif_err("Error creating async task");
			device->async_task = NULL;
			return QDF_STATUS_E_FAILURE;
		}
		device->is_disabled = false;
		wake_up_process(device->async_task);
	}

	if (!resume)
		ret = hif_sdio_probe(ol_sc, func, device);

	HIF_EXIT();

	return ret;
}
#endif /* CONFIG_SDIO_TRANSFER_MAILBOX */