/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef CONFIG_SDIO_TRANSFER_MAILBOX
#define ATH_MODULE_NAME hif
#include <linux/kthread.h>
#include <qdf_types.h>
#include <qdf_status.h>
#include <qdf_timer.h>
#include <qdf_time.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_util.h>
#include <qdf_defer.h>
#include <qdf_atomic.h>
#include <qdf_nbuf.h>
#include <qdf_threads.h>
#include <athdefs.h>
#include <qdf_net_types.h>
#include <a_types.h>
#include <a_osapi.h>
#include <hif.h>
#include <htc_internal.h>
#include <htc_services.h>
#include <a_debug.h>
#include "hif_sdio_internal.h"
#include "if_sdio.h"
#include "regtable.h"
#include "transfer.h"

/*
 * The following commit was introduced in v5.17:
 * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
 * Use the old name for kernels before 5.17
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
#define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
#endif

/* By default, set up a bounce buffer for the data packets.
 * If the underlying host controller driver does not use DMA,
 * this step may be skipped, saving the memory allocation and
 * transfer time.
 */
#define HIF_USE_DMA_BOUNCE_BUFFER 1
#if HIF_USE_DMA_BOUNCE_BUFFER
/* Macro to check if a DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA-able and will
 * bug-check otherwise (e.g. buffers on the stack).
 * The virt_addr_valid check fails on stack memory.
 */
#define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
					!virt_addr_valid((buffer)))
#else
#define BUFFER_NEEDS_BOUNCE(buffer)   (false)
#endif
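
/*
 * Illustrative sketch (not part of the driver): how BUFFER_NEEDS_BOUNCE
 * behaves for typical buffers. A stack array usually fails the
 * virt_addr_valid() check and an odd offset fails the WORD-alignment
 * check, so both would be copied through device->dma_buffer before the
 * SDIO transfer; a kmalloc'ed buffer normally passes both checks:
 *
 *	char stack_buf[8];                         // bounce: fails virt_addr_valid
 *	char *heap_buf = kmalloc(64, GFP_KERNEL);  // no bounce expected
 *	BUFFER_NEEDS_BOUNCE(heap_buf + 1);         // bounce: not WORD-aligned
 */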

#ifdef SDIO_3_0
/**
 * set_extended_mbox_size() - set extended MBOX size
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
	pinfo->mbox_prop[1].extended_size =
		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
}

/**
 * set_extended_mbox_address() - set extended MBOX address
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[1].extended_address =
		pinfo->mbox_prop[0].extended_address +
		pinfo->mbox_prop[0].extended_size +
		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
}
#else
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}

static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
}
#endif

/**
 * set_extended_mbox_window_info() - set extended MBOX window
 * information for SDIO interconnects
 * @manf_id: manufacturer id
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_window_info(uint16_t manf_id,
					  struct hif_device_mbox_info *pinfo)
{
	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
	case MANUFACTURER_ID_AR6002_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;

		break;
	case MANUFACTURER_ID_AR6003_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6004_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6320_BASE:
	{
		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;

		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		if (rev < 4)
			pinfo->mbox_prop[0].extended_size =
				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
		else
			set_extended_mbox_size(pinfo);
		set_extended_mbox_address(pinfo);
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	}
	case MANUFACTURER_ID_QCA9377_BASE:
	case MANUFACTURER_ID_QCA9379_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
		pinfo->mbox_prop[1].extended_address =
			pinfo->mbox_prop[0].extended_address +
			pinfo->mbox_prop[0].extended_size +
			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
		pinfo->mbox_prop[1].extended_size =
			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	default:
		A_ASSERT(false);
		break;
	}
}
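
/*
 * Illustrative sketch of the resulting extended-window layout for the
 * QCA9377/QCA9379 (and AR6320 rev >= 4) cases above; the concrete
 * constants live in the register table headers, so the shapes below are
 * assumptions for illustration only:
 *
 *	mbox0 window : [ext_addr0, ext_addr0 + ext_size0)
 *	dummy gap    : HIF_MBOX_DUMMY_SPACE_SIZE_AR6320 bytes
 *	mbox1 window : starts at ext_addr0 + ext_size0 + dummy gap
 *
 * i.e. mbox1's base is derived from mbox0's base, width and the dummy
 * space, so the two DMA windows never overlap.
 */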

/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);

	HIF_ENTER();

	hif_device->swap_mailbox = true;

	HIF_EXIT();
}

/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: true or false
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device;

	HIF_ENTER();

	hif_device = hif_dev_from_hif(pdev);

	HIF_EXIT();

	return hif_device->swap_mailbox;
}

/**
 * hif_dev_get_fifo_address() - get the fifo addresses for dma
 * @pdev: SDIO HIF object
 * @config: mbox address config pointer
 * @config_len: length of the config buffer in bytes
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
			     void *config,
			     uint32_t config_len)
{
	uint32_t count;
	struct hif_device_mbox_info *cfg =
				(struct hif_device_mbox_info *)config;

	for (count = 0; count < 4; count++)
		cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);

	if (config_len >= sizeof(struct hif_device_mbox_info)) {
		set_extended_mbox_window_info((uint16_t)pdev->func->device,
					      cfg);
		return 0;
	}

	return -EINVAL;
}

/**
 * hif_dev_get_block_size() - get the mbox block size for dma
 * @config: mbox size config pointer
 *
 * Return: none
 */
void hif_dev_get_block_size(void *config)
{
	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
}

/**
 * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
 * @pdev: SDIO HIF object
 * @svc: service index
 * @ul_pipe: uplink pipe id
 * @dl_pipe: downlink pipe id
 *
 * Return: 0 on success, error value on invalid map
 */
QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
				       uint8_t *ul_pipe, uint8_t *dl_pipe)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	switch (svc) {
	case HTT_DATA_MSG_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 1;
			*dl_pipe = 0;
		} else {
			*ul_pipe = 3;
			*dl_pipe = 2;
		}
		break;

	case HTC_CTRL_RSVD_SVC:
	case HTC_RAW_STREAMS_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_DATA_BE_SVC:
	case WMI_DATA_BK_SVC:
	case WMI_DATA_VI_SVC:
	case WMI_DATA_VO_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_CONTROL_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 3;
			*dl_pipe = 2;
		} else {
			*ul_pipe = 1;
			*dl_pipe = 0;
		}
		break;

	default:
		hif_err("%s: Err : Invalid service (%d)",
			__func__, svc);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
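
/*
 * Summary of the mapping above (a sketch derived from the switch, for
 * quick reference; pipes 0/1 belong to mailbox 0 and pipes 2/3 to
 * mailbox 1, see hif_dev_map_pipe_to_mail_box() below):
 *
 *	service                normal (ul, dl)   swap_mailbox (ul, dl)
 *	HTT_DATA_MSG_SVC       (3, 2)            (1, 0)
 *	WMI_CONTROL_SVC        (1, 0)            (3, 2)
 *	WMI data / HTC ctrl /
 *	raw streams            (1, 0)            (1, 0)
 *
 * i.e. the swap_mailbox flag exchanges the mailboxes used by HTT data
 * and WMI control traffic.
 */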

/**
 * hif_dev_setup_device() - Set up device-specific configuration needed by HIF
 * @pdev: HIF layer object
 *
 * Return: 0 on success, error otherwise
 */
int hif_dev_setup_device(struct hif_sdio_device *pdev)
{
	int status = 0;
	uint32_t blocksizes[MAILBOX_COUNT];

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_FIFO_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS)
		hif_err("%s: HIF_DEVICE_GET_MBOX_ADDR failed", __func__);

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS)
		hif_err("%s: HIF_DEVICE_GET_MBOX_BLOCK_SIZE fail", __func__);

	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];

	return status;
}

/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	int status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
	/* Disable all interrupts */
	LOCK_HIF_DEV(pdev);
	mboxEnaRegs(pdev).int_status_enable = 0;
	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
	mboxEnaRegs(pdev).error_status_enable = 0;
	mboxEnaRegs(pdev).counter_int_status_enable = 0;
	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("%s: Err updating intr reg: %d", __func__, status);
}

/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01) |
		INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable the data interrupts for both mailboxes */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register; enable
	 * CPU sourced interrupts #0 and #1.
	 * #0 is used to report an assertion from the target
	 * #1 is used to inform the host that credits have arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("%s: Err updating intr reg: %d", __func__, status);
}

void hif_dev_dump_registers(struct hif_sdio_device *pdev,
			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
{
	int i = 0;

	hif_debug("%s: Mailbox registers:", __func__);

	if (irq_proc) {
		hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status);
		hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
		hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
		hif_debug("CounterIntStat: 0x%x ",
			  irq_proc->counter_int_status);
		hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame);
		hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
		hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
		hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
		hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
		hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);

		if (pdev->MailBoxInfo.gmbox_address != 0) {
			hif_debug("GMBOX-HostIntStatus2:  0x%x ",
				  irq_proc->host_int_status2);
			hif_debug("GMBOX-RX-Avail: 0x%x ",
				  irq_proc->gmbox_rx_avail);
		}
	}

	if (irq_en) {
		hif_debug("IntStatusEnable: 0x%x\n",
			  irq_en->int_status_enable);
		hif_debug("CounterIntStatus: 0x%x\n",
			  irq_en->counter_int_status_enable);
	}

	for (i = 0; mbox_regs && i < 4; i++)
		hif_debug("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]);
}

/* Under HL SDIO with Interface Memory support, there are two
 * reasons to support two mailboxes:
 * a) Different buffers must be placed in different mempools; for
 * example, data uses Interface Memory while descriptors and other
 * buffers use DRAM, and these need separate SDIO mailbox channels.
 * b) Currently, the tx mempool in the LL case is separate from the
 * main mempool, and its structure (descriptors at the beginning of
 * every pool buffer) is different because it only needs to store tx
 * descriptors from the host. To align with the LL case, two-mailbox
 * support is needed here as well, just as in the PCIe LL case.
 */

/**
 * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
 * @pdev: The pointer to the hif device object
 * @pipeid: pipe index
 *
 * Return: mailbox index
 */
static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
					    uint8_t pipeid)
{
	if (2 == pipeid || 3 == pipeid)
		return 1;
	else if (0 == pipeid || 1 == pipeid)
		return 0;

	hif_err("%s: pipeid=%d invalid", __func__, pipeid);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER;
}

/**
 * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
 * @pdev: The pointer to the hif device object
 * @mbox_index: mailbox index
 * @upload: true to select the uplink pipe, false for downlink
 *
 * Return: pipe index, or INVALID_MAILBOX_NUMBER for an invalid mailbox
 */
static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
					    uint8_t mbox_index, bool upload)
{
	if (mbox_index == 0)
		return upload ? 1 : 0;
	else if (mbox_index == 1)
		return upload ? 3 : 2;

	hif_err("%s: mbox_index=%d, upload=%d invalid",
		__func__, mbox_index, upload);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
}

/**
 * hif_get_send_address() - Get the transfer pipe address
 * @pdev: The pointer to the hif device object
 * @pipe: The pipe identifier
 * @addr: Where to store the mapped mailbox address
 *
 * Return: 0 for success, non-zero for failure to map
 */
int hif_get_send_address(struct hif_sdio_device *pdev,
			 uint8_t pipe, unsigned long *addr)
{
	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;

	if (!addr)
		return -EINVAL;

	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);

	if (mbox_index == INVALID_MAILBOX_NUMBER)
		return -EINVAL;

	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;

	return 0;
}

/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: request flags
 * @length: The length pointer
 * @addr: The addr pointer
 *
 * Return: none
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		hif_err("%s: Invalid write addr: 0x%08x\n", __func__, taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			hif_err("%s: Error (%u > %u)",
				__func__, *length, mboxlen);
			return;
		}

		taddr = taddr + (mboxlen - *length);
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
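
/*
 * Worked example for the fixup above (values are assumptions for
 * illustration): if a mailbox window starts at 0x1000 and is
 * mboxlen = 0x800 wide, a 0x100-byte write is shifted to
 * 0x1000 + (0x800 - 0x100) = 0x1700, so its final byte lands on the
 * end-of-mailbox boundary that marks the message as complete to the
 * target. Any dummy space encoded in the request flags is then added
 * on top:
 *
 *	taddr = 0x1000 + (0x800 - 0x100);             // taddr == 0x1700
 *	taddr += (req & HIF_DUMMY_SPACE_MASK) >> 16;  // extra padding
 */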

/**
 * hif_dev_recv_packet() - Receive an HTC packet/packet information from device
 * @pdev: HIF device object
 * @packet: The HTC packet pointer
 * @recv_length: The length of information to be received
 * @mbox_index: The mailbox that contains this information
 *
 * Return: 0 for success, non-zero on error
 */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	bool sync = (packet->Completion) ? false : true;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	if (padded_length > packet->BufferLength) {
		hif_err("%s: No space for padlen:%d recvlen:%d bufferlen:%d",
			__func__, padded_length,
			recv_length, packet->BufferLength);
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in Endpoint member */
	hif_debug("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		  __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		  padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		hif_err("%s : Failed %d", __func__, status);

	if (sync) {
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;

			hif_debug("%s:EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X\n",
				  __func__,
				  hdr->EndpointID, hdr->PayloadLen,
				  hdr->Flags, hdr->ControlBytes0,
				  hdr->ControlBytes1);
		}
	}

	return status;
}
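
/*
 * Illustrative sketch of the padding rule used above (the block size is
 * an assumption; the real value comes from HIF_DEVICE_GET_BLOCK_SIZE):
 * with a 512-byte SDIO block size, DEV_CALC_RECV_PADDED_LEN rounds the
 * transfer up to a whole number of blocks, e.g.
 *
 *	recv_length = 1541  ->  padded_length = 2048  (4 blocks)
 *	recv_length =  512  ->  padded_length =  512  (1 block)
 *
 * which is why the packet buffer must be at least padded_length bytes.
 */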

static QDF_STATUS hif_dev_issue_recv_packet_bundle
(
	struct hif_sdio_device *pdev,
	HTC_PACKET_QUEUE *recv_pkt_queue,
	HTC_PACKET_QUEUE *sync_completion_queue,
	uint8_t mail_box_index,
	int *num_packets_fetched,
	bool partial_bundle
)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		hif_warn("%s, partial bundle detected num: %d, %d\n",
			 __func__,
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		hif_err("%s: packet_rx_bundle is NULL\n", __func__);
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		hif_err("%s, hif_send Failed status:%d\n",
			__func__, status);
	} else {
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
				DEV_CALC_RECV_PADDED_LEN(pdev,
							 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length += HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}
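
/*
 * Sizing sketch for the bundle above (the concrete values of
 * HTC_MAX_MSG_PER_BUNDLE_RX and TargetCreditSize are configuration
 * dependent; the numbers here are assumptions for illustration): with
 * HTC_MAX_MSG_PER_BUNDLE_RX = 32 and TargetCreditSize = 1664, one
 * bundled read can move up to 32 * 1664 = 53248 bytes in a single SDIO
 * transaction. Packets are packed back to back at their padded lengths,
 * then the single read lands in bundle_buffer and is copied out to each
 * packet's own buffer in the iteration loop.
 */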

#define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t);

	hif_debug("%s: NumLookAheads: %d\n", __func__, num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF, which may have restrictions
		 * on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packets queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			hif_err("%s: Invalid Endpoint in lookahead: %d\n",
				__func__, id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets as indicated
		 * by the lookaheads; these packets are stored
		 * in the recv_q queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) != 0) {
					/* we couldn't fetch all packets at one
					 * time; this creates a partial
					 * bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt);
				if (!pkt)
					break;

				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) > 0) {
					/* lookaheads in all packets except the
					 * last one must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (!pkt)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* sent synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled,
		 * we can process each packet, check lookahead, then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt);
			if (!pkt)
				break;

			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more look aheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happens in same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}
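
/*
 * Note on lookaheads (a sketch of the mechanism used above): a
 * lookahead is a copy of the first 4 bytes of the next HTC frame,
 * exported by the target through the RX lookahead registers. Those
 * bytes are the start of the HTC_FRAME_HDR, so the handler can learn
 * the endpoint and payload length before issuing the mailbox read,
 * e.g. (field layout per HTC_FRAME_HDR; values are illustrative):
 *
 *	uint32_t la = look_aheads[0];
 *	HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *)&la;
 *	// hdr->EndpointID and hdr->PayloadLen size the next fetch
 */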

/**
 * hif_dev_service_cpu_interrupt() - service fatal interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t cpu_int_status;

	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
			 mboxEnaRegs(pdev).cpu_int_status_enable;

	hif_err("%s: 0x%x", __func__, (uint32_t)cpu_int_status);

	/* Clear the interrupt */
	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;
	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte aligned
	 * to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes.
	 * Set the W1C value to clear the interrupt; this hits the
	 * register first.
	 */
	reg_buffer[0] = cpu_int_status;
	/* the remaining 3 values are set to zero, which has no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				CPU_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);

	/* The interrupt sent to the host is generated via bit0
	 * of the CPU INT register
	 */
	if (cpu_int_status & 0x1) {
		if (pdev->hif_callbacks.fwEventHandler)
			/* It calls into HTC which propagates this
			 * to ol_target_failure()
			 */
			pdev->hif_callbacks.fwEventHandler(
				pdev->hif_callbacks.Context,
				QDF_STATUS_E_FAILURE);
	} else {
		hif_err("%s: Unrecognized CPU event", __func__);
	}

	return status;
}
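
/*
 * Illustrative note on the write-one-to-clear (W1C) pattern above: the
 * status register clears the bits that are written as 1 and leaves the
 * rest untouched, so writing back exactly the bits that were observed
 * acknowledges only those events. For example, if cpu_int_status reads
 * 0x03 (interrupts #0 and #1 pending), then
 *
 *	reg_buffer[0] = 0x03;	// clears #0 and #1, nothing else
 *
 * and a new interrupt arriving between the read and the write is not
 * accidentally lost.
 */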

/**
 * hif_dev_service_error_interrupt() - service error interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t error_int_status = 0;

	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
	hif_err("%s: 0x%x", __func__, error_int_status);

	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
		hif_err("%s: Error : Wakeup", __func__);

	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
		hif_err("%s: Error : Rx Underflow", __func__);

	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
		hif_err("%s: Error : Tx Overflow", __func__);

	/* Clear the interrupt */
	mboxProcRegs(pdev).error_int_status &= ~error_int_status;

	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte
	 * aligned to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes.
	 */

	/* set the W1C value to clear the interrupt */
	reg_buffer[0] = error_int_status;
	/* the remaining 3 values are set to zero, which has no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				ERROR_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_debug_interrupt() - service debug interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
{
	uint32_t dummy;
	QDF_STATUS status;

	/* Send a target failure event to the application */
	hif_err("%s: Target debug interrupt", __func__);

	/* Clear the interrupt: the debug error interrupt is counter 0;
	 * reading the counter clears the interrupt.
	 */
	status = hif_read_write(pdev->HIFDevice,
				COUNT_DEC_ADDRESS,
				(uint8_t *)&dummy,
				4, HIF_RD_SYNC_BYTE_INC, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_counter_interrupt() - service counter interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static
QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
{
	uint8_t counter_int_status;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));

	counter_int_status = mboxProcRegs(pdev).counter_int_status &
			     mboxEnaRegs(pdev).counter_int_status_enable;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
			 counter_int_status));

	/* Check if the debug interrupt is pending
	 * NOTE: other modules like GMBOX may use the counter interrupt
	 * for credit flow control on other counters, we only need to
	 * check for the debug assertion counter interrupt
	 */
	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
		return hif_dev_service_debug_interrupt(pdev);

	return QDF_STATUS_SUCCESS;
}

#define RX_LOOKAHEAD_GET(pdev, i) \
	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * i]
/**
 * hif_dev_process_pending_irqs() - process pending interrupts
 * @pdev: hif sdio device context
 * @done: pending irq completion status
 * @async_processing: sync/async processing flag
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
					bool *done,
					bool *async_processing)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t host_int_status = 0;
	uint32_t l_ahead[MAILBOX_USED_COUNT];
	int i;

	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
			 (unsigned long)pdev));

	/* NOTE: the HIF implementation guarantees that the context
	 * of this call allows us to perform SYNCHRONOUS I/O,
	 * that is, we can block, sleep or call any API that
	 * can block or switch thread/task contexts.
	 * This is a fully schedulable context.
	 */
	do {
		if (mboxEnaRegs(pdev).int_status_enable == 0) {
			/* interrupt enables have been cleared, do not try
			 * to process any pending interrupts that
			 * may result in more bus transactions.
			 * The target may be unresponsive at this point.
			 */
			break;
		}
		status = hif_read_write(pdev->HIFDevice,
					HOST_INT_STATUS_ADDRESS,
					(uint8_t *)&mboxProcRegs(pdev),
					sizeof(mboxProcRegs(pdev)),
					HIF_RD_SYNC_BYTE_INC, NULL);

		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
			hif_dev_dump_registers(pdev,
					       &mboxProcRegs(pdev),
					       &mboxEnaRegs(pdev),
					       &mboxCountRegs(pdev));
		}

		/* Update only those registers that are enabled */
		host_int_status = mboxProcRegs(pdev).host_int_status
				  & mboxEnaRegs(pdev).int_status_enable;

		/* only look at mailbox status if the HIF layer did not
		 * provide this function, on some HIF interfaces reading
		 * the RX lookahead is not valid to do
		 */
		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			l_ahead[i] = 0;
			if (host_int_status & (1 << i)) {
				/* mask out pending mailbox value, we use
				 * "lookAhead" as the real flag for
				 * mailbox processing below
				 */
				host_int_status &= ~(1 << i);
				if (mboxProcRegs(pdev).
				    rx_lookahead_valid & (1 << i)) {
					/* mailbox has a message and the
					 * look ahead is valid
					 */
					l_ahead[i] = RX_LOOKAHEAD_GET(pdev, i);
				}
			}
		} /* end of for loop */
	} while (false);

	do {
		bool bLookAheadValid = false;
		/* did the interrupt status fetches succeed? */
		if (QDF_IS_STATUS_ERROR(status))
			break;

		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			if (l_ahead[i] != 0) {
				bLookAheadValid = true;
				break;
			}
		}

		if ((host_int_status == 0) && !bLookAheadValid) {
			/* nothing to process, the caller can use this
			 * to break out of a loop
			 */
			*done = true;
			break;
		}

		if (bLookAheadValid) {
			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
				int fetched = 0;

				if (l_ahead[i] == 0)
					continue;
				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
						("mbox[%d],lookahead:0x%X\n",
						i, l_ahead[i]));
				/* Mailbox Interrupt, the HTC layer may issue
				 * async requests to empty the mailbox...
				 * When emptying the recv mailbox we use the
				 * async handler from the completion routine
				 * of the caller's read request. This can
				 * improve performance by reducing the
				 * context switching when we rapidly
				 * pull packets
				 */
				status = hif_dev_recv_message_pending_handler(
							pdev, i,
							&l_ahead[i], 1,
							async_processing,
							&fetched);
				if (QDF_IS_STATUS_ERROR(status))
					break;

				if (!fetched) {
					/* HTC could not pull any messages out
					 * due to lack of resources; force DSR
					 * handler to ack the interrupt
					 */
					*async_processing = false;
					pdev->RecheckIRQStatusCnt = 0;
				}
			}
		}

		/* now handle the rest of them */
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Valid source for OTHER interrupts: 0x%x\n",
				host_int_status));

		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
			/* CPU Interrupt */
			status = hif_dev_service_cpu_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
			/* Error Interrupt */
			status = hif_dev_service_error_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
			/* Counter Interrupt */
			status = hif_dev_service_counter_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

	} while (false);

	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily, which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers.
	 *
	 * NOTE: for host interfaces that use the special
	 * GetPendingEventsFunc, this optimization cannot be used due to
	 * possible side effects. For example, SPI requires the host
	 * to drain all messages from the mailbox before exiting
	 * the ISR routine.
	 */
	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Bypass IRQ Status re-check, forcing done\n"));
		*done = true;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
			 *done, *async_processing, status));

	return status;
}

#define DEV_CHECK_RECV_YIELD(pdev) \
	((pdev)->CurrentDSRRecvCount >= \
	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 *
 * @context: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success, error status otherwise
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool done = false;
	bool async_proc = false;

	/* reset the recv counter that tracks when we need
	 * to yield from the DSR
	 */
	pdev->CurrentDSRRecvCount = 0;
	/* reset counter used to flag a re-scan of IRQ
	 * status registers on the target
	 */
	pdev->RecheckIRQStatusCnt = 0;

	while (!done) {
		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
			/* the HIF layer does not allow async IRQ processing,
			 * override the asyncProc flag
			 */
			async_proc = false;
			/* this will cause us to re-enter ProcessPendingIRQ()
			 * and re-read interrupt status registers.
			 * This has a nice side effect of blocking us until all
			 * async read requests are completed. This behavior is
			 * required as we do not allow ASYNC processing
			 * in interrupt handlers (like Windows CE)
			 */

			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
				/* ProcessPendingIRQs() pulled enough recv
				 * messages to satisfy the yield count, stop
				 * checking for more messages and return
				 */
				break;
		}

		if (async_proc) {
			/* the function does some async I/O for performance,
			 * we need to exit the ISR immediately, the check below
			 * will prevent the interrupt from being
			 * Ack'd while we handle it asynchronously
			 */
			break;
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
		/* Ack the interrupt only if:
		 *  1. we did not get any errors in processing interrupts
		 *  2. there are no outstanding async processing requests
		 */
		if (pdev->DSRCanYield) {
			/* if the DSR can yield do not ACK the interrupt, there
			 * could be more pending messages. The HIF layer
			 * must ACK the interrupt on behalf of HTC
			 */
			hif_info("%s:  Yield (RX count: %d)",
				 __func__, pdev->CurrentDSRRecvCount);
		} else {
			hif_ack_interrupt(pdev->HIFDevice);
		}
	}

	return status;
}

/**
 * hif_read_write() - queue a read/write request
 * @device: pointer to hif device structure
 * @address: address to read or write
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request
 * @context: pointer to hold calling context
 *
 * Return: 0 on success, error number otherwise.
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *device,
	       unsigned long address,
	       char *buffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct bus_request *busrequest;

	AR_DEBUG_ASSERT(device);
	AR_DEBUG_ASSERT(device->func);
	hif_debug("%s: device 0x%pK addr 0x%lX buffer 0x%pK",
		  __func__, device, address, buffer);
	hif_debug("%s: len %d req 0x%X context 0x%pK",
		  __func__, length, request, context);

	/* SDIO r/w is not needed while suspended, so just return */
	if ((device->is_suspend) &&
	    (device->power_config == HIF_DEVICE_POWER_CUT)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
		return QDF_STATUS_SUCCESS;
	}
	do {
		if ((request & HIF_ASYNCHRONOUS) ||
		    (request & HIF_SYNCHRONOUS)) {
			/* serialize all requests through the async thread */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Execution mode: %s\n", __func__,
					 (request & HIF_ASYNCHRONOUS) ? "Async"
					 : "Synch"));
			busrequest = hif_allocate_bus_request(device);
			if (!busrequest) {
				hif_err("%s:bus requests unavail", __func__);
				hif_err("%s, addr:0x%lX, len:%d",
					request & HIF_SDIO_READ ? "READ" :
					"WRITE", address, length);
				return QDF_STATUS_E_FAILURE;
			}
			busrequest->address = address;
			busrequest->buffer = buffer;
			busrequest->length = length;
			busrequest->request = request;
			busrequest->context = context;

			add_to_async_list(device, busrequest);

			if (request & HIF_SYNCHRONOUS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: queued sync req: 0x%lX\n",
						 __func__,
						 (unsigned long)busrequest));

				/* wait for completion */
				up(&device->sem_async);
				if (down_interruptible(&busrequest->sem_req) ==
				    0) {
					QDF_STATUS status = busrequest->status;

					hif_debug("%s: sync freeing 0x%lX:0x%X",
						  __func__,
						  (unsigned long)busrequest,
						  busrequest->status);
					hif_debug("%s: freeing req: 0x%X",
						  __func__,
						  (unsigned int)request);
					hif_free_bus_request(device,
							     busrequest);
					return status;
				} else {
					/* interrupted, exit */
					return QDF_STATUS_E_FAILURE;
				}
			} else {
				hif_debug("%s: queued async req: 0x%lX",
					  __func__, (unsigned long)busrequest);
				up(&device->sem_async);
				return QDF_STATUS_E_PENDING;
			}
		} else {
			hif_err("%s: Invalid execution mode: 0x%08x",
				__func__, (unsigned int)request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
	} while (0);

	return status;
}
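
/*
 * Usage sketch (hypothetical caller, shown for illustration only): a
 * synchronous 4-byte register read through the request queue. The
 * request is handed to the async task, and hif_read_write() blocks on
 * the per-request semaphore until the task completes it:
 *
 *	uint32_t val;
 *	QDF_STATUS st = hif_read_write(device, HOST_INT_STATUS_ADDRESS,
 *				       (char *)&val, sizeof(val),
 *				       HIF_RD_SYNC_BYTE_INC, NULL);
 *
 * Passing an async request flag with a completion context returns
 * QDF_STATUS_E_PENDING instead, and completion is signalled later
 * through the registered rw_compl_handler.
 */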

/**
 * hif_sdio_func_enable() - Handle device enabling as per device
 * @ol_sc: HIF object
 * @func: SDIO function pointer
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS hif_sdio_func_enable(struct hif_softc *ol_sc,
				       struct sdio_func *func)
{
	struct hif_sdio_dev *device = get_hif_device(ol_sc, func);

	if (device->is_disabled) {
		int ret = 0;

		sdio_claim_host(func);

		ret = hif_sdio_quirk_async_intr(ol_sc, func);
		if (ret) {
			hif_err("%s: Error setting async intr:%d",
				__func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		func->enable_timeout = 100;
		ret = sdio_enable_func(func);
		if (ret) {
			hif_err("%s: Unable to enable function: %d",
				__func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
		if (ret) {
			hif_err("%s: Unable to set block size 0x%X : %d\n",
				__func__, HIF_BLOCK_SIZE, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = hif_sdio_quirk_mod_strength(ol_sc, func);
		if (ret) {
			hif_err("%s: Error setting mod strength : %d\n",
				__func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		sdio_release_host(func);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * __hif_read_write() - sdio read/write wrapper
 * @device: pointer to hif device structure
 * @address: address to read or write
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request
 * @context: pointer to hold calling context
 *
 * Return: 0 on success, error number otherwise.
 */
static QDF_STATUS
__hif_read_write(struct hif_sdio_dev *device,
		 uint32_t address, char *buffer,
		 uint32_t length, uint32_t request, void *context)
{
	uint8_t opcode;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int ret = A_OK;
	uint8_t *tbuffer;
	bool bounced = false;

	if (!device) {
		hif_err("%s: device null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!device->func) {
		hif_err("%s: func null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	hif_debug("%s: addr:0X%06X, len:%08d, %s, %s", __func__,
		  address, length,
		  request & HIF_SDIO_READ ? "Read " : "Write",
		  request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");

	do {
		if (request & HIF_EXTENDED_IO) {
			//HIF_INFO_HI("%s: Command type: CMD53\n", __func__);
		} else {
			hif_err("%s: Invalid command type: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_BLOCK_BASIS) {
			/* round to whole block length size */
			length =
				(length / HIF_BLOCK_SIZE) *
				HIF_BLOCK_SIZE;
			hif_debug("%s: Block mode (BlockLen: %d)\n",
				  __func__, length);
		} else if (request & HIF_BYTE_BASIS) {
			hif_debug("%s: Byte mode (BlockLen: %d)\n",
				  __func__, length);
		} else {
			hif_err("%s: Invalid data mode: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
		if (request & HIF_SDIO_WRITE) {
			hif_fixup_write_param(device, request,
					      &length, &address);

			hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X\n",
				  address, length,
				  (request & HIF_DUMMY_SPACE_MASK) >> 16);
		}

		if (request & HIF_FIXED_ADDRESS) {
			opcode = CMD53_FIXED_ADDRESS;
			hif_debug("%s: Addr mode: fixed 0x%X\n",
				  __func__, address);
		} else if (request & HIF_INCREMENTAL_ADDRESS) {
			opcode = CMD53_INCR_ADDRESS;
			hif_debug("%s: Address mode: Incremental 0x%X\n",
				  __func__, address);
		} else {
			hif_err("%s: Invalid address mode: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_SDIO_WRITE) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				tbuffer = device->dma_buffer;
				/* copy the write data to the dma buffer */
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("%s: Invalid write len: %d\n",
						__func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				memcpy(tbuffer, buffer, length);
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_writesb(device->func, address,
						   tbuffer, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_toio(device->func, address,
						       tbuffer, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			}
		} else if (request & HIF_SDIO_READ) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("%s: Invalid read len: %d\n",
						__func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				tbuffer = device->dma_buffer;
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_readsb(device->func, tbuffer,
						  address, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_fromio(device->func,
							 tbuffer, address,
							 length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			}
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (bounced && tbuffer)
				memcpy(buffer, tbuffer, length);
#endif
		} else {
			hif_err("%s: Invalid dir: 0x%08x", __func__, request);
			status = QDF_STATUS_E_INVAL;
			return status;
		}

		if (ret) {
			hif_err("%s: SDIO bus operation failed!", __func__);
			hif_err("%s: MMC stack returned : %d", __func__, ret);
			hif_err("%s: addr:0X%06X, len:%08d, %s, %s",
				__func__, address, length,
				request & HIF_SDIO_READ ? "Read " : "Write",
				request & HIF_ASYNCHRONOUS ?
				"Async" : "Sync");
			status = QDF_STATUS_E_FAILURE;
		}
	} while (false);

	return status;
}
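
/*
 * Note on the request flags consumed above (a sketch; the exact flag
 * composition lives in the HIF headers and is an assumption here):
 * convenience requests such as HIF_RD_SYNC_BYTE_INC appear to combine
 * a direction (HIF_SDIO_READ/HIF_SDIO_WRITE), an execution mode
 * (HIF_SYNCHRONOUS/HIF_ASYNCHRONOUS), a data mode (HIF_BYTE_BASIS/
 * HIF_BLOCK_BASIS) and an address mode (HIF_FIXED_ADDRESS/
 * HIF_INCREMENTAL_ADDRESS), plus HIF_EXTENDED_IO for CMD53. Each
 * branch in __hif_read_write() decodes one of these fields, which is
 * why a request missing any field is rejected as invalid.
 */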

/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * thread function to serialize all requests, both sync and async
 * Return: 0 on success, error number otherwise.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;
	bool claimed = false;

	device = (struct hif_sdio_dev *)param;
	set_current_state(TASK_INTERRUPTIBLE);
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq) {
			request = device->asyncreq;
			if (request->inusenext)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			qdf_spin_unlock_irqrestore(&device->asynclock);
			hif_debug("%s: processing req: 0x%lX",
				  __func__, (unsigned long)request);

			if (!claimed) {
				sdio_claim_host(device->func);
				claimed = true;
			}
			if (request->scatter_req) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously, note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call __hif_read_write in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->
							 request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					void *context = request->context;

					hif_free_bus_request(device, request);
					device->htc_callbacks.
					rw_compl_handler(context, status);
				} else {
					hif_debug("%s: upping req: 0x%lX",
						  __func__,
						  (unsigned long)request);
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		if (claimed) {
			sdio_release_host(device->func);
			claimed = false;
		}
	}

	kthread_complete_and_exit(&device->async_completion, 0);

	return 0;
}

/**
 * hif_disable_func() - Disable SDIO function
 *
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @reset: If this is called from resume or probe
 *
 * Return: 0 in case of success, else error value
 */
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
	if (!IS_ERR(device->async_task)) {
		init_completion(&device->async_completion);
		device->async_shutdown = 1;
		up(&device->sem_async);
		wait_for_completion(&device->async_completion);
		device->async_task = NULL;
		sema_init(&device->sem_async, 0);
	}

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}

/**
 * hif_enable_func() - Enable SDIO function
 *
 * @ol_sc: HIF object pointer
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @resume: If this is called from resume or probe
 *
 * Return: 0 in case of success, else error value
 */
QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
			   struct sdio_func *func, bool resume)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	if (!device) {
		hif_err("%s: HIF device is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (hif_sdio_func_enable(ol_sc, func))
		return QDF_STATUS_E_FAILURE;

	/* create async I/O thread */
	if (!device->async_task && device->is_disabled) {
		device->async_shutdown = 0;
		device->async_task = kthread_create(async_task,
						    (void *)device,
						    "AR6K Async");
		if (IS_ERR(device->async_task)) {
			hif_err("%s: Error creating async task",
				__func__);
			return QDF_STATUS_E_FAILURE;
		}
		device->is_disabled = false;
		wake_up_process(device->async_task);
	}

	if (!resume)
		ret = hif_sdio_probe(ol_sc, func, device);

	HIF_EXIT();

	return ret;
}
#endif /* CONFIG_SDIO_TRANSFER_MAILBOX */