/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef CONFIG_SDIO_TRANSFER_MAILBOX
#define ATH_MODULE_NAME hif
#include <linux/kthread.h>
#include <qdf_types.h>
#include <qdf_status.h>
#include <qdf_timer.h>
#include <qdf_time.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_util.h>
#include <qdf_defer.h>
#include <qdf_atomic.h>
#include <qdf_nbuf.h>
#include <qdf_threads.h>
#include <athdefs.h>
#include <qdf_net_types.h>
#include <a_types.h>
#include <a_osapi.h>
#include <hif.h>
#include <htc_internal.h>
#include <htc_services.h>
#include <a_debug.h>
#include "hif_sdio_internal.h"
#include "if_sdio.h"
#include "regtable.h"
#include "transfer.h"

/* By default, set up a bounce buffer for the data packets.
 * If the underlying host controller driver does not use DMA,
 * you may be able to skip this step and save the memory
 * allocation and transfer time.
 */
#define HIF_USE_DMA_BOUNCE_BUFFER 1
#if HIF_USE_DMA_BOUNCE_BUFFER
/* Macro to check whether a DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (e.g. buffers on the stack).
 * The virt_addr_valid() check fails on stack memory.
 */
#define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
					!virt_addr_valid((buffer)))
#else
#define BUFFER_NEEDS_BOUNCE(buffer)   (false)
#endif
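
/* Illustrative sketch (not part of the driver): BUFFER_NEEDS_BOUNCE()
 * flags any buffer that is either not 4-byte aligned or not backed by
 * directly mapped kernel memory (virt_addr_valid() fails on stack
 * memory, as noted above), e.g.:
 *
 *	char stack_buf[8];                  // on the stack
 *	char *heap_buf = kmalloc(8, GFP_KERNEL);
 *
 *	BUFFER_NEEDS_BOUNCE(stack_buf);     // true: fails virt_addr_valid()
 *	BUFFER_NEEDS_BOUNCE(heap_buf + 1);  // true: not WORD-aligned
 *	BUFFER_NEEDS_BOUNCE(heap_buf);      // false: aligned and DMA-able
 */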

#ifdef SDIO_3_0
/**
 * set_extended_mbox_size() - set extended MBOX size
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
	pinfo->mbox_prop[1].extended_size =
		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
}

/**
 * set_extended_mbox_address() - set extended MBOX address
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[1].extended_address =
		pinfo->mbox_prop[0].extended_address +
		pinfo->mbox_prop[0].extended_size +
		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
}
#else
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}

static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
}
#endif

/**
 * set_extended_mbox_window_info() - set extended MBOX window
 * information for SDIO interconnects
 * @manf_id: manufacturer id
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_window_info(uint16_t manf_id,
					  struct hif_device_mbox_info *pinfo)
{
	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
	case MANUFACTURER_ID_AR6002_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
		break;
	case MANUFACTURER_ID_AR6003_BASE:
		/* MBOX 0 has an extended range */
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6004_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	case MANUFACTURER_ID_AR6320_BASE:
	{
		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;

		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		if (rev < 4)
			pinfo->mbox_prop[0].extended_size =
				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
		else
			set_extended_mbox_size(pinfo);
		set_extended_mbox_address(pinfo);
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	}
	case MANUFACTURER_ID_QCA9377_BASE:
	case MANUFACTURER_ID_QCA9379_BASE:
		pinfo->mbox_prop[0].extended_address =
			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
		pinfo->mbox_prop[0].extended_size =
			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
		pinfo->mbox_prop[1].extended_address =
			pinfo->mbox_prop[0].extended_address +
			pinfo->mbox_prop[0].extended_size +
			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
		pinfo->mbox_prop[1].extended_size =
			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
		break;
	default:
		A_ASSERT(false);
		break;
	}
}

/**
 * hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
 * @pdev: The HIF layer object
 *
 * Return: none
 */
void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);

	HIF_ENTER();

	hif_device->swap_mailbox = true;

	HIF_EXIT();
}

/**
 * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
 * @pdev: The HIF layer object
 *
 * Return: true or false
 */
bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
{
	struct hif_sdio_device *hif_device;

	HIF_ENTER();

	hif_device = hif_dev_from_hif(pdev);

	HIF_EXIT();

	return hif_device->swap_mailbox;
}

/**
 * hif_dev_get_fifo_address() - get the fifo addresses for dma
 * @pdev: SDIO HIF object
 * @config: mbox address config pointer
 * @config_len: size of the config buffer in bytes
 *
 * Return: 0 for success, non-zero for error
 */
int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
			     void *config,
			     uint32_t config_len)
{
	uint32_t count;
	struct hif_device_mbox_info *cfg =
				(struct hif_device_mbox_info *)config;

	for (count = 0; count < 4; count++)
		cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);

	if (config_len >= sizeof(struct hif_device_mbox_info)) {
		set_extended_mbox_window_info((uint16_t)pdev->func->device,
					      cfg);
		return 0;
	}

	return -EINVAL;
}
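
/* Illustrative usage sketch (assumes a valid pdev): callers pass a
 * struct hif_device_mbox_info large enough for the extended window
 * fields to be filled in as well:
 *
 *	struct hif_device_mbox_info info;
 *
 *	if (hif_dev_get_fifo_address(pdev, &info, sizeof(info)) == 0)
 *		pr_debug("mbox0 at 0x%x, extended window at 0x%x\n",
 *			 info.mbox_addresses[0],
 *			 info.mbox_prop[0].extended_address);
 */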

/**
 * hif_dev_get_block_size() - get the mbox block size for dma
 * @config: mbox size config pointer
 *
 * Return: none
 */
void hif_dev_get_block_size(void *config)
{
	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
}
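
/* Illustrative usage sketch: callers hand in an array of four uint32_t
 * values, one block size per mailbox:
 *
 *	uint32_t blk[4];
 *
 *	hif_dev_get_block_size(blk);
 */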

/**
 * hif_dev_map_service_to_pipe() - map a service id to its ul/dl pipes.
 * @pdev: SDIO HIF object
 * @svc: service index
 * @ul_pipe: uplink pipe id
 * @dl_pipe: downlink pipe id
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on invalid map
 */
QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
				       uint8_t *ul_pipe, uint8_t *dl_pipe)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	switch (svc) {
	case HTT_DATA_MSG_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 1;
			*dl_pipe = 0;
		} else {
			*ul_pipe = 3;
			*dl_pipe = 2;
		}
		break;

	case HTC_CTRL_RSVD_SVC:
	case HTC_RAW_STREAMS_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_DATA_BE_SVC:
	case WMI_DATA_BK_SVC:
	case WMI_DATA_VI_SVC:
	case WMI_DATA_VO_SVC:
		*ul_pipe = 1;
		*dl_pipe = 0;
		break;

	case WMI_CONTROL_SVC:
		if (hif_dev_get_mailbox_swap(pdev)) {
			*ul_pipe = 3;
			*dl_pipe = 2;
		} else {
			*ul_pipe = 1;
			*dl_pipe = 0;
		}
		break;

	default:
		HIF_ERROR("%s: Err : Invalid service (%d)",
			  __func__, svc);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
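
/* Illustrative sketch (not part of the driver): by default WMI control
 * traffic uses pipes 1/0 and HTT data uses pipes 3/2; when the firmware
 * requests a mailbox swap the two assignments are exchanged:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *
 *	if (QDF_IS_STATUS_SUCCESS(hif_dev_map_service_to_pipe(pdev,
 *					WMI_CONTROL_SVC,
 *					&ul_pipe, &dl_pipe)))
 *		pr_debug("WMI ul:%u dl:%u\n", ul_pipe, dl_pipe);
 */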

/**
 * hif_dev_setup_device() - Set up device-specific parameters required by HIF
 * @pdev: HIF layer object
 *
 * Return: 0 on success, error otherwise
 */
int hif_dev_setup_device(struct hif_sdio_device *pdev)
{
	int status = 0;
	uint32_t blocksizes[MAILBOX_COUNT];

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_FIFO_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: HIF_DEVICE_GET_FIFO_ADDR failed", __func__);

	status = hif_configure_device(NULL, pdev->HIFDevice,
				      HIF_DEVICE_GET_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: HIF_DEVICE_GET_BLOCK_SIZE failed", __func__);

	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];

	return status;
}

/**
 * hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
	/* Disable all interrupts */
	LOCK_HIF_DEV(pdev);
	mboxEnaRegs(pdev).int_status_enable = 0;
	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
	mboxEnaRegs(pdev).error_status_enable = 0;
	mboxEnaRegs(pdev).counter_int_status_enable = 0;
	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: Err updating intr reg: %d", __func__, status);
}

/**
 * hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF object
 *
 * Return: none
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01) |
		INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable the data interrupts for both mailboxes */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register; enable
	 * CPU-sourced interrupts #0 and #1:
	 * #0 is used to report an assertion from the target
	 * #1 is used to inform the host that a credit has arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: Err updating intr reg: %d", __func__, status);
}
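
/* Note on the shifts in hif_dev_unmask_interrupts() (an inference from
 * the register-table macros, not a documented contract): the four enable
 * fields are byte-wide registers that are written in one transfer
 * starting at INT_STATUS_ENABLE_ADDRESS. The _SET() macros appear to
 * position their values within a combined 32-bit register image, so the
 * error (>> 16) and counter (>> 24) values are shifted back down into
 * their one-byte fields before the write.
 */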

void hif_dev_dump_registers(struct hif_sdio_device *pdev,
			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
{
	int i = 0;

	HIF_DBG("%s: Mailbox registers:", __func__);

	if (irq_proc) {
		HIF_DBG("HostIntStatus: 0x%x ", irq_proc->host_int_status);
		HIF_DBG("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
		HIF_DBG("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
		HIF_DBG("CounterIntStat: 0x%x ", irq_proc->counter_int_status);
		HIF_DBG("MboxFrame: 0x%x ", irq_proc->mbox_frame);
		HIF_DBG("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
		HIF_DBG("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
		HIF_DBG("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
		HIF_DBG("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
		HIF_DBG("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);

		if (pdev->MailBoxInfo.gmbox_address != 0) {
			HIF_DBG("GMBOX-HostIntStatus2:  0x%x ",
				irq_proc->host_int_status2);
			HIF_DBG("GMBOX-RX-Avail: 0x%x ",
				irq_proc->gmbox_rx_avail);
		}
	}

	if (irq_en) {
		HIF_DBG("IntStatusEnable: 0x%x\n",
			irq_en->int_status_enable);
		HIF_DBG("CounterIntStatus: 0x%x\n",
			irq_en->counter_int_status_enable);
	}

	for (i = 0; mbox_regs && i < 4; i++)
		HIF_DBG("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]);
}

/* Under HL SDIO, with Interface Memory support, we have
 * the following reasons to support two mailboxes:
 * a) we need to place different buffers in different
 * mempools; for example, data uses Interface Memory while
 * descriptors and the rest use DRAM, so they need different
 * SDIO mbox channels.
 * b) currently, the tx mempool in the LL case is separate from
 * the main mempool; the structure (descs at the beginning
 * of every pool buffer) is different, because it only needs
 * to store tx descs from the host. To align with the LL case,
 * we also need two-mailbox support, just as in the PCIe LL case.
 */

/**
 * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
 * @pdev: The pointer to the hif device object
 * @pipeid: pipe index
 *
 * Return: mailbox index, or INVALID_MAILBOX_NUMBER for an invalid pipe
 */
static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
					    uint8_t pipeid)
{
	if (pipeid == 2 || pipeid == 3)
		return 1;
	else if (pipeid == 0 || pipeid == 1)
		return 0;

	HIF_ERROR("%s: pipeid=%d invalid", __func__, pipeid);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER;
}

/**
 * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
 * @pdev: The pointer to the hif device object
 * @mbox_index: mailbox index
 * @upload: true for the uplink (send) pipe, false for downlink (recv)
 *
 * Return: pipe id, or INVALID_MAILBOX_NUMBER for an invalid mailbox
 */
static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
					    uint8_t mbox_index, bool upload)
{
	if (mbox_index == 0)
		return upload ? 1 : 0;
	else if (mbox_index == 1)
		return upload ? 3 : 2;

	HIF_ERROR("%s: mbox_index=%d, upload=%d invalid",
		  __func__, mbox_index, upload);

	qdf_assert(0);

	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
}
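
/* For reference, the default pipe <-> mailbox mapping implemented by the
 * two helpers above:
 *
 *	mailbox 0: pipe 1 (uplink) / pipe 0 (downlink)
 *	mailbox 1: pipe 3 (uplink) / pipe 2 (downlink)
 */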

/**
 * hif_get_send_address() - Get the transfer pipe address
 * @pdev: The pointer to the hif device object
 * @pipe: The pipe identifier
 * @addr: Pointer to hold the mapped address
 *
 * Return: 0 for success and non-zero for failure to map
 */
int hif_get_send_address(struct hif_sdio_device *pdev,
			 uint8_t pipe, unsigned long *addr)
{
	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;

	if (!addr)
		return -EINVAL;

	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);

	if (mbox_index == INVALID_MAILBOX_NUMBER)
		return -EINVAL;

	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;

	return 0;
}
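
/* Illustrative usage sketch (assumes a valid pdev): resolve the extended
 * mailbox address behind an uplink pipe before issuing a send:
 *
 *	unsigned long send_addr;
 *
 *	if (hif_get_send_address(pdev, 1, &send_addr) == 0)
 *		pr_debug("pipe 1 sends to 0x%lx\n", send_addr);
 */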

/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: The request flags, including the dummy-space bits
 * @length: The length pointer
 * @addr: The addr pointer
 *
 * Return: none
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		HIF_ERROR("%s: Invalid write addr: 0x%08x\n", __func__, taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			HIF_ERROR("%s: Error (%u > %u)",
				  __func__, *length, mboxlen);
			return;
		}

		taddr = taddr + (mboxlen - *length);
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
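
/* Worked example (illustrative numbers only): for a write of length
 * 0x100 aimed at an extended mailbox window of size 0x800 at address
 * 0x2000, with no dummy space requested, the write is shifted so that
 * it ends exactly on the end of the window:
 *
 *	taddr = 0x2000 + (0x800 - 0x100)             ->  0x2700
 *	taddr += (req & HIF_DUMMY_SPACE_MASK) >> 16  ->  0x2700 + 0
 */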

/**
 * hif_dev_recv_packet() - Receive an HTC packet from the device
 * @pdev: HIF device object
 * @packet: The HTC packet pointer
 * @recv_length: The length of information to be received
 * @mbox_index: The mailbox that contains this information
 *
 * Return: 0 for success, non-zero on error
 */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	bool sync = !packet->Completion;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	if (padded_length > packet->BufferLength) {
		HIF_ERROR("%s: No space for padlen:%d recvlen:%d bufferlen:%d",
			  __func__, padded_length,
			  recv_length, packet->BufferLength);
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in Endpoint member */
	HIF_INFO_HI("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		    __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		    padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		HIF_ERROR("%s : Failed %d", __func__, status);

	if (sync) {
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;

			HIF_INFO_HI("%s:EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X\n",
				    __func__,
				    hdr->EndpointID, hdr->PayloadLen,
				    hdr->Flags, hdr->ControlBytes0,
				    hdr->ControlBytes1);
		}
	}

	return status;
}
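
/* Worked example (illustrative numbers, assuming a 256-byte block size):
 * a 260-byte message is padded up to the next block multiple before the
 * mailbox read is issued, so the transfer length becomes:
 *
 *	DEV_CALC_RECV_PADDED_LEN(pdev, 260)  ->  512
 */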

static QDF_STATUS
hif_dev_issue_recv_packet_bundle(struct hif_sdio_device *pdev,
				 HTC_PACKET_QUEUE *recv_pkt_queue,
				 HTC_PACKET_QUEUE *sync_completion_queue,
				 uint8_t mail_box_index,
				 int *num_packets_fetched,
				 bool partial_bundle)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		HIF_WARN("%s, partial bundle detected num: %d, %d\n",
			 __func__,
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		HIF_ERROR("%s: packet_rx_bundle is NULL\n", __func__);
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s, bundle read failed, status:%d\n",
			  __func__, status);
	} else {
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
				DEV_CALC_RECV_PADDED_LEN(pdev,
							 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length +=
						HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}

#define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t);

	HIF_INFO_HI("%s: NumLookAheads: %d\n", __func__, num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF in which HIF may have
		 * restrictions on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packets queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			HIF_ERROR("%s: Invalid Endpoint in lookahead: %d\n",
				  __func__, id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets as indicated
		 * by the lookaheads; these packets are stored
		 * in the recv_q queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(
							&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) != 0) {
					/* we couldn't fetch all packets at one
					 * time; this creates a partial
					 * bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt);
				if (!pkt)
					break;

				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) > 0) {
					/* lookaheads in all packets except the
					 * last one must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (!pkt)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* fetched synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled,
		 * we can process each packet, check lookaheads, then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt);
			if (!pkt)
				break;

			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(
						&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more look aheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happen in the same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}

/**
 * hif_dev_service_cpu_interrupt() - service fatal interrupts
 * synchronously
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t cpu_int_status;

	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
			 mboxEnaRegs(pdev).cpu_int_status_enable;

	HIF_ERROR("%s: 0x%x", __func__, (uint32_t)cpu_int_status);

	/* Clear the interrupt */
	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte aligned
	 * to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes.
	 * Set the W1C value to clear the interrupt; this hits the
	 * register first.
	 */
	reg_buffer[0] = cpu_int_status;
	/* the remaining 3 values are set to zero, which has no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				CPU_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);

	/* The interrupt sent to the host is generated via bit 0
	 * of the CPU INT register
	 */
	if (cpu_int_status & 0x1) {
		if (pdev->hif_callbacks.fwEventHandler)
			/* It calls into HTC which propagates this
			 * to ol_target_failure()
			 */
			pdev->hif_callbacks.fwEventHandler(
				pdev->hif_callbacks.Context,
				QDF_STATUS_E_FAILURE);
	} else {
		HIF_ERROR("%s: Unrecognized CPU event", __func__);
	}

	return status;
}

/**
 * hif_dev_service_error_interrupt() - service error interrupts
 * synchronously
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t error_int_status = 0;

	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
	HIF_ERROR("%s: 0x%x", __func__, error_int_status);

	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
		HIF_ERROR("%s: Error : Wakeup", __func__);

	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
		HIF_ERROR("%s: Error : Rx Underflow", __func__);

	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
		HIF_ERROR("%s: Error : Tx Overflow", __func__);

	/* Clear the interrupt */
	mboxProcRegs(pdev).error_int_status &= ~error_int_status;

	/* Set up the register transfer buffer to hit the register
	 * 4 times; this is done to make the access 4-byte
	 * aligned to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4 bytes
	 */

	/* set the W1C value to clear the interrupt */
	reg_buffer[0] = error_int_status;
	/* the remaining 3 values are set to zero, which has no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				ERROR_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_debug_interrupt() - service debug interrupts
 * synchronously
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
{
	uint32_t dummy;
	QDF_STATUS status;

	/* Send a target failure event to the application */
	HIF_ERROR("%s: Target debug interrupt", __func__);

	/* Clear the interrupt: the debug error interrupt is on
	 * counter 0, so read the counter to clear it.
	 */
	status = hif_read_write(pdev->HIFDevice,
				COUNT_DEC_ADDRESS,
				(uint8_t *)&dummy,
				4, HIF_RD_SYNC_BYTE_INC, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);
	return status;
}

/**
 * hif_dev_service_counter_interrupt() - service counter interrupts
 * synchronously
 * @pdev: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static
QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
{
	uint8_t counter_int_status;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));

	counter_int_status = mboxProcRegs(pdev).counter_int_status &
			     mboxEnaRegs(pdev).counter_int_status_enable;

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
			 counter_int_status));

	/* Check if the debug interrupt is pending
	 * NOTE: other modules like GMBOX may use the counter interrupt
	 * for credit flow control on other counters; we only need to
	 * check for the debug assertion counter interrupt
	 */
	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
		return hif_dev_service_debug_interrupt(pdev);

	return QDF_STATUS_SUCCESS;
}

#define RX_LOOKAHEAD_GET(pdev, i) \
	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)]
/**
 * hif_dev_process_pending_irqs() - process pending interrupts
 * @pdev: hif sdio device context
 * @done: pending irq completion status
 * @async_processing: sync/async processing flag
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
					bool *done,
					bool *async_processing)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t host_int_status = 0;
	uint32_t l_ahead[MAILBOX_USED_COUNT];
	int i;

	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
			 (unsigned long)pdev));

	/* NOTE: the HIF implementation guarantees that the context
	 * of this call allows us to perform SYNCHRONOUS I/O,
	 * that is we can block, sleep or call any API that
	 * can block or switch thread/task contexts.
	 * This is a fully schedulable context.
	 */
	do {
		if (mboxEnaRegs(pdev).int_status_enable == 0) {
			/* interrupt enables have been cleared, do not try
			 * to process any pending interrupts that
			 * may result in more bus transactions.
			 * The target may be unresponsive at this point.
			 */
			break;
		}
		status = hif_read_write(pdev->HIFDevice,
					HOST_INT_STATUS_ADDRESS,
					(uint8_t *)&mboxProcRegs(pdev),
					sizeof(mboxProcRegs(pdev)),
					HIF_RD_SYNC_BYTE_INC, NULL);

		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
			hif_dev_dump_registers(pdev,
					       &mboxProcRegs(pdev),
					       &mboxEnaRegs(pdev),
					       &mboxCountRegs(pdev));
		}

		/* Update only those registers that are enabled */
		host_int_status = mboxProcRegs(pdev).host_int_status
				  & mboxEnaRegs(pdev).int_status_enable;

		/* only look at mailbox status if the HIF layer did not
		 * provide this function; on some HIF interfaces reading
		 * the RX lookahead is not valid to do
		 */
		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			l_ahead[i] = 0;
			if (host_int_status & (1 << i)) {
				/* mask out pending mailbox value, we use
				 * "lookAhead" as the real flag for
				 * mailbox processing below
				 */
				host_int_status &= ~(1 << i);
				if (mboxProcRegs(pdev).
				    rx_lookahead_valid & (1 << i)) {
					/* mailbox has a message and the
					 * look ahead is valid
					 */
					l_ahead[i] = RX_LOOKAHEAD_GET(pdev, i);
				}
			}
		} /* end of for loop */
	} while (false);

	do {
		bool bLookAheadValid = false;
		/* did the interrupt status fetches succeed? */
		if (QDF_IS_STATUS_ERROR(status))
			break;

		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			if (l_ahead[i] != 0) {
				bLookAheadValid = true;
				break;
			}
		}

		if ((host_int_status == 0) && !bLookAheadValid) {
			/* nothing to process, the caller can use this
			 * to break out of a loop
			 */
			*done = true;
			break;
		}

		if (bLookAheadValid) {
			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
				int fetched = 0;

				if (l_ahead[i] == 0)
					continue;
				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
						("mbox[%d],lookahead:0x%X\n",
						i, l_ahead[i]));
				/* Mailbox interrupt: the HTC layer may issue
				 * async requests to empty the mailbox.
				 * When emptying the recv mailbox we use the
				 * async handler from the completion routine
				 * of the caller's read request.
				 * This can improve performance by reducing
				 * context switching when we rapidly
				 * pull packets.
				 */
				status = hif_dev_recv_message_pending_handler(
							pdev, i,
							&l_ahead[i], 1,
							async_processing,
							&fetched);
				if (QDF_IS_STATUS_ERROR(status))
					break;

				if (!fetched) {
					/* HTC could not pull any messages out
					 * due to lack of resources; force the
					 * DSR handler to ack the interrupt
					 */
					*async_processing = false;
					pdev->RecheckIRQStatusCnt = 0;
				}
			}
		}

		/* now handle the rest of them */
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Valid source for OTHER interrupts: 0x%x\n",
				host_int_status));

		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
			/* CPU Interrupt */
			status = hif_dev_service_cpu_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
			/* Error Interrupt */
			status = hif_dev_service_error_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
			/* Counter Interrupt */
			status = hif_dev_service_counter_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

	} while (false);

	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily, which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can
	 * rely on taking another interrupt rather than re-checking
	 * the status registers, which can re-wake the target.
	 *
	 * NOTE: for host interfaces that use the special
	 * GetPendingEventsFunc, this optimization cannot be used due to
	 * possible side effects.  For example, SPI requires the host
	 * to drain all messages from the mailbox before exiting
	 * the ISR routine.
	 */
	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Bypass IRQ Status re-check, forcing done\n"));
		*done = true;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
			 *done, *async_processing, status));

	return status;
}

#define DEV_CHECK_RECV_YIELD(pdev) \
	((pdev)->CurrentDSRRecvCount >= \
	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 * @context: hif sdio device context
 *
 * Return: QDF_STATUS_SUCCESS for success, error status for failure
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool done = false;
	bool async_proc = false;

	/* reset the recv counter that tracks when we need
	 * to yield from the DSR
	 */
	pdev->CurrentDSRRecvCount = 0;
	/* reset counter used to flag a re-scan of IRQ
	 * status registers on the target
	 */
	pdev->RecheckIRQStatusCnt = 0;

	while (!done) {
		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
			/* the HIF layer does not allow async IRQ processing,
			 * override the asyncProc flag
			 */
			async_proc = false;
			/* this will cause us to re-enter ProcessPendingIRQs()
			 * and re-read interrupt status registers.
			 * This has a nice side effect of blocking us until all
			 * async read requests are completed. This behavior is
			 * required as we do not allow ASYNC processing
			 * in interrupt handlers (like Windows CE)
			 */

			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
				/* ProcessPendingIRQs() pulled enough recv
				 * messages to satisfy the yield count, stop
				 * checking for more messages and return
				 */
				break;
		}

		if (async_proc) {
			/* the function does some async I/O for performance;
			 * we need to exit the ISR immediately, the check below
			 * will prevent the interrupt from being
			 * Ack'd while we handle it asynchronously
			 */
			break;
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
		/* Ack the interrupt only if:
		 *  1. we did not get any errors in processing interrupts
		 *  2. there are no outstanding async processing requests
		 */
		if (pdev->DSRCanYield) {
			/* if the DSR can yield do not ACK the interrupt, there
			 * could be more pending messages. The HIF layer
			 * must ACK the interrupt on behalf of HTC
			 */
			HIF_INFO("%s:  Yield (RX count: %d)",
				 __func__, pdev->CurrentDSRRecvCount);
		} else {
			hif_ack_interrupt(pdev->HIFDevice);
		}
	}

	return status;
}

/**
 * hif_read_write() - queue a read/write request
 * @device: pointer to hif device structure
 * @address: address to read from or write to
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request flags
 * @context: pointer to hold calling context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_PENDING for a
 * queued async request, error status otherwise.
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *device,
	       unsigned long address,
	       char *buffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct bus_request *busrequest;

	AR_DEBUG_ASSERT(device);
	AR_DEBUG_ASSERT(device->func);
	HIF_TRACE("%s: device 0x%pK addr 0x%lX buffer 0x%pK",
		  __func__, device, address, buffer);
	HIF_TRACE("%s: len %d req 0x%X context 0x%pK",
		  __func__, length, request, context);

	/* no SDIO r/w is needed during suspend, so just return */
	if ((device->is_suspend) &&
	    (device->power_config == HIF_DEVICE_POWER_CUT)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
		return QDF_STATUS_SUCCESS;
	}
	do {
		if ((request & HIF_ASYNCHRONOUS) ||
		    (request & HIF_SYNCHRONOUS)) {
			/* serialize all requests through the async thread */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Execution mode: %s\n", __func__,
					 (request & HIF_ASYNCHRONOUS) ? "Async"
					 : "Synch"));
			busrequest = hif_allocate_bus_request(device);
			if (!busrequest) {
				HIF_ERROR("%s: bus requests unavail", __func__);
				HIF_ERROR("%s, addr:0x%lX, len:%d",
					  request & HIF_SDIO_READ ? "READ" :
					  "WRITE", address, length);
				return QDF_STATUS_E_FAILURE;
			}
			busrequest->address = address;
			busrequest->buffer = buffer;
			busrequest->length = length;
			busrequest->request = request;
			busrequest->context = context;

			add_to_async_list(device, busrequest);

			if (request & HIF_SYNCHRONOUS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: queued sync req: 0x%lX\n",
						 __func__,
						 (unsigned long)busrequest));

				/* wait for completion */
				up(&device->sem_async);
				if (down_interruptible(&busrequest->sem_req) ==
				    0) {
					QDF_STATUS status = busrequest->status;

					HIF_TRACE("%s: sync freeing 0x%lX:0x%X",
						  __func__,
						  (unsigned long)busrequest,
						  busrequest->status);
					HIF_TRACE("%s: freeing req: 0x%X",
						  __func__,
						  (unsigned int)request);
					hif_free_bus_request(device,
							     busrequest);
					return status;
				} else {
					/* interrupted, exit */
					return QDF_STATUS_E_FAILURE;
				}
			} else {
				HIF_TRACE("%s: queued async req: 0x%lX",
					  __func__, (unsigned long)busrequest);
				up(&device->sem_async);
				return QDF_STATUS_E_PENDING;
			}
		} else {
			HIF_ERROR("%s: Invalid execution mode: 0x%08x",
				  __func__, (unsigned int)request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
	} while (0);

	return status;
}
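
/* Illustrative sketch (not part of the driver): a synchronous register
 * read through the request queue, using the same flags the interrupt
 * code above uses:
 *
 *	uint32_t val;
 *	QDF_STATUS st;
 *
 *	st = hif_read_write(device, HOST_INT_STATUS_ADDRESS,
 *			    (char *)&val, sizeof(val),
 *			    HIF_RD_SYNC_BYTE_INC, NULL);
 *	if (QDF_IS_STATUS_ERROR(st))
 *		HIF_ERROR("read failed: %d", st);
 */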

/**
 * hif_sdio_func_enable() - Handle device enabling as per device
 * @ol_sc: HIF object pointer
 * @func: SDIO function pointer
 *
 * Return: 0 on success, error value otherwise
 */
static int hif_sdio_func_enable(struct hif_softc *ol_sc,
				struct sdio_func *func)
{
	struct hif_sdio_dev *device = get_hif_device(ol_sc, func);

	if (device->is_disabled) {
		int ret = 0;

		sdio_claim_host(func);

		ret = hif_sdio_quirk_async_intr(ol_sc, func);
		if (ret) {
			HIF_ERROR("%s: Error setting async intr:%d",
				  __func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		func->enable_timeout = 100;
		ret = sdio_enable_func(func);
		if (ret) {
			HIF_ERROR("%s: Unable to enable function: %d",
				  __func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
		if (ret) {
			HIF_ERROR("%s: Unable to set block size 0x%X : %d\n",
				  __func__, HIF_BLOCK_SIZE, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		ret = hif_sdio_quirk_mod_strength(ol_sc, func);
		if (ret) {
			HIF_ERROR("%s: Error setting mod strength : %d\n",
				  __func__, ret);
			sdio_release_host(func);
			return QDF_STATUS_E_FAILURE;
		}

		sdio_release_host(func);
	}

	return 0;
}

/**
 * __hif_read_write() - sdio read/write wrapper
 * @device: pointer to hif device structure
 * @address: address to read from or write to
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request flags
 * @context: pointer to hold calling context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */
static QDF_STATUS
__hif_read_write(struct hif_sdio_dev *device,
		 uint32_t address, char *buffer,
		 uint32_t length, uint32_t request, void *context)
{
	uint8_t opcode;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int ret = A_OK;
	uint8_t *tbuffer;
	bool bounced = false;

	if (!device) {
		HIF_ERROR("%s: device null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!device->func) {
		HIF_ERROR("%s: func null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	HIF_INFO_HI("%s: addr:0X%06X, len:%08d, %s, %s", __func__,
		    address, length,
		    request & HIF_SDIO_READ ? "Read " : "Write",
		    request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");

	do {
		if (request & HIF_EXTENDED_IO) {
			HIF_INFO_HI("%s: Command type: CMD53\n", __func__);
		} else {
			HIF_ERROR("%s: Invalid command type: 0x%08x\n",
				  __func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_BLOCK_BASIS) {
			/* round to whole block length size */
			length =
				(length / HIF_BLOCK_SIZE) *
				HIF_BLOCK_SIZE;
			HIF_INFO_HI("%s: Block mode (BlockLen: %d)\n",
				    __func__, length);
		} else if (request & HIF_BYTE_BASIS) {
			HIF_INFO_HI("%s: Byte mode (BlockLen: %d)\n",
				    __func__, length);
		} else {
			HIF_ERROR("%s: Invalid data mode: 0x%08x\n",
				  __func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
		if (request & HIF_SDIO_WRITE) {
			hif_fixup_write_param(device, request,
					      &length, &address);

			HIF_INFO_HI("addr:%08X, len:0x%08X, dummy:0x%04X\n",
				    address, length,
				    (request & HIF_DUMMY_SPACE_MASK) >> 16);
		}

		if (request & HIF_FIXED_ADDRESS) {
			opcode = CMD53_FIXED_ADDRESS;
			HIF_INFO_HI("%s: Addr mode: fixed 0x%X\n",
				    __func__, address);
		} else if (request & HIF_INCREMENTAL_ADDRESS) {
			opcode = CMD53_INCR_ADDRESS;
			HIF_INFO_HI("%s: Address mode: Incremental 0x%X\n",
				    __func__, address);
		} else {
			HIF_ERROR("%s: Invalid address mode: 0x%08x\n",
				  __func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_SDIO_WRITE) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				tbuffer = device->dma_buffer;
				/* copy the write data to the dma buffer */
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					HIF_ERROR("%s: Invalid write len: %d\n",
						  __func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				memcpy(tbuffer, buffer, length);
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_writesb(device->func, address,
						   tbuffer, length);
				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					    __func__, ret, address, length,
					    *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_toio(device->func, address,
						       tbuffer, length);
				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					    __func__, ret, address, length,
					    *(int *)tbuffer);
			}
		} else if (request & HIF_SDIO_READ) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					HIF_ERROR("%s: Invalid read len: %d\n",
						  __func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				tbuffer = device->dma_buffer;
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_readsb(device->func, tbuffer,
						  address, length);
				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					    __func__, ret, address, length,
					    *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_fromio(device->func,
							 tbuffer, address,
							 length);
				HIF_INFO_HI("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					    __func__, ret, address, length,
					    *(int *)tbuffer);
			}
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (bounced && tbuffer)
				memcpy(buffer, tbuffer, length);
#endif
		} else {
			HIF_ERROR("%s: Invalid dir: 0x%08x", __func__, request);
			status = QDF_STATUS_E_INVAL;
			return status;
		}

		if (ret) {
			HIF_ERROR("%s: SDIO bus operation failed!", __func__);
			HIF_ERROR("%s: MMC stack returned : %d", __func__, ret);
			HIF_ERROR("%s: addr:0X%06X, len:%08d, %s, %s",
				  __func__, address, length,
				  request & HIF_SDIO_READ ? "Read " : "Write",
				  request & HIF_ASYNCHRONOUS ?
				  "Async" : "Sync");
			status = QDF_STATUS_E_FAILURE;
		}
	} while (false);

	return status;
}

/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * Thread function that serializes all requests, both sync and async.
 *
 * Return: 0 on success, error number otherwise.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;
	bool claimed = false;

	device = (struct hif_sdio_dev *)param;
	set_current_state(TASK_INTERRUPTIBLE);
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq) {
			request = device->asyncreq;
			if (request->inusenext)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			qdf_spin_unlock_irqrestore(&device->asynclock);
			HIF_TRACE("%s: processing req: 0x%lX",
				  __func__, (unsigned long)request);

			if (!claimed) {
				sdio_claim_host(device->func);
				claimed = true;
			}
			if (request->scatter_req) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously; note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call __hif_read_write in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					void *context = request->context;

					HIF_TRACE("%s: freeing req: 0x%lX",
						  __func__,
						  (unsigned long)request);
					hif_free_bus_request(device, request);

					HIF_TRACE("%s: completion req 0x%lX",
						  __func__,
						  (unsigned long)request);
					device->htc_callbacks.
					rw_compl_handler(context, status);
				} else {
					HIF_TRACE("%s: upping req: 0x%lX",
						  __func__,
						  (unsigned long)request);
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		if (claimed) {
			sdio_release_host(device->func);
			claimed = false;
		}
	}

	complete_and_exit(&device->async_completion, 0);

	return 0;
}

/**
 * hif_disable_func() - Disable SDIO function
 *
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @reset: whether to reset the function when disabling it
 *
 * Return: QDF_STATUS_SUCCESS in case of success, else error value
 */
QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
			    struct sdio_func *func,
			    bool reset)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();
	if (!IS_ERR(device->async_task)) {
		init_completion(&device->async_completion);
		device->async_shutdown = 1;
		up(&device->sem_async);
		wait_for_completion(&device->async_completion);
		device->async_task = NULL;
		sema_init(&device->sem_async, 0);
	}

	status = hif_sdio_func_disable(device, func, reset);
	if (status == QDF_STATUS_SUCCESS)
		device->is_disabled = true;

	cleanup_hif_scatter_resources(device);

	HIF_EXIT();

	return status;
}

/**
 * hif_enable_func() - Enable SDIO function
 *
 * @ol_sc: HIF object pointer
 * @device: HIF device pointer
 * @func: SDIO function pointer
 * @resume: true if this is called from resume, false if from probe
 *
 * Return: QDF_STATUS_SUCCESS in case of success, else error value
 */
QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
			   struct sdio_func *func, bool resume)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	if (!device) {
		HIF_ERROR("%s: HIF device is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (hif_sdio_func_enable(ol_sc, func))
		return QDF_STATUS_E_FAILURE;

	/* create async I/O thread */
	if (!device->async_task && device->is_disabled) {
		device->async_shutdown = 0;
		device->async_task = kthread_create(async_task,
						    (void *)device,
						    "AR6K Async");
		if (IS_ERR(device->async_task)) {
			HIF_ERROR("%s: Error creating async task",
				  __func__);
			return QDF_STATUS_E_FAILURE;
		}
		device->is_disabled = false;
		wake_up_process(device->async_task);
	}

	if (!resume)
		ret = hif_sdio_probe(ol_sc, func, device);

	HIF_EXIT();

	return ret;
}
#endif /* CONFIG_SDIO_TRANSFER_MAILBOX */
1985