xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifdef CONFIG_SDIO_TRANSFER_MAILBOX
20 #define ATH_MODULE_NAME hif
21 #include <linux/kthread.h>
22 #include <qdf_types.h>
23 #include <qdf_status.h>
24 #include <qdf_timer.h>
25 #include <qdf_time.h>
26 #include <qdf_lock.h>
27 #include <qdf_mem.h>
28 #include <qdf_util.h>
29 #include <qdf_defer.h>
30 #include <qdf_atomic.h>
31 #include <qdf_nbuf.h>
32 #include <qdf_threads.h>
33 #include <athdefs.h>
34 #include <qdf_net_types.h>
35 #include <a_types.h>
37 #include <a_osapi.h>
38 #include <hif.h>
39 #include <htc_internal.h>
40 #include <htc_services.h>
41 #include <a_debug.h>
42 #include "hif_sdio_internal.h"
43 #include "if_sdio.h"
44 #include "regtable.h"
45 #include "transfer.h"
46 
47 /* By default, set up a bounce buffer for the data packets.
48  * If the underlying host controller driver
49  * does not use DMA, you may be able to skip this step
50  * and save the memory allocation and transfer time.
51  */
52 #define HIF_USE_DMA_BOUNCE_BUFFER 1
53 #if HIF_USE_DMA_BOUNCE_BUFFER
54 /* Macro to check whether a DMA buffer is WORD-aligned and DMA-able.
55  * Most host controllers assume the
56  * buffer is DMA-able and will bug-check otherwise (e.g. buffers on the stack).
57  * The virt_addr_valid check fails on stack memory.
58  */
59 #define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
60 					!virt_addr_valid((buffer)))
61 #else
62 #define BUFFER_NEEDS_BOUNCE(buffer)   (false)
63 #endif
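
/*
 * Editorial usage sketch (not part of the driver): how the bounce decision
 * above is typically consumed.  A transfer path copies into a pre-allocated,
 * word-aligned DMA buffer only when BUFFER_NEEDS_BOUNCE() is true, and
 * otherwise hands the caller's buffer straight to the MMC stack.  The helper
 * name and parameters below are hypothetical.
 */
#if 0
static char *pick_xfer_buffer(char *caller_buf, uint32_t len,
			      char *dma_buf, uint32_t dma_buf_len)
{
	if (!BUFFER_NEEDS_BOUNCE(caller_buf))
		return caller_buf;	/* aligned and DMA-able: use as-is */

	if (len > dma_buf_len)		/* bounce buffer too small */
		return NULL;

	memcpy(dma_buf, caller_buf, len); /* stage the data for DMA */
	return dma_buf;
}
#endif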
64 
65 #ifdef SDIO_3_0
66 /**
67  * set_extended_mbox_size() - set extended MBOX size
68  * @pinfo: sdio mailbox info
69  *
70  * Return: none.
71  */
72 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
73 {
74 	pinfo->mbox_prop[0].extended_size =
75 		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
76 	pinfo->mbox_prop[1].extended_size =
77 		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
78 }
79 
80 /**
81  * set_extended_mbox_address() - set extended MBOX address
82  * @pinfo: sdio mailbox info
83  *
84  * Return: none.
85  */
86 static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
87 {
88 	pinfo->mbox_prop[1].extended_address =
89 		pinfo->mbox_prop[0].extended_address +
90 		pinfo->mbox_prop[0].extended_size +
91 		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
92 }
93 #else
94 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
95 {
96 	pinfo->mbox_prop[0].extended_size =
97 		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
98 }
99 
100 static inline void
101 set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
102 {
103 }
104 #endif
105 
106 /**
107  * set_extended_mbox_window_info() - set extended MBOX window
108  * information for SDIO interconnects
109  * @manf_id: manufacturer id
110  * @pinfo: sdio mailbox info
111  *
112  * Return: none.
113  */
114 static void set_extended_mbox_window_info(uint16_t manf_id,
115 					  struct hif_device_mbox_info *pinfo)
116 {
117 	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
118 	case MANUFACTURER_ID_AR6002_BASE:
119 		/* MBOX 0 has an extended range */
120 		pinfo->mbox_prop[0].extended_address =
121 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
122 		pinfo->mbox_prop[0].extended_size =
123 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
124 		break;
137 	case MANUFACTURER_ID_AR6003_BASE:
138 		/* MBOX 0 has an extended range */
139 		pinfo->mbox_prop[0].extended_address =
140 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
141 		pinfo->mbox_prop[0].extended_size =
142 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
143 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
144 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
145 		break;
146 	case MANUFACTURER_ID_AR6004_BASE:
147 		pinfo->mbox_prop[0].extended_address =
148 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
149 		pinfo->mbox_prop[0].extended_size =
150 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
151 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
152 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
153 		break;
154 	case MANUFACTURER_ID_AR6320_BASE:
155 	{
156 		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;
157 
158 		pinfo->mbox_prop[0].extended_address =
159 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
160 		if (rev < 4)
161 			pinfo->mbox_prop[0].extended_size =
162 				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
163 		else
164 			set_extended_mbox_size(pinfo);
165 		set_extended_mbox_address(pinfo);
166 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
167 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
168 		break;
169 	}
170 	case MANUFACTURER_ID_QCA9377_BASE:
171 	case MANUFACTURER_ID_QCA9379_BASE:
172 		pinfo->mbox_prop[0].extended_address =
173 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
174 		pinfo->mbox_prop[0].extended_size =
175 			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
176 		pinfo->mbox_prop[1].extended_address =
177 			pinfo->mbox_prop[0].extended_address +
178 			pinfo->mbox_prop[0].extended_size +
179 			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
180 		pinfo->mbox_prop[1].extended_size =
181 			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
182 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
183 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
184 		break;
185 	default:
186 		A_ASSERT(false);
187 		break;
188 	}
189 }
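
/*
 * Worked example (editorial, symbolic values): for QCA9377/QCA9379 the two
 * extended mailbox windows are laid out back to back with a dummy gap, so
 * with base B, width W0 and gap G the second window starts at B + W0 + G:
 *
 *   mbox_prop[0].extended_address = B  (HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320)
 *   mbox_prop[0].extended_size    = W0 (HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0)
 *   mbox_prop[1].extended_address = B + W0 + G
 *                                      (G = HIF_MBOX_DUMMY_SPACE_SIZE_AR6320)
 *   mbox_prop[1].extended_size    = W1 (HIF_MBOX1_EXTENDED_WIDTH_AR6320)
 */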
190 
191 /**
192  * hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
193  * @pdev: The HIF layer object
194  * Return: none
195  */
196 void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
197 {
198 	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);
199 
200 	HIF_ENTER();
201 
202 	hif_device->swap_mailbox = true;
203 
204 	HIF_EXIT();
205 }
206 
207 /**
208  * hif_dev_get_mailbox_swap() - Get the mailbox swap setting
209  * @pdev: The HIF layer object
210  * Return: true if mailbox swap is enabled, false otherwise
211  */
212 bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
213 {
214 	struct hif_sdio_device *hif_device;
215 
216 	HIF_ENTER();
217 
218 	hif_device = hif_dev_from_hif(pdev);
219 
220 	HIF_EXIT();
221 
222 	return hif_device->swap_mailbox;
223 }
224 
225 /**
226  * hif_dev_get_fifo_address() - get the fifo addresses for DMA
227  * @pdev: SDIO HIF object
228  * @config: mbox address config pointer
229  * @config_len: size of the config buffer in bytes
230  * Return: 0 on success, non-zero on error
231  */
232 int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
233 			     void *config,
234 			     uint32_t config_len)
235 {
236 	uint32_t count;
237 	struct hif_device_mbox_info *cfg =
238 				(struct hif_device_mbox_info *)config;
239 
240 	/* validate the buffer before writing into it */
241 	if (config_len < sizeof(struct hif_device_mbox_info))
242 		return -EINVAL;
243 
244 	for (count = 0; count < 4; count++)
245 		cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);
246 
247 	set_extended_mbox_window_info((uint16_t)pdev->func->device, cfg);
248 
249 	return 0;
250 }
251 
252 /**
253  * hif_dev_get_block_size() - get the mbox block sizes for DMA
254  * @config: pointer to an array of at least four uint32_t block sizes
255  *
256  * Return: none
257  */
258 void hif_dev_get_block_size(void *config)
259 {
260 	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
261 	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
262 	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
263 	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
264 }
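
/*
 * Editorial usage sketch: fetching the per-mailbox block sizes into a
 * caller-provided array, as hif_dev_setup_device() does below.  The example
 * function name is hypothetical.
 */
#if 0
static uint32_t example_mbox0_block_size(void)
{
	uint32_t blocksizes[MAILBOX_COUNT];

	hif_dev_get_block_size(blocksizes);
	return blocksizes[0];	/* block size used for mailbox 0 */
}
#endif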
265 
266 /**
267  * hif_dev_map_service_to_pipe() - map a service id to its ul/dl pipes.
268  * @pdev: SDIO HIF object
269  * @svc: service id
270  * @ul_pipe: uplink pipe id
271  * @dl_pipe: downlink pipe id
272  *
273  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on invalid map
274  */
275 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
276 				       uint8_t *ul_pipe, uint8_t *dl_pipe)
277 {
278 	QDF_STATUS status = QDF_STATUS_SUCCESS;
279 
280 	switch (svc) {
281 	case HTT_DATA_MSG_SVC:
282 		if (hif_dev_get_mailbox_swap(pdev)) {
283 			*ul_pipe = 1;
284 			*dl_pipe = 0;
285 		} else {
286 			*ul_pipe = 3;
287 			*dl_pipe = 2;
288 		}
289 		break;
290 
291 	case HTC_CTRL_RSVD_SVC:
292 	case HTC_RAW_STREAMS_SVC:
293 		*ul_pipe = 1;
294 		*dl_pipe = 0;
295 		break;
296 
297 	case WMI_DATA_BE_SVC:
298 	case WMI_DATA_BK_SVC:
299 	case WMI_DATA_VI_SVC:
300 	case WMI_DATA_VO_SVC:
301 		*ul_pipe = 1;
302 		*dl_pipe = 0;
303 		break;
304 
305 	case WMI_CONTROL_SVC:
306 		if (hif_dev_get_mailbox_swap(pdev)) {
307 			*ul_pipe = 3;
308 			*dl_pipe = 2;
309 		} else {
310 			*ul_pipe = 1;
311 			*dl_pipe = 0;
312 		}
313 		break;
314 
315 	default:
316 		hif_err("%s: Err : Invalid service (%d)",
317 			__func__, svc);
318 		status = QDF_STATUS_E_INVAL;
319 		break;
320 	}
321 	return status;
322 }
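
/*
 * Editorial usage sketch: resolving the UL/DL pipe pair for the WMI control
 * service.  With the default (non-swapped) mailbox layout this yields
 * ul_pipe = 1, dl_pipe = 0; with swap_mailbox set it yields ul_pipe = 3,
 * dl_pipe = 2.  The example function name is hypothetical.
 */
#if 0
static QDF_STATUS example_wmi_pipes(struct hif_sdio_dev *pdev)
{
	uint8_t ul_pipe, dl_pipe;

	return hif_dev_map_service_to_pipe(pdev, WMI_CONTROL_SVC,
					   &ul_pipe, &dl_pipe);
}
#endif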
323 
324 /**
325  * hif_dev_setup_device() - Set up device-specific HIF configuration
326  * @pdev: HIF layer object
327  * Return: 0 on success, error otherwise
328  */
329 int hif_dev_setup_device(struct hif_sdio_device *pdev)
330 {
331 	int status = 0;
332 	uint32_t blocksizes[MAILBOX_COUNT];
333 
334 	status = hif_configure_device(NULL, pdev->HIFDevice,
335 				      HIF_DEVICE_GET_FIFO_ADDR,
336 				      &pdev->MailBoxInfo,
337 				      sizeof(pdev->MailBoxInfo));
338 
339 	if (status != QDF_STATUS_SUCCESS)
340 		hif_err("%s: HIF_DEVICE_GET_FIFO_ADDR failed", __func__);
341 
342 	status = hif_configure_device(NULL, pdev->HIFDevice,
343 				      HIF_DEVICE_GET_BLOCK_SIZE,
344 				      blocksizes, sizeof(blocksizes));
345 	if (status != QDF_STATUS_SUCCESS)
346 		hif_err("%s: HIF_DEVICE_GET_BLOCK_SIZE failed", __func__);
347 
348 	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
349 
350 	return status;
351 }
352 
353 /**
354  * hif_dev_mask_interrupts() - Disable the interrupts in the device
355  * @pdev: SDIO HIF object
356  * Return: none
357  */
358 void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
359 {
360 	QDF_STATUS status = QDF_STATUS_SUCCESS;
361 
362 	HIF_ENTER();
363 	/* Disable all interrupts */
364 	LOCK_HIF_DEV(pdev);
365 	mboxEnaRegs(pdev).int_status_enable = 0;
366 	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
367 	mboxEnaRegs(pdev).error_status_enable = 0;
368 	mboxEnaRegs(pdev).counter_int_status_enable = 0;
369 	UNLOCK_HIF_DEV(pdev);
370 
371 	/* always synchronous */
372 	status = hif_read_write(pdev->HIFDevice,
373 				INT_STATUS_ENABLE_ADDRESS,
374 				(char *)&mboxEnaRegs(pdev),
375 				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
376 				HIF_WR_SYNC_BYTE_INC, NULL);
377 
378 	if (status != QDF_STATUS_SUCCESS)
379 		hif_err("%s: Err updating intr reg: %d", __func__, status);
380 }
381 
382 /**
383  * hif_dev_unmask_interrupts() - Enable the interrupts in the device
384  * @pdev: SDIO HIF object
385  * Return: none
386  */
387 void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
388 {
389 	QDF_STATUS status = QDF_STATUS_SUCCESS;
390 
391 	LOCK_HIF_DEV(pdev);
392 
393 	/* Enable all the interrupts except for the internal
394 	 * AR6000 CPU interrupt
395 	 */
396 	mboxEnaRegs(pdev).int_status_enable =
397 		INT_STATUS_ENABLE_ERROR_SET(0x01) |
398 		INT_STATUS_ENABLE_CPU_SET(0x01)
399 		| INT_STATUS_ENABLE_COUNTER_SET(0x01);
400 
401 	/* enable interrupts for both mailboxes */
402 	mboxEnaRegs(pdev).int_status_enable |=
403 		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
404 		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);
405 
406 	/* Set up the CPU Interrupt Status Register; enable
407 	 * CPU-sourced interrupts #0 and #1:
408 	 * #0 is used to report an assertion from the target
409 	 * #1 is used to inform the host that credits have arrived
410 	 */
411 	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;
412 
413 	/* Set up the Error Interrupt Status Register */
414 	mboxEnaRegs(pdev).error_status_enable =
415 		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
416 		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;
417 
418 	/* Set up the Counter Interrupt Status Register
419 	 * (only for debug interrupt to catch fatal errors)
420 	 */
421 	mboxEnaRegs(pdev).counter_int_status_enable =
422 	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;
423 
424 	UNLOCK_HIF_DEV(pdev);
425 
426 	/* always synchronous */
427 	status = hif_read_write(pdev->HIFDevice,
428 				INT_STATUS_ENABLE_ADDRESS,
429 				(char *)&mboxEnaRegs(pdev),
430 				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
431 				HIF_WR_SYNC_BYTE_INC,
432 				NULL);
433 
434 	if (status != QDF_STATUS_SUCCESS)
435 		hif_err("%s: Err updating intr reg: %d", __func__, status);
436 }
437 
438 void hif_dev_dump_registers(struct hif_sdio_device *pdev,
439 			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
440 			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
441 			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
442 {
443 	int i = 0;
444 
445 	hif_debug("%s: Mailbox registers:", __func__);
446 
447 	if (irq_proc) {
448 		hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status);
449 		hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
450 		hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
451 		hif_debug("CounterIntStat: 0x%x ",
452 			  irq_proc->counter_int_status);
453 		hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame);
454 		hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
455 		hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
456 		hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
457 		hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
458 		hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);
459 
460 		if (pdev->MailBoxInfo.gmbox_address != 0) {
461 			hif_debug("GMBOX-HostIntStatus2:  0x%x ",
462 				  irq_proc->host_int_status2);
463 			hif_debug("GMBOX-RX-Avail: 0x%x ",
464 				  irq_proc->gmbox_rx_avail);
465 		}
466 	}
467 
468 	if (irq_en) {
469 		hif_debug("IntStatusEnable: 0x%x\n",
470 			  irq_en->int_status_enable);
471 		hif_debug("CounterIntStatus: 0x%x\n",
472 			  irq_en->counter_int_status_enable);
473 	}
474 
475 	for (i = 0; mbox_regs && i < 4; i++)
476 		hif_debug("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]);
477 }
478 
479 /* Under HL SDIO, with Interface Memory support, there are
480  * two reasons to support two mailboxes:
481  * a) we need to place different buffers in different
482  * mempools; for example, data uses Interface Memory while
483  * descriptors and the rest use DRAM, so they need different
484  * SDIO mbox channels.
485  * b) currently, the tx mempool in the LL case is separate from
486  * the main mempool, and its structure (descs at the beginning
487  * of every pool buffer) is different because it only needs to
488  * store tx descs from the host. To align with the LL case,
489  * we also need 2-mbox support, just as in the PCIe LL cases.
490  */
491 
492 /**
493  * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
494  * @pdev: The pointer to the hif device object
495  * @pipeid: pipe index
496  *
497  * Return: mailbox index
498  */
499 static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
500 					    uint8_t pipeid)
501 {
502 	if (2 == pipeid || 3 == pipeid)
503 		return 1;
504 	else if (0 == pipeid || 1 == pipeid)
505 		return 0;
506 
507 	hif_err("%s: pipeid=%d invalid", __func__, pipeid);
508 
509 	qdf_assert(0);
510 
511 	return INVALID_MAILBOX_NUMBER;
512 }
513 
514 /**
515  * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
516  * @pdev: The pointer to the hif device object
517  * @mbox_index: mailbox index
518  * @upload: true for the uplink (tx) pipe, false for downlink (rx)
519  *
520  * Return: pipe index, or INVALID_MAILBOX_NUMBER on invalid input
521  */
522 static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
523 					    uint8_t mbox_index, bool upload)
524 {
525 	if (mbox_index == 0)
526 		return upload ? 1 : 0;
527 	else if (mbox_index == 1)
528 		return upload ? 3 : 2;
529 
530 	hif_err("%s: mbox_index=%d, upload=%d invalid",
531 		__func__, mbox_index, upload);
532 
533 	qdf_assert(0);
534 
535 	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
536 }
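
/*
 * Editorial note: the two mapping helpers above are inverses of each other.
 * The fixed pipe <-> mailbox relationship is:
 *
 *   pipe 0 (DL) / pipe 1 (UL)  <->  mailbox 0
 *   pipe 2 (DL) / pipe 3 (UL)  <->  mailbox 1
 */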
537 
538 /**
539  * hif_get_send_address() - Get the transfer pipe address
540  * @pdev: The pointer to the hif device object
541  * @pipe: The pipe identifier
542  * @addr: out parameter that receives the mailbox address
543  * Return: 0 for success and non-zero for failure to map
544  */
545 int hif_get_send_address(struct hif_sdio_device *pdev,
546 			 uint8_t pipe, unsigned long *addr)
547 {
548 	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;
549 
550 	if (!addr)
551 		return -EINVAL;
552 
553 	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);
554 
555 	if (mbox_index == INVALID_MAILBOX_NUMBER)
556 		return -EINVAL;
557 
558 	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;
559 
560 	return 0;
561 }
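
/*
 * Editorial usage sketch: looking up the extended write address for a
 * transmit pipe before issuing a write.  Pipe 1 maps to mailbox 0, so addr
 * receives mbox_prop[0].extended_address.  The example function name is
 * hypothetical.
 */
#if 0
static int example_send_addr(struct hif_sdio_device *pdev)
{
	unsigned long addr;

	return hif_get_send_address(pdev, 1, &addr);
}
#endif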
562 
563 /**
564  * hif_fixup_write_param() - Tweak the address and length parameters
565  * @pdev: The pointer to the hif device object
566  * @req: The request flags (the dummy-space bits are honored)
567  * @length: The length pointer
568  * @addr: The addr pointer
569  * Return: None
570  */
571 void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
572 			   uint32_t *length, uint32_t *addr)
573 {
574 	struct hif_device_mbox_info mboxinfo;
575 	uint32_t taddr = *addr, mboxlen = 0;
576 
577 	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
578 			     &mboxinfo, sizeof(mboxinfo));
579 
580 	if (taddr >= 0x800 && taddr < 0xC00) {
581 		/* Host control register and CIS Window */
582 		mboxlen = 0;
583 	} else if (taddr == mboxinfo.mbox_addresses[0] ||
584 		   taddr == mboxinfo.mbox_addresses[1] ||
585 		   taddr == mboxinfo.mbox_addresses[2] ||
586 		   taddr == mboxinfo.mbox_addresses[3]) {
587 		mboxlen = HIF_MBOX_WIDTH;
588 	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
589 		mboxlen = mboxinfo.mbox_prop[0].extended_size;
590 	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
591 		mboxlen = mboxinfo.mbox_prop[1].extended_size;
592 	} else {
593 		hif_err("%s: Invalid write addr: 0x%08x\n", __func__, taddr);
594 		return;
595 	}
596 
597 	if (mboxlen != 0) {
598 		if (*length > mboxlen) {
599 			hif_err("%s: Error (%u > %u)",
600 				__func__, *length, mboxlen);
601 			return;
602 		}
603 
604 		taddr = taddr + (mboxlen - *length);
605 		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
606 		*addr = taddr;
607 	}
608 }
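
/*
 * Worked example (editorial, symbolic values): a write of L bytes to an
 * extended mailbox window of base B and width W is shifted so that the last
 * byte of the transfer lands on the end-of-message address of the window:
 *
 *   taddr = B + (W - L) + ((req & HIF_DUMMY_SPACE_MASK) >> 16)
 *
 * e.g. with W = 0x800 and L = 0x100 the write starts at B + 0x700, plus any
 * dummy-space offset encoded in the request flags.
 */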
609 
610 /**
611  * hif_dev_recv_packet() - Receive an HTC packet/packet information from device
612  * @pdev: HIF device object
613  * @packet: The HTC packet pointer
614  * @recv_length: The length of information to be received
615  * @mbox_index: The mailbox that contains this information
616  *
617  * Return: 0 on success, non-zero on error
618  */
619 static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
620 				      HTC_PACKET *packet,
621 				      uint32_t recv_length,
622 				      uint32_t mbox_index)
623 {
624 	QDF_STATUS status;
625 	uint32_t padded_length;
626 	bool sync = !packet->Completion;
627 	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;
628 
629 	/* adjust the length to be a multiple of block size if appropriate */
630 	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);
631 
632 	if (padded_length > packet->BufferLength) {
633 		hif_err("%s: No space for padlen:%d recvlen:%d bufferlen:%d",
634 			__func__, padded_length,
635 			recv_length, packet->BufferLength);
636 		if (packet->Completion) {
637 			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
638 			return QDF_STATUS_SUCCESS;
639 		}
640 		return QDF_STATUS_E_INVAL;
641 	}
642 
643 	/* mailbox index is saved in Endpoint member */
644 	hif_debug("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
645 		  __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length,
646 		  padded_length, mbox_index);
647 
648 	status = hif_read_write(pdev->HIFDevice,
649 				pdev->MailBoxInfo.mbox_addresses[mbox_index],
650 				packet->pBuffer,
651 				padded_length,
652 				req, sync ? NULL : packet);
653 
654 	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
655 		hif_err("%s : Failed %d", __func__, status);
656 
657 	if (sync) {
658 		packet->Status = status;
659 		if (status == QDF_STATUS_SUCCESS) {
660 			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;
661 
662 			hif_debug("%s:EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X\n",
663 				  __func__,
664 				  hdr->EndpointID, hdr->PayloadLen,
665 				  hdr->Flags, hdr->ControlBytes0,
666 				  hdr->ControlBytes1);
667 		}
668 	}
669 
670 	return status;
671 }
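
/*
 * Editorial sketch of the padding rule used above: receive lengths are
 * rounded up to a whole number of SDIO blocks before the transfer is issued.
 * A minimal stand-in for DEV_CALC_RECV_PADDED_LEN, assuming a power-of-two
 * block size (the function name is hypothetical):
 */
#if 0
static uint32_t example_padded_len(uint32_t len, uint32_t block_size)
{
	/* e.g. len = 300, block_size = 256  ->  512 */
	return (len + block_size - 1) & ~(block_size - 1);
}
#endif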
672 
673 static QDF_STATUS
674 hif_dev_issue_recv_packet_bundle(struct hif_sdio_device *pdev,
675 				 HTC_PACKET_QUEUE *recv_pkt_queue,
676 				 HTC_PACKET_QUEUE *sync_completion_queue,
677 				 uint8_t mail_box_index,
678 				 int *num_packets_fetched,
679 				 bool partial_bundle)
682 {
683 	uint32_t padded_length;
684 	int i, total_length = 0;
685 	HTC_TARGET *target = NULL;
686 	int bundleSpaceRemaining = 0;
687 	unsigned char *bundle_buffer = NULL;
688 	HTC_PACKET *packet, *packet_rx_bundle;
689 	QDF_STATUS status = QDF_STATUS_SUCCESS;
690 
691 	target = (HTC_TARGET *)pdev->pTarget;
692 
693 	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
694 	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
695 		partial_bundle = true;
696 		hif_warn("%s, partial bundle detected num: %d, %d\n",
697 			 __func__,
698 			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
699 			 HTC_MAX_MSG_PER_BUNDLE_RX);
700 	}
701 
702 	bundleSpaceRemaining =
703 		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
704 	packet_rx_bundle = allocate_htc_bundle_packet(target);
705 	if (!packet_rx_bundle) {
706 		hif_err("%s: packet_rx_bundle is NULL\n", __func__);
707 		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
708 		return QDF_STATUS_E_NOMEM;
709 	}
710 	bundle_buffer = packet_rx_bundle->pBuffer;
711 
712 	for (i = 0;
713 	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
714 	     i++) {
715 		packet = htc_packet_dequeue(recv_pkt_queue);
716 		A_ASSERT(packet);
717 		if (!packet)
718 			break;
719 		padded_length =
720 			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
721 		if (packet->PktInfo.AsRx.HTCRxFlags &
722 				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
723 			padded_length += HIF_BLOCK_SIZE;
724 		if ((bundleSpaceRemaining - padded_length) < 0) {
725 			/* exceeds what we can transfer, put the packet back */
726 			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
727 			break;
728 		}
729 		bundleSpaceRemaining -= padded_length;
730 
731 		if (partial_bundle ||
732 		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
733 			packet->PktInfo.AsRx.HTCRxFlags |=
734 				HTC_RX_PKT_IGNORE_LOOKAHEAD;
735 		}
736 		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;
737 
738 		if (sync_completion_queue)
739 			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);
740 
741 		total_length += padded_length;
742 	}
743 #if DEBUG_BUNDLE
744 	qdf_print("Recv bundle count %d, length %d.",
745 		  sync_completion_queue ?
746 		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
747 		  total_length);
748 #endif
749 
750 	status = hif_read_write(pdev->HIFDevice,
751 				pdev->MailBoxInfo.mbox_addresses[mail_box_index],
752 				bundle_buffer, total_length,
753 				HIF_RD_SYNC_BLOCK_FIX, NULL);
755 
756 	if (status != QDF_STATUS_SUCCESS) {
757 		hif_err("%s, hif_read_write failed, status:%d\n",
758 			__func__, status);
759 	} else {
760 		unsigned char *buffer = bundle_buffer;
761 		*num_packets_fetched = i;
762 		if (sync_completion_queue) {
763 			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
764 				sync_completion_queue, packet) {
765 				padded_length =
766 				DEV_CALC_RECV_PADDED_LEN(pdev,
767 							 packet->ActualLength);
768 				if (packet->PktInfo.AsRx.HTCRxFlags &
769 				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
770 					padded_length +=
771 						HIF_BLOCK_SIZE;
772 				A_MEMCPY(packet->pBuffer,
773 					 buffer, padded_length);
774 				buffer += padded_length;
775 			} HTC_PACKET_QUEUE_ITERATE_END;
776 		}
777 	}
778 	/* free bundle space under Sync mode */
779 	free_htc_bundle_packet(target, packet_rx_bundle);
780 	return status;
781 }
782 
783 #define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
784 static
785 QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
786 						uint8_t mail_box_index,
787 						uint32_t msg_look_aheads[],
788 						int num_look_aheads,
789 						bool *async_proc,
790 						int *num_pkts_fetched)
791 {
792 	int pkts_fetched;
793 	HTC_PACKET *pkt;
794 	HTC_ENDPOINT_ID id;
795 	bool partial_bundle;
796 	int total_fetched = 0;
797 	bool asyncProc = false;
798 	QDF_STATUS status = QDF_STATUS_SUCCESS;
799 	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
800 	HTC_PACKET_QUEUE recv_q, sync_comp_q;
801 	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t);
802 
803 	hif_debug("%s: NumLookAheads: %d\n", __func__, num_look_aheads);
804 
805 	if (num_pkts_fetched)
806 		*num_pkts_fetched = 0;
807 
808 	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
809 		/* We use async mode to get the packets if the
810 		 * device layer supports it. The device layer
811 		 * interfaces with HIF in which HIF may have
812 		 * restrictions on how interrupts are processed
813 		 */
814 		asyncProc = true;
815 	}
816 
817 	if (async_proc) {
818 		/* indicate to caller how we decided to process this */
819 		*async_proc = asyncProc;
820 	}
821 
822 	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
823 		A_ASSERT(false);
824 		return QDF_STATUS_E_PROTO;
825 	}
826 
827 	A_MEMCPY(look_aheads, msg_look_aheads,
828 		 (sizeof(uint32_t)) * num_look_aheads);
829 	while (true) {
830 		/* reset packets queues */
831 		INIT_HTC_PACKET_QUEUE(&recv_q);
832 		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
833 		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
834 			status = QDF_STATUS_E_PROTO;
835 			A_ASSERT(false);
836 			break;
837 		}
838 
839 		/* first lookahead sets the expected endpoint IDs for
840 		 * all packets in a bundle
841 		 */
842 		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;
843 
844 		if (id >= ENDPOINT_MAX) {
845 			hif_err("%s: Invalid Endpoint in lookahead: %d\n",
846 				__func__, id);
847 			status = QDF_STATUS_E_PROTO;
848 			break;
849 		}
850 		/* try to allocate as many HTC RX packets indicated
851 		 * by the lookaheads these packets are stored
852 		 * in the recvPkt queue
853 		 */
854 		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
855 							      look_aheads,
856 							      num_look_aheads,
857 							      &recv_q);
858 		if (QDF_IS_STATUS_ERROR(status))
859 			break;
860 		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);
861 
862 		/* we've got packet buffers for all we can currently fetch,
863 		 * this count is not valid anymore
864 		 */
865 		num_look_aheads = 0;
866 		partial_bundle = false;
867 
868 		/* now go fetch the list of HTC packets */
869 		while (!HTC_QUEUE_EMPTY(&recv_q)) {
870 			pkts_fetched = 0;
871 			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
872 				/* there are enough packets to attempt a bundle
873 				 * transfer and recv bundling is allowed
874 				 */
875 				status = ISSUE_BUNDLE(pdev,
876 						      &recv_q,
877 						      asyncProc ? NULL :
878 						      &sync_comp_q,
879 						      mail_box_index,
880 						      &pkts_fetched,
881 						      partial_bundle);
882 				if (QDF_IS_STATUS_ERROR(status)) {
883 					hif_dev_free_recv_pkt_queue(
884 							&recv_q);
885 					break;
886 				}
887 
888 				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) !=
889 					0) {
890 					/* we couldn't fetch all packets at one
891 					 * time; this creates a broken
892 					 * bundle
893 					 */
894 					partial_bundle = true;
895 				}
896 			}
897 
898 			/* see if the previous operation fetched any
899 			 * packets using bundling
900 			 */
901 			if (pkts_fetched == 0) {
902 				/* dequeue one packet */
903 				pkt = htc_packet_dequeue(&recv_q);
904 				A_ASSERT(pkt);
905 				if (!pkt)
906 					break;
907 
908 				pkt->Completion = NULL;
909 
910 				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) >
911 				    0) {
912 					/* lookaheads in all packets except the
913 					 * last one must be ignored
914 					 */
915 					pkt->PktInfo.AsRx.HTCRxFlags |=
916 						HTC_RX_PKT_IGNORE_LOOKAHEAD;
917 				}
918 
919 				/* go fetch the packet */
920 				status =
921 				hif_dev_recv_packet(pdev, pkt,
922 						    pkt->ActualLength,
923 						    mail_box_index);
924 				while (QDF_IS_STATUS_ERROR(status) &&
925 				       !HTC_QUEUE_EMPTY(&recv_q)) {
926 					qdf_nbuf_t nbuf;
927 
928 					pkt = htc_packet_dequeue(&recv_q);
929 					if (!pkt)
930 						break;
931 					nbuf = pkt->pNetBufContext;
932 					if (nbuf)
933 						qdf_nbuf_free(nbuf);
934 				}
935 
936 				if (QDF_IS_STATUS_ERROR(status))
937 					break;
938 				/* fetched synchronously; queue this packet
939 				 * for synchronous completion
940 				 */
941 				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
942 			}
943 		}
944 
945 		/* synchronous handling */
946 		if (pdev->DSRCanYield) {
947 			/* for the SYNC case, increment count that tracks
948 			 * when the DSR should yield
949 			 */
950 			pdev->CurrentDSRRecvCount++;
951 		}
952 
953 		/* in the sync case, all packet buffers are now filled,
954 		 * we can process each packet, check lookahead , then repeat
955 		 */
956 		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;
957 
958 		/* unload sync completion queue */
959 		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
960 			uint8_t pipeid;
961 			qdf_nbuf_t netbuf;
962 
963 			pkt = htc_packet_dequeue(&sync_comp_q);
964 			A_ASSERT(pkt);
965 			if (!pkt)
966 				break;
967 
968 			num_look_aheads = 0;
969 			status = hif_dev_process_recv_header(pdev, pkt,
970 							     look_aheads,
971 							     &num_look_aheads);
972 			if (QDF_IS_STATUS_ERROR(status)) {
973 				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
974 				break;
975 			}
976 
977 			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
978 			/* set data length */
979 			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);
980 
981 			if (rxCompletion) {
982 				pipeid =
983 				hif_dev_map_mail_box_to_pipe(pdev,
984 							     mail_box_index,
985 							     true);
986 				rxCompletion(pdev->hif_callbacks.Context,
987 					     netbuf, pipeid);
988 			}
989 		}
990 
991 		if (QDF_IS_STATUS_ERROR(status)) {
992 			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
993 				hif_dev_free_recv_pkt_queue(
994 						&sync_comp_q);
995 			break;
996 		}
997 
998 		if (num_look_aheads == 0) {
999 			/* no more look aheads */
1000 			break;
1001 		}
1002 		/* check whether other OS contexts have queued any WMI
1003 		 * command/data for WLAN. This check is needed only if WLAN
1004 		 * Tx and Rx happens in same thread context
1005 		 */
1006 		/* A_CHECK_DRV_TX(); */
1007 	}
1008 	if (num_pkts_fetched)
1009 		*num_pkts_fetched = total_fetched;
1010 
1011 	AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
1012 			("-hif_dev_recv_message_pending_handler\n"));
1012 	return status;
1013 }
1014 
1015 /**
1016  * hif_dev_service_cpu_interrupt() - service fatal interrupts
1017  * synchronously
1018  *
1019  * @pdev: hif sdio device context
1020  *
1021  * Return: QDF_STATUS_SUCCESS for success
1022  */
1023 static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
1024 {
1025 	QDF_STATUS status;
1026 	uint8_t reg_buffer[4];
1027 	uint8_t cpu_int_status;
1028 
1029 	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
1030 			 mboxEnaRegs(pdev).cpu_int_status_enable;
1031 
1032 	hif_err("%s: 0x%x", __func__, (uint32_t)cpu_int_status);
1033 
1034 	/* Clear the interrupt */
1035 	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;
1036 	/* set up the register transfer buffer to hit the register
1037 	 * 4 times; this is done to make the access 4-byte aligned
1038 	 * to mitigate issues with host bus interconnects that
1039 	 * restrict bus transfer lengths to be a multiple of 4 bytes.
1040 	 * Set the W1C value to clear the interrupt; this hits the register
1041 	 * first
1042 	 */
1043 	 */
1044 	reg_buffer[0] = cpu_int_status;
1045 	/* the remaining 3 values are set to zero, which has no effect */
1046 	reg_buffer[1] = 0;
1047 	reg_buffer[2] = 0;
1048 	reg_buffer[3] = 0;
1049 
1050 	status = hif_read_write(pdev->HIFDevice,
1051 				CPU_INT_STATUS_ADDRESS,
1052 				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1053 
1054 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1055 
1056 	/* The Interrupt sent to the Host is generated via bit0
1057 	 * of CPU INT register
1058 	 */
1059 	if (cpu_int_status & 0x1) {
1060 		if (pdev->hif_callbacks.fwEventHandler)
1061 			/* It calls into HTC which propagates this
1062 			 * to ol_target_failure()
1063 			 */
1064 			pdev->hif_callbacks.fwEventHandler(
1065 				pdev->hif_callbacks.Context,
1066 				QDF_STATUS_E_FAILURE);
1067 	} else {
1068 		hif_err("%s: Unrecognized CPU event", __func__);
1069 	}
1070 
1071 	return status;
1072 }
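
/*
 * Editorial sketch: the 4-byte fixed-address write pattern used above and in
 * hif_dev_service_error_interrupt() below.  The W1C value goes in byte 0 and
 * the remaining bytes are zero (no effect), which keeps the bus transfer
 * 4-byte aligned.  The helper name is hypothetical.
 */
#if 0
static QDF_STATUS example_w1c_clear(struct hif_sdio_device *pdev,
				    unsigned long reg_addr, uint8_t w1c_bits)
{
	uint8_t reg_buffer[4] = { w1c_bits, 0, 0, 0 };

	return hif_read_write(pdev->HIFDevice, reg_addr,
			      (char *)reg_buffer, 4,
			      HIF_WR_SYNC_BYTE_FIX, NULL);
}
#endif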
1073 
1074 /**
1075  * hif_dev_service_error_interrupt() - service error interrupts
1076  * synchronously
1077  *
1078  * @pdev: hif sdio device context
1079  *
1080  * Return: QDF_STATUS_SUCCESS for success
1081  */
1082 static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
1083 {
1084 	QDF_STATUS status;
1085 	uint8_t reg_buffer[4];
1086 	uint8_t error_int_status = 0;
1087 
1088 	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
1089 	hif_err("%s: 0x%x", __func__, error_int_status);
1090 
1091 	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
1092 		hif_err("%s: Error : Wakeup", __func__);
1093 
1094 	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
1095 		hif_err("%s: Error : Rx Underflow", __func__);
1096 
1097 	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
1098 		hif_err("%s: Error : Tx Overflow", __func__);
1099 
1100 	/* Clear the interrupt */
1101 	mboxProcRegs(pdev).error_int_status &= ~error_int_status;
1102 
1103 	/* set up the register transfer buffer to hit the register
1104 	 * 4 times; this is done to make the access 4-byte
1105 	 * aligned to mitigate issues with host bus interconnects that
1106 	 * restrict bus transfer lengths to be a multiple of 4 bytes
1107 	 */
1108 
1109 	/* set W1C value to clear the interrupt */
1110 	reg_buffer[0] = error_int_status;
1111 	/* the remaining 3 values are set to zero, which has no effect */
1112 	reg_buffer[1] = 0;
1113 	reg_buffer[2] = 0;
1114 	reg_buffer[3] = 0;
1115 
1116 	status = hif_read_write(pdev->HIFDevice,
1117 				ERROR_INT_STATUS_ADDRESS,
1118 				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1119 
1120 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1121 	return status;
1122 }
1123 
1124 /**
1125  * hif_dev_service_debug_interrupt() - service debug interrupts
1126  * synchronously
1127  *
1128  * @pdev: hif sdio device context
1129  *
1130  * Return: QDF_STATUS_SUCCESS for success
1131  */
1132 static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
1133 {
1134 	uint32_t dummy;
1135 	QDF_STATUS status;
1136 
1137 	/* Send a target failure event to the application */
1138 	hif_err("%s: Target debug interrupt", __func__);
1139 
1140 	/* clear the interrupt , the debug error interrupt is counter 0
1141 	 * read counter to clear interrupt
1142 	 */
1143 	status = hif_read_write(pdev->HIFDevice,
1144 				COUNT_DEC_ADDRESS,
1145 				(uint8_t *)&dummy,
1146 				4, HIF_RD_SYNC_BYTE_INC, NULL);
1147 
1148 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1149 	return status;
1150 }
1151 
1152 /**
1153  * hif_dev_service_counter_interrupt() - service counter interrupts
1154  * synchronously
1155  *
1156  * @pdev: hif sdio device context
1157  *
1158  * Return: QDF_STATUS_SUCCESS for success
1159  */
1160 static
1161 QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
1162 {
1163 	uint8_t counter_int_status;
1164 
1165 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
1166 
1167 	counter_int_status = mboxProcRegs(pdev).counter_int_status &
1168 			     mboxEnaRegs(pdev).counter_int_status_enable;
1169 
1170 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1171 			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
1172 			 counter_int_status));
1173 
1174 	/* Check if the debug interrupt is pending
1175 	 * NOTE: other modules like GMBOX may use the counter interrupt
1176 	 * for credit flow control on other counters, we only need to
1177 	 * check for the debug assertion counter interrupt
1178 	 */
1179 	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
1180 		return hif_dev_service_debug_interrupt(pdev);
1181 
1182 	return QDF_STATUS_SUCCESS;
1183 }
1184 
1185 #define RX_LOOKAHEAD_GET(pdev, i) \
1186 	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)]
1187 /**
1188  * hif_dev_process_pending_irqs() - process pending interrupts
1189  * @pdev: hif sdio device context
1190  * @done: pending irq completion status
1191  * @async_processing: sync/async processing flag
1192  *
1193  * Return: QDF_STATUS_SUCCESS for success
1194  */
1195 QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
1196 					bool *done,
1197 					bool *async_processing)
1198 {
1199 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1200 	uint8_t host_int_status = 0;
1201 	uint32_t l_ahead[MAILBOX_USED_COUNT];
1202 	int i;
1203 
1204 	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
1205 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1206 			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
1207 			 (unsigned long)pdev));
1208 
1209 	/* NOTE: the HIF implementation guarantees that the context
1210 	 * of this call allows us to perform SYNCHRONOUS I/O,
1211 	 * that is we can block, sleep or call any API that
1212 	 * can block or switch thread/task contexts.
1213 	 * This is a fully schedulable context.
1214 	 */
1215 	do {
1216 		if (mboxEnaRegs(pdev).int_status_enable == 0) {
1217 			/* interrupt enables have been cleared, do not try
1218 			 * to process any pending interrupts that
1219 			 * may result in more bus transactions.
1220 			 * The target may be unresponsive at this point.
1221 			 */
1222 			break;
1223 		}
1224 		status = hif_read_write(pdev->HIFDevice,
1225 					HOST_INT_STATUS_ADDRESS,
1226 					(uint8_t *)&mboxProcRegs(pdev),
1227 					sizeof(mboxProcRegs(pdev)),
1228 					HIF_RD_SYNC_BYTE_INC, NULL);
1229 
1230 		if (QDF_IS_STATUS_ERROR(status))
1231 			break;
1232 
1233 		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
1234 			hif_dev_dump_registers(pdev,
1235 					       &mboxProcRegs(pdev),
1236 					       &mboxEnaRegs(pdev),
1237 					       &mboxCountRegs(pdev));
1238 		}
1239 
1240 		/* Update only those registers that are enabled */
1241 		host_int_status = mboxProcRegs(pdev).host_int_status
1242 				  & mboxEnaRegs(pdev).int_status_enable;
1243 
1244 		/* only look at mailbox status if the HIF layer did not
1245 		 * provide this function, on some HIF interfaces reading
1246 		 * the RX lookahead is not valid to do
1247 		 */
1248 		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1249 			l_ahead[i] = 0;
1250 			if (host_int_status & (1 << i)) {
1251 				/* mask out pending mailbox value, we use
1252 				 * "lookAhead" as the real flag for
1253 				 * mailbox processing below
1254 				 */
1255 				host_int_status &= ~(1 << i);
1256 				if (mboxProcRegs(pdev).
1257 				    rx_lookahead_valid & (1 << i)) {
1258 					/* mailbox has a message and the
1259 					 * look ahead is valid
1260 					 */
1261 					l_ahead[i] = RX_LOOKAHEAD_GET(pdev, i);
1262 				}
1263 			}
1264 		} /*end of for loop */
1265 	} while (false);
1266 
1267 	do {
1268 		bool bLookAheadValid = false;
1269 		/* did the interrupt status fetches succeed? */
1270 		if (QDF_IS_STATUS_ERROR(status))
1271 			break;
1272 
1273 		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1274 			if (l_ahead[i] != 0) {
1275 				bLookAheadValid = true;
1276 				break;
1277 			}
1278 		}
1279 
1280 		if ((host_int_status == 0) && !bLookAheadValid) {
1281 			/* nothing to process, the caller can use this
1282 			 * to break out of a loop
1283 			 */
1284 			*done = true;
1285 			break;
1286 		}
1287 
1288 		if (bLookAheadValid) {
1289 			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1290 				int fetched = 0;
1291 
1292 				if (l_ahead[i] == 0)
1293 					continue;
1294 				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1295 						("mbox[%d],lookahead:0x%X\n",
1296 						i, l_ahead[i]));
1297 				/* Mailbox Interrupt, the HTC layer may issue
1298 				 * async requests to empty the mailbox...
1299 				 * When emptying the recv mailbox we use the
1300 				 * async handler from the completion routine of
1301 				 * the caller's read request.
1302 				 * This can improve performance by reducing
1303 				 * the  context switching when we rapidly
1304 				 * pull packets
1305 				 */
1306 				status = hif_dev_recv_message_pending_handler(
1307 							pdev, i,
1308 							&l_ahead
1309 							[i], 1,
1310 							async_processing,
1311 							&fetched);
1312 				if (QDF_IS_STATUS_ERROR(status))
1313 					break;
1314 
1315 				if (!fetched) {
1316 					/* HTC could not pull any messages out
1317 					 * due to lack of resources; force the
1318 					 * DSR handler to ack the interrupt
1319 					 */
1320 					*async_processing = false;
1321 					pdev->RecheckIRQStatusCnt = 0;
1322 				}
1323 			}
1324 		}
1325 
1326 		/* now handle the rest of them */
1327 		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1328 				("Valid source for OTHER interrupts: 0x%x\n",
1329 				host_int_status));
1330 
1331 		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
1332 			/* CPU Interrupt */
1333 			status = hif_dev_service_cpu_interrupt(pdev);
1334 			if (QDF_IS_STATUS_ERROR(status))
1335 				break;
1336 		}
1337 
1338 		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
1339 			/* Error Interrupt */
1340 			status = hif_dev_service_error_interrupt(pdev);
1341 			if (QDF_IS_STATUS_ERROR(status))
1342 				break;
1343 		}
1344 
1345 		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
1346 			/* Counter Interrupt */
1347 			status = hif_dev_service_counter_interrupt(pdev);
1348 			if (QDF_IS_STATUS_ERROR(status))
1349 				break;
1350 		}
1351 
1352 	} while (false);
1353 
1354 	/* an optimization to bypass reading the IRQ status registers
1355 	 * unnecessarily, which can re-wake the target; if upper layers
1356 	 * determine that we are in a low-throughput mode, we can
1357 	 * rely on taking another interrupt rather than re-checking
1358 	 * the status registers which can re-wake the target.
1359 	 *
1360 	 * NOTE : for host interfaces that use the special
1361 	 * GetPendingEventsFunc, this optimization cannot be used due to
1362 	 * possible side-effects.  For example, SPI requires the host
1363 	 * to drain all messages from the mailbox before exiting
1364 	 * the ISR routine.
1365 	 */
1366 	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
1367 		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1368 				("Bypass IRQ Status re-check, forcing done\n"));
1369 		*done = true;
1370 	}
1371 
1372 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1373 			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
1374 			 *done, *async_processing, status));
1375 
1376 	return status;
1377 }
1378 
1379 #define DEV_CHECK_RECV_YIELD(pdev) \
1380 	((pdev)->CurrentDSRRecvCount >= \
1381 	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
1382 /**
1383  * hif_dev_dsr_handler() - Synchronous interrupt handler
1384  *
1385  * @context: hif sdio device context
1386  *
1387  * Return: QDF_STATUS_SUCCESS on success, error status otherwise
1388  */
1389 QDF_STATUS hif_dev_dsr_handler(void *context)
1390 {
1391 	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
1392 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1393 	bool done = false;
1394 	bool async_proc = false;
1395 
1396 	/* reset the recv counter that tracks when we need
1397 	 * to yield from the DSR
1398 	 */
1399 	pdev->CurrentDSRRecvCount = 0;
1400 	/* reset counter used to flag a re-scan of IRQ
1401 	 * status registers on the target
1402 	 */
1403 	pdev->RecheckIRQStatusCnt = 0;
1404 
1405 	while (!done) {
1406 		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
1407 		if (QDF_IS_STATUS_ERROR(status))
1408 			break;
1409 
1410 		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
1411 			/* the HIF layer does not allow async IRQ processing,
1412 			 * override the asyncProc flag
1413 			 */
1414 			async_proc = false;
1415 			/* this will cause us to re-enter ProcessPendingIRQ()
1416 			 * and re-read interrupt status registers.
1417 			 * This has a nice side effect of blocking us until all
1418 			 * async read requests are completed. This behavior is
1419 			 * required as we do not allow ASYNC processing
1420 			 * in interrupt handlers (like Windows CE)
1421 			 */
1422 
1423 			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
1424 				/* ProcessPendingIRQs() pulled enough recv
1425 				 * messages to satisfy the yield count, stop
1426 				 * checking for more messages and return
1427 				 */
1428 				break;
1429 		}
1430 
1431 		if (async_proc) {
1432 			/* the function does some async I/O for performance,
1433 			 * we need to exit the ISR immediately, the check below
1434 			 * will prevent the interrupt from being
1435 			 * Ack'd while we handle it asynchronously
1436 			 */
1437 			break;
1438 		}
1439 	}
1440 
1441 	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
1442 		/* Ack the interrupt only if :
1443 		 *  1. we did not get any errors in processing interrupts
1444 		 *  2. there are no outstanding async processing requests
1445 		 */
1446 		if (pdev->DSRCanYield) {
1447 			/* if the DSR can yield do not ACK the interrupt, there
1448 			 * could be more pending messages. The HIF layer
1449 			 * must ACK the interrupt on behalf of HTC
1450 			 */
1451 			hif_info("%s:  Yield (RX count: %d)",
1452 				 __func__, pdev->CurrentDSRRecvCount);
1453 		} else {
1454 			hif_ack_interrupt(pdev->HIFDevice);
1455 		}
1456 	}
1457 
1458 	return status;
1459 }
1460 
1461 /**
1462  * hif_read_write() - queue a read/write request
1463  * @device: pointer to hif device structure
1464  * @address: address to read from or write to
1465  * @buffer: buffer to hold read/write data
1466  * @length: length to read/write
1467  * @request: read/write/sync/async request
1468  * @context: pointer to hold calling context
1469  *
1470  * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_PENDING on success, else error.
1471  */
1472 QDF_STATUS
1473 hif_read_write(struct hif_sdio_dev *device,
1474 	       unsigned long address,
1475 	       char *buffer, uint32_t length,
1476 	       uint32_t request, void *context)
1477 {
1478 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1479 	struct bus_request *busrequest;
1480 
1481 	AR_DEBUG_ASSERT(device);
1482 	AR_DEBUG_ASSERT(device->func);
1483 	hif_debug("%s: device 0x%pK addr 0x%lX buffer 0x%pK",
1484 		  __func__, device, address, buffer);
1485 	hif_debug("%s: len %d req 0x%X context 0x%pK",
1486 		  __func__, length, request, context);
1487 
1488 	/* SDIO r/w is not needed during suspend, so just return */
1489 	if ((device->is_suspend) &&
1490 	    (device->power_config == HIF_DEVICE_POWER_CUT)) {
1491 		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
1492 		return QDF_STATUS_SUCCESS;
1493 	}
1494 	do {
1495 		if ((request & HIF_ASYNCHRONOUS) ||
1496 		    (request & HIF_SYNCHRONOUS)) {
1497 			/* serialize all requests through the async thread */
1498 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1499 					("%s: Execution mode: %s\n", __func__,
1500 					 (request & HIF_ASYNCHRONOUS) ? "Async"
1501 					 : "Synch"));
1502 			busrequest = hif_allocate_bus_request(device);
1503 			if (!busrequest) {
1504 				hif_err("%s:bus requests unavail", __func__);
1505 				hif_err("%s, addr:0x%lX, len:%d",
1506 					request & HIF_SDIO_READ ? "READ" :
1507 					"WRITE", address, length);
1508 				return QDF_STATUS_E_FAILURE;
1509 			}
1510 			busrequest->address = address;
1511 			busrequest->buffer = buffer;
1512 			busrequest->length = length;
1513 			busrequest->request = request;
1514 			busrequest->context = context;
1515 
1516 			add_to_async_list(device, busrequest);
1517 
1518 			if (request & HIF_SYNCHRONOUS) {
1519 				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1520 						("%s: queued sync req: 0x%lX\n",
1521 						 __func__,
1522 						 (unsigned long)busrequest));
1523 
1524 				/* wait for completion */
1525 				up(&device->sem_async);
1526 				if (down_interruptible(&busrequest->sem_req) ==
1527 				    0) {
1528 					QDF_STATUS status = busrequest->status;
1529 
1530 					hif_debug("%s: sync freeing 0x%lX:0x%X",
1531 						  __func__,
1532 						  (unsigned long)busrequest,
1533 						  busrequest->status);
1534 					hif_debug("%s: freeing req: 0x%X",
1535 						  __func__,
1536 						  (unsigned int)request);
1537 					hif_free_bus_request(device,
1538 							     busrequest);
1539 					return status;
1540 				} else {
1541 					/* interrupted, exit */
1542 					return QDF_STATUS_E_FAILURE;
1543 				}
1544 			} else {
1545 				hif_debug("%s: queued async req: 0x%lX",
1546 					  __func__, (unsigned long)busrequest);
1547 				up(&device->sem_async);
1548 				return QDF_STATUS_E_PENDING;
1549 			}
1550 		} else {
1551 			hif_err("%s: Invalid execution mode: 0x%08x",
1552 				__func__, (unsigned int)request);
1553 			status = QDF_STATUS_E_INVAL;
1554 			break;
1555 		}
1556 	} while (0);
1557 
1558 	return status;
1559 }
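
/*
 * Editorial usage sketch: a synchronous, byte-incrementing read through the
 * request queue.  With HIF_RD_SYNC_BYTE_INC the call blocks until the async
 * thread completes the transfer and returns its status.  The example
 * function name is hypothetical.
 */
#if 0
static QDF_STATUS example_sync_read(struct hif_sdio_dev *device,
				    unsigned long addr,
				    char *buf, uint32_t len)
{
	return hif_read_write(device, addr, buf, len,
			      HIF_RD_SYNC_BYTE_INC, NULL);
}
#endif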
1560 
1561 /**
1562  * hif_sdio_func_enable() - Handle device enabling as per device
1563  * @ol_sc: HIF object pointer
1564  * @func: SDIO function pointer
1565  *
1566  * Return: QDF_STATUS
1567  */
1568 static QDF_STATUS hif_sdio_func_enable(struct hif_softc *ol_sc,
1569 				       struct sdio_func *func)
1570 {
1571 	struct hif_sdio_dev *device = get_hif_device(ol_sc, func);
1572 
1573 	if (device->is_disabled) {
1574 		int ret = 0;
1575 
1576 		sdio_claim_host(func);
1577 
1578 		ret = hif_sdio_quirk_async_intr(ol_sc, func);
1579 		if (ret) {
1580 			hif_err("%s: Error setting async intr:%d",
1581 				__func__, ret);
1582 			sdio_release_host(func);
1583 			return QDF_STATUS_E_FAILURE;
1584 		}
1585 
1586 		func->enable_timeout = 100;
1587 		ret = sdio_enable_func(func);
1588 		if (ret) {
1589 			hif_err("%s: Unable to enable function: %d",
1590 				__func__, ret);
1591 			sdio_release_host(func);
1592 			return QDF_STATUS_E_FAILURE;
1593 		}
1594 
1595 		ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
1596 		if (ret) {
1597 			hif_err("%s: Unable to set block size 0x%X : %d\n",
1598 				__func__, HIF_BLOCK_SIZE, ret);
1599 			sdio_release_host(func);
1600 			return QDF_STATUS_E_FAILURE;
1601 		}
1602 
1603 		ret = hif_sdio_quirk_mod_strength(ol_sc, func);
1604 		if (ret) {
1605 			hif_err("%s: Error setting mod strength : %d\n",
1606 				__func__, ret);
1607 			sdio_release_host(func);
1608 			return QDF_STATUS_E_FAILURE;
1609 		}
1610 
1611 		sdio_release_host(func);
1612 	}
1613 
1614 	return QDF_STATUS_SUCCESS;
1615 }
1616 
1617 /**
1618  * __hif_read_write() - sdio read/write wrapper
1619  * @device: pointer to hif device structure
1620  * @address: address to read
1621  * @buffer: buffer to hold read/write data
1622  * @length: length to read/write
1623  * @request: read/write/sync/async request
1624  * @context: pointer to hold calling context
1625  *
1626  * Return: 0 on success, error number otherwise.
1627  */
1628 static QDF_STATUS
1629 __hif_read_write(struct hif_sdio_dev *device,
1630 		 uint32_t address, char *buffer,
1631 		 uint32_t length, uint32_t request, void *context)
1632 {
1633 	uint8_t opcode;
1634 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1635 	int ret = A_OK;
1636 	uint8_t *tbuffer;
1637 	bool bounced = false;
1638 
1639 	if (!device) {
1640 		hif_err("%s: device null!", __func__);
1641 		return QDF_STATUS_E_INVAL;
1642 	}
1643 
1644 	if (!device->func) {
1645 		hif_err("%s: func null!", __func__);
1646 		return QDF_STATUS_E_INVAL;
1647 	}
1648 
1649 	hif_debug("%s: addr:0X%06X, len:%08d, %s, %s", __func__,
1650 		  address, length,
1651 		  request & HIF_SDIO_READ ? "Read " : "Write",
1652 		  request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");
1653 
1654 	do {
1655 		if (request & HIF_EXTENDED_IO) {
1656 			/* command type: CMD53; nothing more to check */
1657 		} else {
1658 			hif_err("%s: Invalid command type: 0x%08x\n",
1659 				__func__, request);
1660 			status = QDF_STATUS_E_INVAL;
1661 			break;
1662 		}
1663 
1664 		if (request & HIF_BLOCK_BASIS) {
1665 			/* round to whole block length size */
1666 			length =
1667 				(length / HIF_BLOCK_SIZE) *
1668 				HIF_BLOCK_SIZE;
1669 			hif_debug("%s: Block mode (BlockLen: %d)\n",
1670 				  __func__, length);
1671 		} else if (request & HIF_BYTE_BASIS) {
1672 			hif_debug("%s: Byte mode (BlockLen: %d)\n",
1673 				  __func__, length);
1674 		} else {
1675 			hif_err("%s: Invalid data mode: 0x%08x\n",
1676 				__func__, request);
1677 			status = QDF_STATUS_E_INVAL;
1678 			break;
1679 		}
1680 		if (request & HIF_SDIO_WRITE) {
1681 			hif_fixup_write_param(device, request,
1682 					      &length, &address);
1683 
1684 			hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X\n",
1685 				  address, length,
1686 				  (request & HIF_DUMMY_SPACE_MASK) >> 16);
1687 		}
1688 
1689 		if (request & HIF_FIXED_ADDRESS) {
1690 			opcode = CMD53_FIXED_ADDRESS;
1691 			hif_debug("%s: Addr mode: fixed 0x%X\n",
1692 				  __func__, address);
1693 		} else if (request & HIF_INCREMENTAL_ADDRESS) {
1694 			opcode = CMD53_INCR_ADDRESS;
1695 			hif_debug("%s: Address mode: Incremental 0x%X\n",
1696 				  __func__, address);
1697 		} else {
1698 			hif_err("%s: Invalid address mode: 0x%08x\n",
1699 				__func__, request);
1700 			status = QDF_STATUS_E_INVAL;
1701 			break;
1702 		}
1703 
1704 		if (request & HIF_SDIO_WRITE) {
1705 #if HIF_USE_DMA_BOUNCE_BUFFER
1706 			if (BUFFER_NEEDS_BOUNCE(buffer)) {
1707 				AR_DEBUG_ASSERT(device->dma_buffer);
1708 				tbuffer = device->dma_buffer;
1709 				/* copy the write data to the dma buffer */
1710 				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
1711 				if (length > HIF_DMA_BUFFER_SIZE) {
1712 					hif_err("%s: Invalid write len: %d\n",
1713 						__func__, length);
1714 					status = QDF_STATUS_E_INVAL;
1715 					break;
1716 				}
1717 				memcpy(tbuffer, buffer, length);
1718 				bounced = true;
1719 			} else {
1720 				tbuffer = buffer;
1721 			}
1722 #else
1723 			tbuffer = buffer;
1724 #endif
1725 			if (opcode == CMD53_FIXED_ADDRESS  && tbuffer) {
1726 				ret = sdio_writesb(device->func, address,
1727 						   tbuffer, length);
1728 				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
1729 					  __func__, ret, address, length,
1730 					  *(int *)tbuffer);
1731 			} else if (tbuffer) {
1732 				ret = sdio_memcpy_toio(device->func, address,
1733 						       tbuffer, length);
1734 				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
1735 					  __func__, ret, address, length,
1736 					  *(int *)tbuffer);
1737 			}
1738 		} else if (request & HIF_SDIO_READ) {
1739 #if HIF_USE_DMA_BOUNCE_BUFFER
1740 			if (BUFFER_NEEDS_BOUNCE(buffer)) {
1741 				AR_DEBUG_ASSERT(device->dma_buffer);
1742 				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
1743 				if (length > HIF_DMA_BUFFER_SIZE) {
1744 					hif_err("%s: Invalid read len: %d\n",
1745 						__func__, length);
1746 					status = QDF_STATUS_E_INVAL;
1747 					break;
1748 				}
1749 				tbuffer = device->dma_buffer;
1750 				bounced = true;
1751 			} else {
1752 				tbuffer = buffer;
1753 			}
1754 #else
1755 			tbuffer = buffer;
1756 #endif
1757 			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
1758 				ret = sdio_readsb(device->func, tbuffer,
1759 						  address, length);
1760 				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
1761 					  __func__, ret, address, length,
1762 					  *(int *)tbuffer);
1763 			} else if (tbuffer) {
1764 				ret = sdio_memcpy_fromio(device->func,
1765 							 tbuffer, address,
1766 							 length);
1767 				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
1768 					  __func__, ret, address, length,
1769 					  *(int *)tbuffer);
1770 			}
1771 #if HIF_USE_DMA_BOUNCE_BUFFER
1772 			if (bounced && tbuffer)
1773 				memcpy(buffer, tbuffer, length);
1774 #endif
1775 		} else {
1776 			hif_err("%s: Invalid dir: 0x%08x", __func__, request);
1777 			status = QDF_STATUS_E_INVAL;
1778 			return status;
1779 		}
1780 
1781 		if (ret) {
1782 			hif_err("%s: SDIO bus operation failed!", __func__);
1783 			hif_err("%s: MMC stack returned : %d", __func__, ret);
1784 			hif_err("%s: addr:0X%06X, len:%08d, %s, %s",
1785 				__func__, address, length,
1786 				request & HIF_SDIO_READ ? "Read " : "Write",
1787 				request & HIF_ASYNCHRONOUS ?
1788 				"Async" : "Sync");
1789 			status = QDF_STATUS_E_FAILURE;
1790 		}
1791 	} while (false);
1792 
1793 	return status;
1794 }
1795 
1796 /**
1797  * async_task() - thread function to serialize all bus requests
1798  * @param: pointer to hif device
1799  *
1800  * thread function to serialize all requests, both sync and async
1801  * Return: 0 on success, error number otherwise.
1802  */
1803 static int async_task(void *param)
1804 {
1805 	struct hif_sdio_dev *device;
1806 	struct bus_request *request;
1807 	QDF_STATUS status;
1808 	bool claimed = false;
1809 
1810 	device = (struct hif_sdio_dev *)param;
1811 	set_current_state(TASK_INTERRUPTIBLE);
1812 	while (!device->async_shutdown) {
1813 		/* wait for work */
1814 		if (down_interruptible(&device->sem_async) != 0) {
1815 			/* interrupted, exit */
1816 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1817 					("%s: async task interrupted\n",
1818 					 __func__));
1819 			break;
1820 		}
1821 		if (device->async_shutdown) {
1822 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1823 					("%s: async task stopping\n",
1824 					 __func__));
1825 			break;
1826 		}
1827 		/* we want to hold the host over multiple cmds
1828 		 * if possible, but holding the host blocks
1829 		 * card interrupts
1830 		 */
1831 		qdf_spin_lock_irqsave(&device->asynclock);
1832 		/* pull the request to work on */
1833 		while (device->asyncreq) {
1834 			request = device->asyncreq;
1835 			if (request->inusenext)
1836 				device->asyncreq = request->inusenext;
1837 			else
1838 				device->asyncreq = NULL;
1839 			qdf_spin_unlock_irqrestore(&device->asynclock);
1840 			hif_debug("%s: processing req: 0x%lX",
1841 				  __func__, (unsigned long)request);
1842 
1843 			if (!claimed) {
1844 				sdio_claim_host(device->func);
1845 				claimed = true;
1846 			}
1847 			if (request->scatter_req) {
1848 				A_ASSERT(device->scatter_enabled);
1849 				/* pass the request to scatter routine which
1850 				 * executes it synchronously, note, no need
1851 				 * to free the request since scatter requests
1852 				 * are maintained on a separate list
1853 				 */
1854 				status = do_hif_read_write_scatter(device,
1855 								   request);
1856 			} else {
1857 				/* call hif_read_write in sync mode */
1858 				status =
1859 					__hif_read_write(device,
1860 							 request->address,
1861 							 request->buffer,
1862 							 request->length,
1863 							 request->request &
1864 							 ~HIF_SYNCHRONOUS,
1865 							 NULL);
1867 				if (request->request & HIF_ASYNCHRONOUS) {
1868 					void *context = request->context;
1869 
1870 					hif_free_bus_request(device, request);
1871 					device->htc_callbacks.rw_compl_handler(
1872 						context, status);
1873 				} else {
1874 					hif_debug("%s: upping req: 0x%lX",
1875 						  __func__,
1876 						  (unsigned long)request);
1877 					request->status = status;
1878 					up(&request->sem_req);
1879 				}
1880 			}
1881 			qdf_spin_lock_irqsave(&device->asynclock);
1882 		}
1883 		qdf_spin_unlock_irqrestore(&device->asynclock);
1884 		if (claimed) {
1885 			sdio_release_host(device->func);
1886 			claimed = false;
1887 		}
1888 	}
1889 
1890 	complete_and_exit(&device->async_completion, 0);
1891 
1892 	return 0;
1893 }
1894 
1895 /**
1896  * hif_disable_func() - Disable SDIO function
1897  *
1898  * @device: HIF device pointer
1899  * @func: SDIO function pointer
1900  * @reset: true to reset the function state while disabling
1901  *
1902  * Return: 0 in case of success, else error value
1903  */
1904 QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
1905 			    struct sdio_func *func,
1906 			    bool reset)
1907 {
1908 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1909 
1910 	HIF_ENTER();
1911 	if (!IS_ERR(device->async_task)) {
1912 		init_completion(&device->async_completion);
1913 		device->async_shutdown = 1;
1914 		up(&device->sem_async);
1915 		wait_for_completion(&device->async_completion);
1916 		device->async_task = NULL;
1917 		sema_init(&device->sem_async, 0);
1918 	}
1919 
1920 	status = hif_sdio_func_disable(device, func, reset);
1921 	if (status == QDF_STATUS_SUCCESS)
1922 		device->is_disabled = true;
1923 
1924 	cleanup_hif_scatter_resources(device);
1925 
1926 	HIF_EXIT();
1927 
1928 	return status;
1929 }
1930 
1931 /**
1932  * hif_enable_func() - Enable SDIO function
1933  *
1934  * @ol_sc: HIF object pointer
1935  * @device: HIF device pointer
1936  * @func: SDIO function pointer
1937  * @resume: If this is called from resume or probe
1938  *
1939  * Return: 0 in case of success, else error value
1940  */
1941 QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
1942 			   struct sdio_func *func, bool resume)
1943 {
1944 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1945 
1946 	HIF_ENTER();
1947 
1948 	if (!device) {
1949 		hif_err("%s: HIF device is NULL", __func__);
1950 		return QDF_STATUS_E_INVAL;
1951 	}
1952 
1953 	if (hif_sdio_func_enable(ol_sc, func))
1954 		return QDF_STATUS_E_FAILURE;
1955 
1956 	/* create async I/O thread */
1957 	if (!device->async_task && device->is_disabled) {
1958 		device->async_shutdown = 0;
1959 		device->async_task = kthread_create(async_task,
1960 						    (void *)device,
1961 						    "AR6K Async");
1962 		if (IS_ERR(device->async_task)) {
1963 			hif_err("%s: Error creating async task",
1964 				__func__);
1965 			return QDF_STATUS_E_FAILURE;
1966 		}
1967 		device->is_disabled = false;
1968 		wake_up_process(device->async_task);
1969 	}
1970 
1971 	if (!resume)
1972 		ret = hif_sdio_probe(ol_sc, func, device);
1973 
1974 	HIF_EXIT();
1975 
1976 	return ret;
1977 }
1978 #endif /* CONFIG_SDIO_TRANSFER_MAILBOX */
1979