xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifdef CONFIG_SDIO_TRANSFER_MAILBOX
21 #define ATH_MODULE_NAME hif
22 #include <qdf_types.h>
23 #include <qdf_status.h>
24 #include <qdf_timer.h>
25 #include <qdf_time.h>
26 #include <qdf_lock.h>
27 #include <qdf_mem.h>
28 #include <qdf_util.h>
29 #include <qdf_defer.h>
30 #include <qdf_atomic.h>
31 #include <qdf_nbuf.h>
32 #include <qdf_threads.h>
33 #include <athdefs.h>
34 #include <qdf_net_types.h>
35 #include <a_types.h>
36 #include <athdefs.h>
37 #include <a_osapi.h>
38 #include <hif.h>
39 #include <htc_internal.h>
40 #include <htc_services.h>
41 #include <a_debug.h>
42 #include "hif_sdio_internal.h"
43 #include "if_sdio.h"
44 #include "regtable.h"
45 #include "transfer.h"
46 
47 #ifdef SDIO_3_0
48 /**
49  * set_extended_mbox_size() - set extended MBOX size
50  * @pinfo: sdio mailbox info
51  *
52  * Return: none.
53  */
54 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
55 {
56 	pinfo->mbox_prop[0].extended_size =
57 		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
58 	pinfo->mbox_prop[1].extended_size =
59 		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
60 }
61 
62 /**
63  * set_extended_mbox_address() - set extended MBOX address
64  * @pinfo: sdio mailbox info
65  *
66  * Return: none.
67  */
68 static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
69 {
70 	pinfo->mbox_prop[1].extended_address =
71 		pinfo->mbox_prop[0].extended_address +
72 		pinfo->mbox_prop[0].extended_size +
73 		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
74 }
75 #else
76 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
77 {
78 	pinfo->mbox_prop[0].extended_size =
79 		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
80 }
81 
/* Without SDIO_3_0 only mailbox 0 is extended, so there is no
 * mailbox 1 extended address to derive — intentionally a no-op.
 */
static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
}
86 #endif
87 
88 /**
89  * set_extended_mbox_window_info() - set extended MBOX window
90  * information for SDIO interconnects
91  * @manf_id: manufacturer id
92  * @pinfo: sdio mailbox info
93  *
94  * Return: none.
95  */
96 static void set_extended_mbox_window_info(uint16_t manf_id,
97 					  struct hif_device_mbox_info *pinfo)
98 {
99 	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
100 	case MANUFACTURER_ID_AR6002_BASE:
101 		/* MBOX 0 has an extended range */
102 
103 		pinfo->mbox_prop[0].extended_address =
104 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
105 		pinfo->mbox_prop[0].extended_size =
106 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
107 
108 		pinfo->mbox_prop[0].extended_address =
109 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
110 		pinfo->mbox_prop[0].extended_size =
111 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
112 
113 		pinfo->mbox_prop[0].extended_address =
114 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
115 		pinfo->mbox_prop[0].extended_size =
116 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
117 
118 		break;
119 	case MANUFACTURER_ID_AR6003_BASE:
120 		/* MBOX 0 has an extended range */
121 		pinfo->mbox_prop[0].extended_address =
122 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
123 		pinfo->mbox_prop[0].extended_size =
124 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
125 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
126 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
127 		break;
128 	case MANUFACTURER_ID_AR6004_BASE:
129 		pinfo->mbox_prop[0].extended_address =
130 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
131 		pinfo->mbox_prop[0].extended_size =
132 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
133 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
134 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
135 		break;
136 	case MANUFACTURER_ID_AR6320_BASE:
137 	{
138 		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;
139 
140 		pinfo->mbox_prop[0].extended_address =
141 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
142 		if (rev < 4)
143 			pinfo->mbox_prop[0].extended_size =
144 				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
145 		else
146 			set_extended_mbox_size(pinfo);
147 		set_extended_mbox_address(pinfo);
148 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
149 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
150 		break;
151 	}
152 	case MANUFACTURER_ID_QCA9377_BASE:
153 	case MANUFACTURER_ID_QCA9379_BASE:
154 		pinfo->mbox_prop[0].extended_address =
155 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
156 		pinfo->mbox_prop[0].extended_size =
157 			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
158 		pinfo->mbox_prop[1].extended_address =
159 			pinfo->mbox_prop[0].extended_address +
160 			pinfo->mbox_prop[0].extended_size +
161 			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
162 		pinfo->mbox_prop[1].extended_size =
163 			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
164 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
165 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
166 		break;
167 	default:
168 		A_ASSERT(false);
169 		break;
170 	}
171 }
172 
173 /**
174  * hif_dev_get_fifo_address() - get the fifo addresses for dma
175  * @pdev:  SDIO HIF object
176  * @config: mbox address config pointer
177  *
178  * Return : 0 for success, non-zero for error
179  */
180 QDF_STATUS hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
181 				    struct hif_device_mbox_info *config,
182 				    uint32_t config_len)
183 {
184 	uint32_t count;
185 
186 	for (count = 0; count < 4; count++)
187 		config->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);
188 
189 	if (config_len >= sizeof(struct hif_device_mbox_info)) {
190 		set_extended_mbox_window_info((uint16_t)pdev->func->device,
191 					      config);
192 		return QDF_STATUS_SUCCESS;
193 	}
194 
195 	return QDF_STATUS_E_INVAL;
196 }
197 
198 /**
199  * hif_dev_get_block_size() - get the mbox block size for dma
200  * @config : mbox size config pointer
201  *
202  * Return : NONE
203  */
204 void hif_dev_get_block_size(void *config)
205 {
206 	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
207 	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
208 	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
209 	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
210 }
211 
212 /**
213  * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
214  * @pDev: SDIO HIF object
215  * @ServiceId: sevice index
216  * @ULPipe: uplink pipe id
217  * @DLPipe: down-linklink pipe id
218  *
219  * Return: 0 on success, error value on invalid map
220  */
221 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
222 				       uint8_t *ul_pipe, uint8_t *dl_pipe)
223 {
224 	QDF_STATUS status = QDF_STATUS_SUCCESS;
225 
226 	switch (svc) {
227 	case HTT_DATA_MSG_SVC:
228 		if (hif_dev_get_mailbox_swap(pdev)) {
229 			*ul_pipe = 1;
230 			*dl_pipe = 0;
231 		} else {
232 			*ul_pipe = 3;
233 			*dl_pipe = 2;
234 		}
235 		break;
236 
237 	case HTC_CTRL_RSVD_SVC:
238 	case HTC_RAW_STREAMS_SVC:
239 		*ul_pipe = 1;
240 		*dl_pipe = 0;
241 		break;
242 
243 	case WMI_DATA_BE_SVC:
244 	case WMI_DATA_BK_SVC:
245 	case WMI_DATA_VI_SVC:
246 	case WMI_DATA_VO_SVC:
247 		*ul_pipe = 1;
248 		*dl_pipe = 0;
249 		break;
250 
251 	case WMI_CONTROL_SVC:
252 		if (hif_dev_get_mailbox_swap(pdev)) {
253 			*ul_pipe = 3;
254 			*dl_pipe = 2;
255 		} else {
256 			*ul_pipe = 1;
257 			*dl_pipe = 0;
258 		}
259 		break;
260 
261 	default:
262 		HIF_ERROR("%s: Err : Invalid service (%d)",
263 			  __func__, svc);
264 		status = QDF_STATUS_E_INVAL;
265 		break;
266 	}
267 	return status;
268 }
269 
270 /** hif_dev_setup_device() - Setup device specific stuff here required for hif
271  * @pdev : HIF layer object
272  *
273  * return 0 on success, error otherwise
274  */
275 int hif_dev_setup_device(struct hif_sdio_device *pdev)
276 {
277 	int status = 0;
278 	uint32_t blocksizes[MAILBOX_COUNT];
279 
280 	status = hif_configure_device(NULL, pdev->HIFDevice,
281 				      HIF_DEVICE_GET_FIFO_ADDR,
282 				      &pdev->MailBoxInfo,
283 				      sizeof(pdev->MailBoxInfo));
284 
285 	if (status != QDF_STATUS_SUCCESS)
286 		HIF_ERROR("%s: HIF_DEVICE_GET_MBOX_ADDR failed", __func__);
287 
288 	status = hif_configure_device(NULL, pdev->HIFDevice,
289 				      HIF_DEVICE_GET_BLOCK_SIZE,
290 				      blocksizes, sizeof(blocksizes));
291 	if (status != QDF_STATUS_SUCCESS)
292 		HIF_ERROR("%s: HIF_DEVICE_GET_MBOX_BLOCK_SIZE fail", __func__);
293 
294 	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
295 
296 	return status;
297 }
298 
299 /** hif_dev_mask_interrupts() - Disable the interrupts in the device
300  * @pdev SDIO HIF Object
301  *
302  * Return: NONE
303  */
304 void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
305 {
306 	int status = QDF_STATUS_SUCCESS;
307 
308 	HIF_ENTER();
309 	/* Disable all interrupts */
310 	LOCK_HIF_DEV(pdev);
311 	mboxEnaRegs(pdev).int_status_enable = 0;
312 	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
313 	mboxEnaRegs(pdev).error_status_enable = 0;
314 	mboxEnaRegs(pdev).counter_int_status_enable = 0;
315 	UNLOCK_HIF_DEV(pdev);
316 
317 	/* always synchronous */
318 	status = hif_read_write(pdev->HIFDevice,
319 				INT_STATUS_ENABLE_ADDRESS,
320 				(char *)&mboxEnaRegs(pdev),
321 				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
322 				HIF_WR_SYNC_BYTE_INC, NULL);
323 
324 	if (status != QDF_STATUS_SUCCESS)
325 		HIF_ERROR("%s: Err updating intr reg: %d", __func__, status);
326 }
327 
/** hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Builds the full IRQ-enable register image in the shadow copy under
 * the device lock, then synchronously writes all of it to the target
 * starting at INT_STATUS_ENABLE_ADDRESS.
 *
 * Return: NONE
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable 2 mboxs INT */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register.
	 * NOTE(review): the >> 16 presumably moves the SET-macro result
	 * from its 32-bit register position into this byte-wide shadow
	 * field — confirm against the register layout in regtable.h.
	 */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors);
	 * the >> 24 likewise repositions the SET-macro result into the
	 * byte-wide shadow field.
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: Err updating intr reg: %d", __func__, status);
}
383 
384 void hif_dev_dump_registers(struct hif_sdio_device *pdev,
385 			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
386 			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
387 			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
388 {
389 	int i = 0;
390 
391 	HIF_DBG("%s: Mailbox registers:", __func__);
392 
393 	if (irq_proc) {
394 		HIF_DBG("HostIntStatus: 0x%x ", irq_proc->host_int_status);
395 		HIF_DBG("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
396 		HIF_DBG("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
397 		HIF_DBG("CounterIntStat: 0x%x ", irq_proc->counter_int_status);
398 		HIF_DBG("MboxFrame: 0x%x ", irq_proc->mbox_frame);
399 		HIF_DBG("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
400 		HIF_DBG("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
401 		HIF_DBG("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
402 		HIF_DBG("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
403 		HIF_DBG("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);
404 
405 		if (pdev->MailBoxInfo.gmbox_address != 0) {
406 			HIF_DBG("GMBOX-HostIntStatus2:  0x%x ",
407 				irq_proc->host_int_status2);
408 			HIF_DBG("GMBOX-RX-Avail: 0x%x ",
409 				irq_proc->gmbox_rx_avail);
410 		}
411 	}
412 
413 	if (irq_en) {
414 		HIF_DBG("IntStatusEnable: 0x%x\n",
415 			irq_en->int_status_enable);
416 		HIF_DBG("CounterIntStatus: 0x%x\n",
417 			irq_en->counter_int_status_enable);
418 	}
419 
420 	for (i = 0; mbox_regs && i < 4; i++)
421 		HIF_DBG("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]);
422 }
423 
/* Under HL SDIO with Interface Memory support, there are
 * two reasons to support two mailboxes:
 * a) We need to place different buffers in different
 * mempools; for example, data buffers use Interface Memory
 * while descriptors and others use DRAM, so they need
 * different SDIO mailbox channels.
 * b) Currently, the tx mempool in the LL case is separate from
 * the main mempool; its structure (descriptors at the beginning
 * of every pool buffer) is different because it only needs to
 * store tx descriptors from the host. To align with the LL case,
 * we also need two-mailbox support, just as in the PCIe LL cases.
 */
436 
437 /**
438  * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
439  * @pdev: The pointer to the hif device object
440  * @pipeid: pipe index
441  *
442  * Return: mailbox index
443  */
444 static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
445 					    uint8_t pipeid)
446 {
447 	if (2 == pipeid || 3 == pipeid)
448 		return 1;
449 	else if (0 == pipeid || 1 == pipeid)
450 		return 0;
451 
452 	HIF_ERROR("%s: pipeid=%d invalid", __func__, pipeid);
453 
454 	qdf_assert(0);
455 
456 	return INVALID_MAILBOX_NUMBER;
457 }
458 
459 /**
460  * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
461  * @pdev: The pointer to the hif device object
462  * @mboxIndex: mailbox index
463  * @upload: boolean to decide mailbox index
464  *
465  * Return: Invalid pipe index
466  */
467 static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
468 					    uint8_t mbox_index, bool upload)
469 {
470 	if (mbox_index == 0)
471 		return upload ? 1 : 0;
472 	else if (mbox_index == 1)
473 		return upload ? 3 : 2;
474 
475 	HIF_ERROR("%s: mbox_index=%d, upload=%d invalid",
476 		  __func__, mbox_index, upload);
477 
478 	qdf_assert(0);
479 
480 	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
481 }
482 
483 /**
484  * hif_get_send_addr() - Get the transfer pipe address
485  * @pdev: The pointer to the hif device object
486  * @pipe: The pipe identifier
487  *
488  * Return 0 for success and non-zero for failure to map
489  */
490 int hif_get_send_address(struct hif_sdio_device *pdev,
491 			 uint8_t pipe, uint32_t *addr)
492 {
493 	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;
494 
495 	if (!addr)
496 		return -EINVAL;
497 
498 	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);
499 
500 	if (mbox_index == INVALID_MAILBOX_NUMBER)
501 		return -EINVAL;
502 
503 	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;
504 
505 	return 0;
506 }
507 
/**
 * hif_fixup_write_param() - Tweak the address and length parameters
 * @pdev: The pointer to the hif device object
 * @req: request flags; HIF_DUMMY_SPACE_MASK bits carry a dummy offset
 * @length: The length pointer (read only; must fit in the window)
 * @addr: The addr pointer, adjusted in place for mailbox windows
 *
 * For writes that target a mailbox window, shifts the address so the
 * transfer ends at the top of the window (end-of-message alignment),
 * then adds the dummy-space offset encoded in @req. Writes to the
 * host-control/CIS range and unrecognized addresses are left untouched.
 *
 * Return: None
 */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	/* fetch the current mailbox window layout from the lower layer */
	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		/* base (non-extended) mailbox: fixed window width */
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		HIF_ERROR("%s: Invalid write addr: 0x%08x\n", __func__, taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			HIF_ERROR("%s: Error (%u > %u)",
				  __func__, *length, mboxlen);
			return;
		}

		/* move the write so it finishes at the window's end */
		taddr = taddr + (mboxlen - *length);
		/* add dummy-space offset carried in the request flags */
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
554 
/**
 * hif_dev_recv_packet() - Receive HTC packet/packet information from device
 * @pdev: HIF device object
 * @packet: The HTC packet pointer
 * @recv_length: The length of information to be received
 * @mbox_index: The mailbox that contains this information
 *
 * Reads @recv_length bytes (padded up to the device block size) from
 * the given mailbox into @packet->pBuffer. The transfer is synchronous
 * when the packet has no Completion callback, asynchronous otherwise;
 * in the async case the packet itself is passed as the request context.
 *
 * Return: 0 for success, QDF_STATUS_E_PENDING for an in-flight async
 *         read, non-zero for error
 */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	/* no completion callback means the caller wants a blocking read */
	bool sync = (packet->Completion) ? false : true;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	if (padded_length > packet->BufferLength) {
		HIF_ERROR("%s: No space for padlen:%d recvlen:%d bufferlen:%d",
			  __func__, padded_length,
			  recv_length, packet->BufferLength);
		/* async callers get the failure via their completion */
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in Endpoint member */
	HIF_INFO("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		 __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		 padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		HIF_ERROR("%s : Failed %d", __func__, status);

	if (sync) {
		/* synchronous path: record result and trace the header */
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;

			HIF_INFO("%s: EP:%d,Len:%d,Flag:%d,CB:0x%02X,0x%02X\n",
				 __func__,
				 hdr->EndpointID, hdr->PayloadLen,
				 hdr->Flags, hdr->ControlBytes0,
				 hdr->ControlBytes1);
		}
	}

	return status;
}
617 
/**
 * hif_dev_issue_recv_packet_bundle() - fetch several RX packets in one read
 * @pdev: HIF device object
 * @recv_pkt_queue: queue of prepared RX packets to fill
 * @sync_completion_queue: where fetched packets are queued for
 *                         synchronous completion; NULL in async mode
 * @mail_box_index: mailbox to read the bundle from
 * @num_packets_fetched: set to the number of packets packed into the
 *                       bundle on success
 * @partial_bundle: true when this bundle is already known to be a
 *                  partial fetch of a larger burst
 *
 * Packs as many queued packets as fit into one bundle buffer (bounded
 * by HTC_MAX_MSG_PER_BUNDLE_RX credits), issues a single blocking read,
 * then scatters the bundle buffer back into the individual packet
 * buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM when no
 *         bundle buffer is available, or the read status on failure
 */
static QDF_STATUS hif_dev_issue_recv_packet_bundle
(
	struct hif_sdio_device *pdev,
	HTC_PACKET_QUEUE *recv_pkt_queue,
	HTC_PACKET_QUEUE *sync_completion_queue,
	uint8_t mail_box_index,
	int *num_packets_fetched,
	bool partial_bundle
)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	/* more packets queued than one bundle can carry -> partial fetch */
	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		HIF_WARN("%s, partial bundle detected num: %d, %d\n",
			 __func__,
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		HIF_ERROR("%s: packet_rx_bundle is NULL\n", __func__);
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	/* pack packets into the bundle until space or queue runs out */
	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet != NULL);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		/* non-final packets in a partial burst must not have their
		 * lookaheads trusted
		 */
		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	/* one blocking read covers the whole bundle */
	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s, hif_send Failed status:%d\n",
			  __func__, status);
	} else {
		/* scatter the bundle buffer back into each packet buffer,
		 * walking the same padded lengths used while packing
		 */
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
				DEV_CALC_RECV_PADDED_LEN(pdev,
							 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length +=
						HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}
727 
728 #define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
/**
 * hif_dev_recv_message_pending_handler() - fetch and complete pending RX
 * @pdev: HIF device object
 * @mail_box_index: mailbox with pending messages
 * @msg_look_aheads: lookahead words describing the pending messages
 * @num_look_aheads: number of valid entries in @msg_look_aheads
 * @async_proc: if non-NULL, set to whether async processing was chosen
 * @num_pkts_fetched: if non-NULL, set to the total packets fetched
 *
 * Repeatedly allocates RX packets from the lookaheads, fetches them
 * (bundled when more than one is queued, otherwise one at a time), and
 * completes the synchronously-fetched packets through the registered
 * rx completion callback. The loop continues while processing of the
 * fetched headers yields fresh lookaheads.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t,	uint8_t);

	HIF_INFO("%s: NumLookAheads: %d\n", __func__, num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF in which HIF may have
		 * restrictions on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	/* work on a local copy; the loop refreshes it from recv headers */
	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packets queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			HIF_ERROR("%s: Invalid Endpoint in lookahead: %d\n",
				  __func__, id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets indicated
		 * by the lookaheads these packets are stored
		 * in the recvPkt queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(
							&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) !=
					0) {
					/* we couldn't fetch all packets at one,
					 * time this creates a broken
					 * bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt != NULL);
				if (!pkt)
					break;

				/* force the synchronous path in
				 * hif_dev_recv_packet
				 */
				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) >
				    0) {
					/* lookaheads in all packets except the
					 * last one in must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				/* on error, drain the remaining queued
				 * packets and free their net buffers
				 */
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (pkt == NULL)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* sent synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled,
		 * we can process each packet, check lookahead , then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt != NULL);
			if (!pkt)
				break;

			/* header processing may produce new lookaheads,
			 * which keep the outer loop going
			 */
			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(
						&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more look aheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happens in same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}
959 
/**
 * hif_dev_service_cpu_interrupt() - service fatal interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Acknowledges the pending CPU interrupt bits (write-1-to-clear) and,
 * for CPU interrupt #0, reports a firmware event/assertion up through
 * the registered fwEventHandler callback.
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t cpu_int_status;

	/* only service bits that are both pending and enabled */
	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
			 mboxEnaRegs(pdev).cpu_int_status_enable;

	HIF_ERROR("%s: 0x%x", __func__, (uint32_t)cpu_int_status);

	/* Clear the interrupt */
	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;

	/*set up the register transfer buffer to hit the register
	 * 4 times , this is done to make the access 4-byte aligned
	 * to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4-bytes
	 * set W1C value to clear the interrupt, this hits the register
	 * first
	 */
	reg_buffer[0] = cpu_int_status;
	/* the remaining 3 values are set to zero which have no-effect  */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				CPU_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);

	/* The Interrupt sent to the Host is generated via bit0
	 * of CPU INT register
	 */
	if (cpu_int_status & 0x1) {
		if (pdev->hif_callbacks.fwEventHandler)
			/* It calls into HTC which propagates this
			 * to ol_target_failure()
			 */
			pdev->hif_callbacks.fwEventHandler(
				pdev->hif_callbacks.Context,
				QDF_STATUS_E_FAILURE);
	} else {
		HIF_ERROR("%s: Unrecognized CPU event", __func__);
	}

	return status;
}
1018 
1019 /**
1020  * hif_dev_service_error_interrupt() - service error interrupts
1021  * synchronously
1022  *
1023  * @pDev: hif sdio device context
1024  *
1025  * Return: QDF_STATUS_SUCCESS for success
1026  */
1027 static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
1028 {
1029 	QDF_STATUS status;
1030 	uint8_t reg_buffer[4];
1031 	uint8_t error_int_status = 0;
1032 
1033 	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
1034 	HIF_ERROR("%s: 0x%x", __func__, error_int_status);
1035 
1036 	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
1037 		HIF_ERROR("%s: Error : Wakeup", __func__);
1038 
1039 	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
1040 		HIF_ERROR("%s: Error : Rx Underflow", __func__);
1041 
1042 	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
1043 		HIF_ERROR("%s: Error : Tx Overflow", __func__);
1044 
1045 	/* Clear the interrupt */
1046 	mboxProcRegs(pdev).error_int_status &= ~error_int_status;
1047 
1048 	/* set up the register transfer buffer to hit the register
1049 	 * 4 times , this is done to make the access 4-byte
1050 	 * aligned to mitigate issues with host bus interconnects that
1051 	 * restrict bus transfer lengths to be a multiple of 4-bytes
1052 	 */
1053 
1054 	/* set W1C value to clear the interrupt */
1055 	reg_buffer[0] = error_int_status;
1056 	/* the remaining 4 values are set to zero which have no-effect  */
1057 	reg_buffer[1] = 0;
1058 	reg_buffer[2] = 0;
1059 	reg_buffer[3] = 0;
1060 
1061 	status = hif_read_write(pdev->HIFDevice,
1062 				ERROR_INT_STATUS_ADDRESS,
1063 				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1064 
1065 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1066 	return status;
1067 }
1068 
1069 /**
1070  * hif_dev_service_debug_interrupt() - service debug interrupts
1071  * synchronously
1072  *
1073  * @pDev: hif sdio device context
1074  *
1075  * Return: QDF_STATUS_SUCCESS for success
1076  */
1077 static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
1078 {
1079 	uint32_t dummy;
1080 	QDF_STATUS status;
1081 
1082 	/* Send a target failure event to the application */
1083 	HIF_ERROR("%s: Target debug interrupt", __func__);
1084 
1085 	/* clear the interrupt , the debug error interrupt is counter 0
1086 	 * read counter to clear interrupt
1087 	 */
1088 	status = hif_read_write(pdev->HIFDevice,
1089 				COUNT_DEC_ADDRESS,
1090 				(uint8_t *)&dummy,
1091 				4, HIF_RD_SYNC_BYTE_INC, NULL);
1092 
1093 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1094 	return status;
1095 }
1096 
1097 /**
1098  * hif_dev_service_counter_interrupt() - service counter interrupts
1099  * synchronously
1100  *
1101  * @pDev: hif sdio device context
1102  *
1103  * Return: QDF_STATUS_SUCCESS for success
1104  */
1105 static
1106 QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
1107 {
1108 	uint8_t counter_int_status;
1109 
1110 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
1111 
1112 	counter_int_status = mboxProcRegs(pdev).counter_int_status &
1113 			     mboxEnaRegs(pdev).counter_int_status_enable;
1114 
1115 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1116 			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
1117 			 counter_int_status));
1118 
1119 	/* Check if the debug interrupt is pending
1120 	 * NOTE: other modules like GMBOX may use the counter interrupt
1121 	 * for credit flow control on other counters, we only need to
1122 	 * check for the debug assertion counter interrupt
1123 	 */
1124 	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
1125 		return hif_dev_service_debug_interrupt(pdev);
1126 
1127 	return QDF_STATUS_SUCCESS;
1128 }
1129 
/* Fetch the cached RX lookahead word for mailbox @i from the register
 * shadow.  The index argument is parenthesized so the macro expands
 * correctly when invoked with an expression (e.g. i + 1); without the
 * parentheses the multiplication would bind only to the first term.
 */
#define RX_LOOAHEAD_GET(pdev, i) \
	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)]
1132 /**
1133  * hif_dev_process_pending_irqs() - process pending interrupts
1134  * @pDev: hif sdio device context
1135  * @pDone: pending irq completion status
1136  * @pASyncProcessing: sync/async processing flag
1137  *
1138  * Return: QDF_STATUS_SUCCESS for success
1139  */
QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
					bool *done,
					bool *async_processing)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t host_int_status = 0;
	uint32_t l_ahead[MAILBOX_USED_COUNT];
	int i;

	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
			 (unsigned long)pdev));

	/* NOTE: the HIF implementation guarantees that the context
	 * of this call allows us to perform SYNCHRONOUS I/O,
	 * that is we can block, sleep or call any API that
	 * can block or switch thread/task contexts.
	 * This is a fully schedulable context.
	 */

	/* Phase 1: read the interrupt status register block from the
	 * target and collect the per-mailbox lookahead words for any
	 * mailbox that has a pending message with a valid lookahead.
	 */
	do {
		if (mboxEnaRegs(pdev).int_status_enable == 0) {
			/* interrupt enables have been cleared, do not try
			 * to process any pending interrupts that
			 * may result in more bus transactions.
			 * The target may be unresponsive at this point.
			 */
			break;
		}
		/* Bulk-read the whole processing register set starting at
		 * HOST_INT_STATUS into the cached shadow.
		 */
		status = hif_read_write(pdev->HIFDevice,
					HOST_INT_STATUS_ADDRESS,
					(uint8_t *)&mboxProcRegs(pdev),
					sizeof(mboxProcRegs(pdev)),
					HIF_RD_SYNC_BYTE_INC, NULL);

		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
			hif_dev_dump_registers(pdev,
					       &mboxProcRegs(pdev),
					       &mboxEnaRegs(pdev),
					       &mboxCountRegs(pdev));
		}

		/* Update only those registers that are enabled */
		host_int_status = mboxProcRegs(pdev).host_int_status
				  & mboxEnaRegs(pdev).int_status_enable;

		/* only look at mailbox status if the HIF layer did not
		 * provide this function, on some HIF interfaces reading
		 * the RX lookahead is not valid to do
		 */
		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			l_ahead[i] = 0;
			if (host_int_status & (1 << i)) {
				/* mask out pending mailbox value, we use
				 * "lookAhead" as the real flag for
				 * mailbox processing below
				 */
				host_int_status &= ~(1 << i);
				if (mboxProcRegs(pdev).
				    rx_lookahead_valid & (1 << i)) {
					/* mailbox has a message and the
					 * look ahead is valid
					 */
					l_ahead[i] = RX_LOOAHEAD_GET(pdev, i);
				}
			}
		} /*end of for loop */
	} while (false);

	/* Phase 2: drain any mailboxes with valid lookaheads, then
	 * dispatch the remaining (CPU/error/counter) interrupt sources.
	 */
	do {
		bool bLookAheadValid = false;
		/* did the interrupt status fetches succeed? */
		if (QDF_IS_STATUS_ERROR(status))
			break;

		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			if (l_ahead[i] != 0) {
				bLookAheadValid = true;
				break;
			}
		}

		if ((host_int_status == 0) && !bLookAheadValid) {
			/* nothing to process, the caller can use this
			 * to break out of a loop
			 */
			*done = true;
			break;
		}

		if (bLookAheadValid) {
			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
				int fetched = 0;

				if (l_ahead[i] == 0)
					continue;
				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
						("mbox[%d],lookahead:0x%X\n",
						i, l_ahead[i]));
				/* Mailbox Interrupt, the HTC layer may issue
				 * async requests to empty the mailbox...
				 * When emptying the recv mailbox we use the
				 * async handler from the completion routine of
				 * routine of the callers read request.
				 * This can improve performance by reducing
				 * the  context switching when we rapidly
				 * pull packets
				 */
				status = hif_dev_recv_message_pending_handler(
							pdev, i,
							&l_ahead
							[i], 1,
							async_processing,
							&fetched);
				if (QDF_IS_STATUS_ERROR(status))
					break;

				if (!fetched) {
					/* HTC could not pull any messages out
					 * due to lack of resources force DSR
					 * handle to ack the interrupt
					 */
					*async_processing = false;
					pdev->RecheckIRQStatusCnt = 0;
				}
			}
		}

		/* now handle the rest of them */
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Valid source for OTHER interrupts: 0x%x\n",
				host_int_status));

		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
			/* CPU Interrupt */
			status = hif_dev_service_cpu_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
			/* Error Interrupt */
			status = hif_dev_service_error_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
			/* Counter Interrupt */
			status = hif_dev_service_counter_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

	} while (false);

	/* an optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can
	 * rely on taking another interrupt rather than re-checking
	 * the status registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that use the special
	 * GetPendingEventsFunc, this optimization cannot be used due to
	 * possible side-effects.  For example, SPI requires the host
	 * to drain all messages from the mailbox before exiting
	 * the ISR routine.
	 */
	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Bypass IRQ Status re-check, forcing done\n"));
		*done = true;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
			 *done, *async_processing, status));

	return status;
}
1323 
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) && \
		 !defined(WITH_BACKPORTS)
/**
 * hif_sdio_set_drvdata() - set wlan driver data into upper layer private
 * @func: pointer to sdio function
 * @hifdevice: pointer to hif device
 *
 * On kernels older than 3.15 (without backports) sdio_set_drvdata()
 * yields a status value, which is passed through to the caller here.
 *
 * Return: non zero for success.
 */
int hif_sdio_set_drvdata(struct sdio_func *func,
			 struct hif_sdio_dev *hifdevice)
{
	return sdio_set_drvdata(func, hifdevice);
}
#else
/* On 3.15+ kernels (or with backports) sdio_set_drvdata() returns void,
 * so unconditionally report success to keep the wrapper signature the
 * same across kernel versions.
 */
int hif_sdio_set_drvdata(struct sdio_func *func,
			 struct hif_sdio_dev *hifdevice)
{
	sdio_set_drvdata(func, hifdevice);
	return 0;
}
#endif /* LINUX VERSION */
1346 
1347 struct hif_sdio_dev *get_hif_device(struct sdio_func *func)
1348 {
1349 	qdf_assert(func != NULL);
1350 
1351 	return (struct hif_sdio_dev *)sdio_get_drvdata(func);
1352 }
1353 #endif /* CONFIG_SDIO_TRANSFER_MAILBOX */
1354