xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifdef CONFIG_SDIO_TRANSFER_MAILBOX
21 #define ATH_MODULE_NAME hif
22 #include <linux/kthread.h>
23 #include <qdf_types.h>
24 #include <qdf_status.h>
25 #include <qdf_timer.h>
26 #include <qdf_time.h>
27 #include <qdf_lock.h>
28 #include <qdf_mem.h>
29 #include <qdf_util.h>
30 #include <qdf_defer.h>
31 #include <qdf_atomic.h>
32 #include <qdf_nbuf.h>
33 #include <qdf_threads.h>
34 #include <athdefs.h>
35 #include <qdf_net_types.h>
36 #include <a_types.h>
37 #include <athdefs.h>
38 #include <a_osapi.h>
39 #include <hif.h>
40 #include <htc_internal.h>
41 #include <htc_services.h>
42 #include <a_debug.h>
43 #include "hif_sdio_internal.h"
44 #include "if_sdio.h"
45 #include "regtable.h"
46 #include "transfer.h"
47 
48 /*
49  * The following commit was introduced in v5.17:
50  * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
51  * Use the old name for kernels before 5.17
52  */
53 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
54 #define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
55 #endif
56 
57 /* by default setup a bounce buffer for the data packets,
58  * if the underlying host controller driver
59  * does not use DMA you may be able to skip this step
60  * and save the memory allocation and transfer time
61  */
62 #define HIF_USE_DMA_BOUNCE_BUFFER 1
63 #if HIF_USE_DMA_BOUNCE_BUFFER
64 /* macro to check if DMA buffer is WORD-aligned and DMA-able.
65  * Most host controllers assume the
66  * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack).
67  * virt_addr_valid check fails on stack memory.
68  */
69 #define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
70 					!virt_addr_valid((buffer)))
71 #else
72 #define BUFFER_NEEDS_BOUNCE(buffer)   (false)
73 #endif
74 
75 #ifdef SDIO_3_0
76 /**
77  * set_extended_mbox_size() - set extended MBOX size
78  * @pinfo: sdio mailbox info
79  *
80  * Return: none.
81  */
82 static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
83 {
84 	pinfo->mbox_prop[0].extended_size =
85 		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
86 	pinfo->mbox_prop[1].extended_size =
87 		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
88 }
89 
90 /**
91  * set_extended_mbox_address() - set extended MBOX address
92  * @pinfo: sdio mailbox info
93  *
94  * Return: none.
95  */
96 static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
97 {
98 	pinfo->mbox_prop[1].extended_address =
99 		pinfo->mbox_prop[0].extended_address +
100 		pinfo->mbox_prop[0].extended_size +
101 		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
102 }
103 #else
/* Non-SDIO-3.0 build: only MBOX0 gets an extended width */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}
109 
/* Non-SDIO-3.0 build: no second mailbox window to place — intentional no-op */
static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
}
114 #endif
115 
116 /**
117  * set_extended_mbox_window_info() - set extended MBOX window
118  * information for SDIO interconnects
119  * @manf_id: manufacturer id
120  * @pinfo: sdio mailbox info
121  *
122  * Return: none.
123  */
124 static void set_extended_mbox_window_info(uint16_t manf_id,
125 					  struct hif_device_mbox_info *pinfo)
126 {
127 	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
128 	case MANUFACTURER_ID_AR6002_BASE:
129 		/* MBOX 0 has an extended range */
130 
131 		pinfo->mbox_prop[0].extended_address =
132 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
133 		pinfo->mbox_prop[0].extended_size =
134 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
135 
136 		pinfo->mbox_prop[0].extended_address =
137 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
138 		pinfo->mbox_prop[0].extended_size =
139 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
140 
141 		pinfo->mbox_prop[0].extended_address =
142 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
143 		pinfo->mbox_prop[0].extended_size =
144 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
145 
146 		break;
147 	case MANUFACTURER_ID_AR6003_BASE:
148 		/* MBOX 0 has an extended range */
149 		pinfo->mbox_prop[0].extended_address =
150 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
151 		pinfo->mbox_prop[0].extended_size =
152 			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
153 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
154 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
155 		break;
156 	case MANUFACTURER_ID_AR6004_BASE:
157 		pinfo->mbox_prop[0].extended_address =
158 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
159 		pinfo->mbox_prop[0].extended_size =
160 			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
161 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
162 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
163 		break;
164 	case MANUFACTURER_ID_AR6320_BASE:
165 	{
166 		uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;
167 
168 		pinfo->mbox_prop[0].extended_address =
169 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
170 		if (rev < 4)
171 			pinfo->mbox_prop[0].extended_size =
172 				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
173 		else
174 			set_extended_mbox_size(pinfo);
175 		set_extended_mbox_address(pinfo);
176 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
177 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
178 		break;
179 	}
180 	case MANUFACTURER_ID_QCA9377_BASE:
181 	case MANUFACTURER_ID_QCA9379_BASE:
182 		pinfo->mbox_prop[0].extended_address =
183 			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
184 		pinfo->mbox_prop[0].extended_size =
185 			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
186 		pinfo->mbox_prop[1].extended_address =
187 			pinfo->mbox_prop[0].extended_address +
188 			pinfo->mbox_prop[0].extended_size +
189 			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
190 		pinfo->mbox_prop[1].extended_size =
191 			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
192 		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
193 		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
194 		break;
195 	default:
196 		A_ASSERT(false);
197 		break;
198 	}
199 }
200 
201 /** hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
202  * @pdev : The HIF layer object
203  *
204  * Return: none
205  */
206 void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
207 {
208 	struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);
209 
210 	HIF_ENTER();
211 
212 	hif_device->swap_mailbox = true;
213 
214 	HIF_EXIT();
215 }
216 
217 /** hif_dev_get_mailbox_swap() - Get the mailbox swap setting
218  * @pdev : The HIF layer object
219  *
220  * Return: true or false
221  */
222 bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
223 {
224 	struct hif_sdio_device *hif_device;
225 
226 	HIF_ENTER();
227 
228 	hif_device = hif_dev_from_hif(pdev);
229 
230 	HIF_EXIT();
231 
232 	return hif_device->swap_mailbox;
233 }
234 
235 /**
236  * hif_dev_get_fifo_address() - get the fifo addresses for dma
237  * @pdev:  SDIO HIF object
238  * @config: mbox address config pointer
239  * @config_len: config length
240  *
241  * Return : 0 for success, non-zero for error
242  */
243 int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
244 			     void *config,
245 			     uint32_t config_len)
246 {
247 	uint32_t count;
248 	struct hif_device_mbox_info *cfg =
249 				(struct hif_device_mbox_info *)config;
250 
251 	for (count = 0; count < 4; count++)
252 		cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);
253 
254 	if (config_len >= sizeof(struct hif_device_mbox_info)) {
255 		set_extended_mbox_window_info((uint16_t)pdev->func->device,
256 					      cfg);
257 		return 0;
258 	}
259 
260 	return -EINVAL;
261 }
262 
263 /**
264  * hif_dev_get_block_size() - get the mbox block size for dma
265  * @config : mbox size config pointer
266  *
267  * Return : NONE
268  */
269 void hif_dev_get_block_size(void *config)
270 {
271 	((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
272 	((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
273 	((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
274 	((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
275 }
276 
277 /**
278  * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
279  * @pdev: SDIO HIF object
280  * @svc: service index
281  * @ul_pipe: uplink pipe id
282  * @dl_pipe: down-linklink pipe id
283  *
284  * Return: 0 on success, error value on invalid map
285  */
286 QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
287 				       uint8_t *ul_pipe, uint8_t *dl_pipe)
288 {
289 	QDF_STATUS status = QDF_STATUS_SUCCESS;
290 
291 	switch (svc) {
292 	case HTT_DATA_MSG_SVC:
293 		if (hif_dev_get_mailbox_swap(pdev)) {
294 			*ul_pipe = 1;
295 			*dl_pipe = 0;
296 		} else {
297 			*ul_pipe = 3;
298 			*dl_pipe = 2;
299 		}
300 		break;
301 
302 	case HTC_CTRL_RSVD_SVC:
303 	case HTC_RAW_STREAMS_SVC:
304 		*ul_pipe = 1;
305 		*dl_pipe = 0;
306 		break;
307 
308 	case WMI_DATA_BE_SVC:
309 	case WMI_DATA_BK_SVC:
310 	case WMI_DATA_VI_SVC:
311 	case WMI_DATA_VO_SVC:
312 		*ul_pipe = 1;
313 		*dl_pipe = 0;
314 		break;
315 
316 	case WMI_CONTROL_SVC:
317 		if (hif_dev_get_mailbox_swap(pdev)) {
318 			*ul_pipe = 3;
319 			*dl_pipe = 2;
320 		} else {
321 			*ul_pipe = 1;
322 			*dl_pipe = 0;
323 		}
324 		break;
325 
326 	default:
327 		hif_err("%s: Err : Invalid service (%d)",
328 			__func__, svc);
329 		status = QDF_STATUS_E_INVAL;
330 		break;
331 	}
332 	return status;
333 }
334 
335 /** hif_dev_setup_device() - Setup device specific stuff here required for hif
336  * @pdev : HIF layer object
337  *
338  * return 0 on success, error otherwise
339  */
340 int hif_dev_setup_device(struct hif_sdio_device *pdev)
341 {
342 	int status = 0;
343 	uint32_t blocksizes[MAILBOX_COUNT];
344 
345 	status = hif_configure_device(NULL, pdev->HIFDevice,
346 				      HIF_DEVICE_GET_FIFO_ADDR,
347 				      &pdev->MailBoxInfo,
348 				      sizeof(pdev->MailBoxInfo));
349 
350 	if (status != QDF_STATUS_SUCCESS)
351 		hif_err("%s: HIF_DEVICE_GET_MBOX_ADDR failed", __func__);
352 
353 	status = hif_configure_device(NULL, pdev->HIFDevice,
354 				      HIF_DEVICE_GET_BLOCK_SIZE,
355 				      blocksizes, sizeof(blocksizes));
356 	if (status != QDF_STATUS_SUCCESS)
357 		hif_err("%s: HIF_DEVICE_GET_MBOX_BLOCK_SIZE fail", __func__);
358 
359 	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
360 
361 	return status;
362 }
363 
364 /** hif_dev_mask_interrupts() - Disable the interrupts in the device
365  * @pdev SDIO HIF Object
366  *
367  * Return: NONE
368  */
369 void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
370 {
371 	int status = QDF_STATUS_SUCCESS;
372 
373 	HIF_ENTER();
374 	/* Disable all interrupts */
375 	LOCK_HIF_DEV(pdev);
376 	mboxEnaRegs(pdev).int_status_enable = 0;
377 	mboxEnaRegs(pdev).cpu_int_status_enable = 0;
378 	mboxEnaRegs(pdev).error_status_enable = 0;
379 	mboxEnaRegs(pdev).counter_int_status_enable = 0;
380 	UNLOCK_HIF_DEV(pdev);
381 
382 	/* always synchronous */
383 	status = hif_read_write(pdev->HIFDevice,
384 				INT_STATUS_ENABLE_ADDRESS,
385 				(char *)&mboxEnaRegs(pdev),
386 				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
387 				HIF_WR_SYNC_BYTE_INC, NULL);
388 
389 	if (status != QDF_STATUS_SUCCESS)
390 		hif_err("%s: Err updating intr reg: %d", __func__, status);
391 }
392 
393 /** hif_dev_unmask_interrupts() - Enable the interrupts in the device
394  * @pdev SDIO HIF Object
395  *
396  * Return: NONE
397  */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable 2 mboxs INT */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	/* NOTE(review): the >> 16 presumably extracts the byte-wide enable
	 * field from the 32-bit register definition — confirm against the
	 * register layout in regtable.h
	 */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	/* NOTE(review): >> 24 likewise narrows the 32-bit mask to the
	 * byte-wide enable register — confirm against regtable.h
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
	(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("%s: Err updating intr reg: %d", __func__, status);
}
448 
449 void hif_dev_dump_registers(struct hif_sdio_device *pdev,
450 			    struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
451 			    struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
452 			    struct MBOX_COUNTER_REGISTERS *mbox_regs)
453 {
454 	int i = 0;
455 
456 	hif_debug("%s: Mailbox registers:", __func__);
457 
458 	if (irq_proc) {
459 		hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status);
460 		hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
461 		hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
462 		hif_debug("CounterIntStat: 0x%x ",
463 			  irq_proc->counter_int_status);
464 		hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame);
465 		hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
466 		hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
467 		hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
468 		hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
469 		hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);
470 
471 		if (pdev->MailBoxInfo.gmbox_address != 0) {
472 			hif_debug("GMBOX-HostIntStatus2:  0x%x ",
473 				  irq_proc->host_int_status2);
474 			hif_debug("GMBOX-RX-Avail: 0x%x ",
475 				  irq_proc->gmbox_rx_avail);
476 		}
477 	}
478 
479 	if (irq_en) {
480 		hif_debug("IntStatusEnable: 0x%x\n",
481 			  irq_en->int_status_enable);
482 		hif_debug("CounterIntStatus: 0x%x\n",
483 			  irq_en->counter_int_status_enable);
484 	}
485 
486 	for (i = 0; mbox_regs && i < 4; i++)
487 		hif_debug("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]);
488 }
489 
490 /* under HL SDIO, with Interface Memory support, we have
491  * the following reasons to support 2 mboxs:
492  * a) we need place different buffers in different
493  * mempool, for example, data using Interface Memory,
494  * desc and other using DRAM, they need different SDIO
495  * mbox channels.
496  * b) currently, tx mempool in LL case is separated from
497  * main mempool, the structure (descs at the beginning
498  * of every pool buffer) is different, because they only
499  * need store tx desc from host. To align with LL case,
500  * we also need 2 mbox support just as PCIe LL cases.
501  */
502 
503 /**
504  * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
505  * @pdev: The pointer to the hif device object
506  * @pipeid: pipe index
507  *
508  * Return: mailbox index
509  */
510 static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
511 					    uint8_t pipeid)
512 {
513 	if (2 == pipeid || 3 == pipeid)
514 		return 1;
515 	else if (0 == pipeid || 1 == pipeid)
516 		return 0;
517 
518 	hif_err("%s: pipeid=%d invalid", __func__, pipeid);
519 
520 	qdf_assert(0);
521 
522 	return INVALID_MAILBOX_NUMBER;
523 }
524 
525 /**
526  * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
527  * @pdev: The pointer to the hif device object
528  * @mbox_index: mailbox index
529  * @upload: boolean to decide mailbox index
530  *
531  * Return: Invalid pipe index
532  */
533 static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
534 					    uint8_t mbox_index, bool upload)
535 {
536 	if (mbox_index == 0)
537 		return upload ? 1 : 0;
538 	else if (mbox_index == 1)
539 		return upload ? 3 : 2;
540 
541 	hif_err("%s: mbox_index=%d, upload=%d invalid",
542 		__func__, mbox_index, upload);
543 
544 	qdf_assert(0);
545 
546 	return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
547 }
548 
549 /**
550  * hif_get_send_address() - Get the transfer pipe address
551  * @pdev: The pointer to the hif device object
552  * @pipe: The pipe identifier
553  * @addr:
554  *
555  * Return 0 for success and non-zero for failure to map
556  */
557 int hif_get_send_address(struct hif_sdio_device *pdev,
558 			 uint8_t pipe, unsigned long *addr)
559 {
560 	uint8_t mbox_index = INVALID_MAILBOX_NUMBER;
561 
562 	if (!addr)
563 		return -EINVAL;
564 
565 	mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);
566 
567 	if (mbox_index == INVALID_MAILBOX_NUMBER)
568 		return -EINVAL;
569 
570 	*addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;
571 
572 	return 0;
573 }
574 
575 /**
576  * hif_fixup_write_param() - Tweak the address and length parameters
577  * @pdev: The pointer to the hif device object
578  * @req:
579  * @length: The length pointer
580  * @addr: The addr pointer
581  *
582  * Return: None
583  */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	/* Fetch the device's current mailbox address map */
	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		/* Base (non-extended) mailbox: fixed window width */
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		hif_err("%s: Invalid write addr: 0x%08x\n", __func__, taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			hif_err("%s: Error (%u > %u)",
				__func__, *length, mboxlen);
			return;
		}

		/* Shift the write so its last byte lands at the end of the
		 * mailbox window, then add the dummy-space offset encoded in
		 * bits 16+ of the request flags (HIF_DUMMY_SPACE_MASK).
		 */
		taddr = taddr + (mboxlen - *length);
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
622 
623 /**
624  * hif_dev_recv_packet() - Receive HTC packet/packet information from device
625  * @pdev : HIF device object
626  * @packet : The HTC packet pointer
627  * @recv_length : The length of information to be received
628  * @mbox_index : The mailbox that contains this information
629  *
630  * Return 0 for success and non zero of error
631  */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	/* A packet without a Completion callback is read synchronously */
	bool sync = (packet->Completion) ? false : true;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	/* Reject reads whose padded size would overrun the HTC buffer;
	 * async callers are completed with the error, sync callers get it
	 * returned directly.
	 */
	if (padded_length > packet->BufferLength) {
		hif_err("%s: No space for padlen:%d recvlen:%d bufferlen:%d",
			__func__, padded_length,
			recv_length, packet->BufferLength);
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in Endpoint member */
	hif_debug("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		  __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		  padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	/* E_PENDING is the normal async in-flight result, not a failure */
	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		hif_err("%s : Failed %d", __func__, status);

	if (sync) {
		/* Sync path: record the result and trace the HTC header now */
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;

			hif_debug("%s:EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X\n",
				  __func__,
				  hdr->EndpointID, hdr->PayloadLen,
				  hdr->Flags, hdr->ControlBytes0,
				  hdr->ControlBytes1);
		}
	}

	return status;
}
685 
/**
 * hif_dev_issue_recv_packet_bundle() - fetch several RX packets in one read
 * @pdev: HIF SDIO device object
 * @recv_pkt_queue: queue of prepared HTC RX packets to fill
 * @sync_completion_queue: queue to place synchronously fetched packets on,
 *                         or NULL in async mode
 * @mail_box_index: mailbox to read the bundle from
 * @num_packets_fetched: out: number of packets covered by the bundle read
 * @partial_bundle: true if a previous pass already broke the bundle
 *
 * Dequeues as many packets as fit in one bundle buffer, issues a single
 * synchronous block read covering all of them, then scatters the bundle
 * buffer back into the individual packet buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
static QDF_STATUS hif_dev_issue_recv_packet_bundle
(
	struct hif_sdio_device *pdev,
	HTC_PACKET_QUEUE *recv_pkt_queue,
	HTC_PACKET_QUEUE *sync_completion_queue,
	uint8_t mail_box_index,
	int *num_packets_fetched,
	bool partial_bundle
)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	/* More packets queued than fit in one bundle: mark partial */
	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		hif_warn("%s, partial bundle detected num: %d, %d\n",
			 __func__,
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		hif_err("%s: packet_rx_bundle is NULL\n", __func__);
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME);  /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	/* Pull packets off the queue until the bundle buffer is full */
	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		/* Packets before the last one must not drive lookahead */
		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	/* One synchronous block read covering the whole bundle */
	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		hif_err("%s, hif_send Failed status:%d\n",
			__func__, status);
	} else {
		/* Scatter the bundle buffer back into each packet buffer,
		 * walking it with the same padded lengths used above.
		 */
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
				DEV_CALC_RECV_PADDED_LEN(pdev,
							 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length +=
						HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}
795 
796 #define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
/**
 * hif_dev_recv_message_pending_handler() - fetch and complete pending RX
 * @pdev: HIF SDIO device object
 * @mail_box_index: mailbox with pending messages
 * @msg_look_aheads: lookahead words describing the pending messages
 * @num_look_aheads: number of valid lookaheads (<= HTC_MAX_MSG_PER_BUNDLE_RX)
 * @async_proc: out (optional): whether async processing was chosen
 * @num_pkts_fetched: out (optional): total packets fetched
 *
 * Repeatedly allocates RX packets from the lookaheads, fetches them
 * (bundled when more than one is queued, otherwise one at a time), then
 * processes the sync completion queue, which may yield fresh lookaheads
 * that drive the next loop iteration.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t,	uint8_t);

	hif_debug("%s: NumLookAheads: %d\n", __func__, num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF in which HIF may have
		 * restrictions on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	/* Work on a local copy; the loop refills it from RX headers */
	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packets queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			hif_err("%s: Invalid Endpoint in lookahead: %d\n",
				__func__, id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets indicated
		 * by the lookaheads these packets are stored
		 * in the recvPkt queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(
							&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) !=
					0) {
					/* we couldn't fetch all packets at one,
					 * time this creates a broken
					 * bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt);
				if (!pkt)
					break;

				/* NULL Completion forces a synchronous read
				 * in hif_dev_recv_packet()
				 */
				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) >
				    0) {
					/* lookaheads in all packets except the
					 * last one in must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				/* on error, drain and free the remaining
				 * queued packets' network buffers
				 */
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (!pkt)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* sent synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled,
		 * we can process each packet, check lookahead , then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt);
			if (!pkt)
				break;

			/* header processing may produce new lookaheads for
			 * the next outer-loop iteration
			 */
			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(
						&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more look aheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happens in same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}
1027 
/**
 * hif_dev_service_cpu_interrupt() - service fatal CPU interrupts
 * synchronously
 *
 * @pdev: hif sdio device context
 *
 * Reads the pending CPU interrupt cause from the cached register copy,
 * acks it on the target with a write-1-to-clear register write and, for
 * bit 0 (the fatal-event bit), forwards a firmware failure event to the
 * registered fwEventHandler callback.
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint8_t reg_buffer[4];
	uint8_t cpu_int_status;

	/* only service sources that are actually enabled */
	cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
			 mboxEnaRegs(pdev).cpu_int_status_enable;

	hif_err("%s: 0x%x", __func__, (uint32_t)cpu_int_status);

	/* Clear the interrupt in the cached register copy */
	mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;

	/* set up the register transfer buffer to hit the register
	 * 4 times , this is done to make the access 4-byte aligned
	 * to mitigate issues with host bus interconnects that
	 * restrict bus transfer lengths to be a multiple of 4-bytes
	 * set W1C value to clear the interrupt, this hits the register
	 * first
	 */
	reg_buffer[0] = cpu_int_status;
	/* the remaining 3 values are set to zero which have no effect */
	reg_buffer[1] = 0;
	reg_buffer[2] = 0;
	reg_buffer[3] = 0;

	status = hif_read_write(pdev->HIFDevice,
				CPU_INT_STATUS_ADDRESS,
				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);

	A_ASSERT(status == QDF_STATUS_SUCCESS);

	/* The Interrupt sent to the Host is generated via bit0
	 * of CPU INT register
	 */
	if (cpu_int_status & 0x1) {
		if (pdev->hif_callbacks.fwEventHandler)
			/* It calls into HTC which propagates this
			 * to ol_target_failure()
			 */
			pdev->hif_callbacks.fwEventHandler(
				pdev->hif_callbacks.Context,
				QDF_STATUS_E_FAILURE);
	} else {
		hif_err("%s: Unrecognized CPU event", __func__);
	}

	return status;
}
1086 
1087 /**
1088  * hif_dev_service_error_interrupt() - service error interrupts
1089  * synchronously
1090  *
1091  * @pdev: hif sdio device context
1092  *
1093  * Return: QDF_STATUS_SUCCESS for success
1094  */
1095 static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
1096 {
1097 	QDF_STATUS status;
1098 	uint8_t reg_buffer[4];
1099 	uint8_t error_int_status = 0;
1100 
1101 	error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
1102 	hif_err("%s: 0x%x", __func__, error_int_status);
1103 
1104 	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
1105 		hif_err("%s: Error : Wakeup", __func__);
1106 
1107 	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
1108 		hif_err("%s: Error : Rx Underflow", __func__);
1109 
1110 	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
1111 		hif_err("%s: Error : Tx Overflow", __func__);
1112 
1113 	/* Clear the interrupt */
1114 	mboxProcRegs(pdev).error_int_status &= ~error_int_status;
1115 
1116 	/* set up the register transfer buffer to hit the register
1117 	 * 4 times , this is done to make the access 4-byte
1118 	 * aligned to mitigate issues with host bus interconnects that
1119 	 * restrict bus transfer lengths to be a multiple of 4-bytes
1120 	 */
1121 
1122 	/* set W1C value to clear the interrupt */
1123 	reg_buffer[0] = error_int_status;
1124 	/* the remaining 4 values are set to zero which have no-effect  */
1125 	reg_buffer[1] = 0;
1126 	reg_buffer[2] = 0;
1127 	reg_buffer[3] = 0;
1128 
1129 	status = hif_read_write(pdev->HIFDevice,
1130 				ERROR_INT_STATUS_ADDRESS,
1131 				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1132 
1133 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1134 	return status;
1135 }
1136 
1137 /**
1138  * hif_dev_service_debug_interrupt() - service debug interrupts
1139  * synchronously
1140  *
1141  * @pdev: hif sdio device context
1142  *
1143  * Return: QDF_STATUS_SUCCESS for success
1144  */
1145 static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
1146 {
1147 	uint32_t dummy;
1148 	QDF_STATUS status;
1149 
1150 	/* Send a target failure event to the application */
1151 	hif_err("%s: Target debug interrupt", __func__);
1152 
1153 	/* clear the interrupt , the debug error interrupt is counter 0
1154 	 * read counter to clear interrupt
1155 	 */
1156 	status = hif_read_write(pdev->HIFDevice,
1157 				COUNT_DEC_ADDRESS,
1158 				(uint8_t *)&dummy,
1159 				4, HIF_RD_SYNC_BYTE_INC, NULL);
1160 
1161 	A_ASSERT(status == QDF_STATUS_SUCCESS);
1162 	return status;
1163 }
1164 
1165 /**
1166  * hif_dev_service_counter_interrupt() - service counter interrupts
1167  *                                       synchronously
1168  * @pdev: hif sdio device context
1169  *
1170  * Return: QDF_STATUS_SUCCESS for success
1171  */
1172 static
1173 QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
1174 {
1175 	uint8_t counter_int_status;
1176 
1177 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
1178 
1179 	counter_int_status = mboxProcRegs(pdev).counter_int_status &
1180 			     mboxEnaRegs(pdev).counter_int_status_enable;
1181 
1182 	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1183 			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
1184 			 counter_int_status));
1185 
1186 	/* Check if the debug interrupt is pending
1187 	 * NOTE: other modules like GMBOX may use the counter interrupt
1188 	 * for credit flow control on other counters, we only need to
1189 	 * check for the debug assertion counter interrupt
1190 	 */
1191 	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
1192 		return hif_dev_service_debug_interrupt(pdev);
1193 
1194 	return QDF_STATUS_SUCCESS;
1195 }
1196 
/* Fetch the cached RX lookahead word for mailbox @i. The parameter is
 * parenthesized so expression arguments (e.g. i + 1) scale correctly.
 * (Macro name keeps the historical "LOOAHEAD" spelling.)
 */
#define RX_LOOAHEAD_GET(pdev, i) \
	mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)]
/**
 * hif_dev_process_pending_irqs() - process pending interrupts
 * @pdev: hif sdio device context
 * @done: set to true when nothing remains to process, so the caller
 *        can break out of its re-invocation loop
 * @async_processing: sync/async processing flag; cleared here when the
 *                    caller must fall back to synchronous handling
 *
 * Phase 1 reads the target's interrupt/lookahead register block into
 * the cached copy; phase 2 drains every mailbox with a valid lookahead
 * and then services CPU, error and counter interrupts.
 *
 * Return: QDF_STATUS_SUCCESS for success
 */
QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
					bool *done,
					bool *async_processing)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t host_int_status = 0;
	uint32_t l_ahead[MAILBOX_USED_COUNT];
	int i;

	qdf_mem_zero(&l_ahead, sizeof(l_ahead));
	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
			 (unsigned long)pdev));

	/* NOTE: the HIF implementation guarantees that the context
	 * of this call allows us to perform SYNCHRONOUS I/O,
	 * that is we can block, sleep or call any API that
	 * can block or switch thread/task contexts.
	 * This is a fully schedulable context.
	 */
	do {
		if (mboxEnaRegs(pdev).int_status_enable == 0) {
			/* interrupt enables have been cleared, do not try
			 * to process any pending interrupts that
			 * may result in more bus transactions.
			 * The target may be unresponsive at this point.
			 */
			break;
		}
		/* one bulk read fetches the whole interrupt-cause and
		 * lookahead register block into the cached copy
		 */
		status = hif_read_write(pdev->HIFDevice,
					HOST_INT_STATUS_ADDRESS,
					(uint8_t *)&mboxProcRegs(pdev),
					sizeof(mboxProcRegs(pdev)),
					HIF_RD_SYNC_BYTE_INC, NULL);

		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
			hif_dev_dump_registers(pdev,
					       &mboxProcRegs(pdev),
					       &mboxEnaRegs(pdev),
					       &mboxCountRegs(pdev));
		}

		/* Update only those registers that are enabled */
		host_int_status = mboxProcRegs(pdev).host_int_status
				  & mboxEnaRegs(pdev).int_status_enable;

		/* only look at mailbox status if the HIF layer did not
		 * provide this function, on some HIF interfaces reading
		 * the RX lookahead is not valid to do
		 */
		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			l_ahead[i] = 0;
			if (host_int_status & (1 << i)) {
				/* mask out pending mailbox value, we use
				 * "lookAhead" as the real flag for
				 * mailbox processing below
				 */
				host_int_status &= ~(1 << i);
				if (mboxProcRegs(pdev).
				    rx_lookahead_valid & (1 << i)) {
					/* mailbox has a message and the
					 * look ahead is valid
					 */
					l_ahead[i] = RX_LOOAHEAD_GET(pdev, i);
				}
			}
		} /* end of for loop */
	} while (false);

	/* phase 2: act on the snapshot captured above */
	do {
		bool bLookAheadValid = false;
		/* did the interrupt status fetches succeed? */
		if (QDF_IS_STATUS_ERROR(status))
			break;

		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
			if (l_ahead[i] != 0) {
				bLookAheadValid = true;
				break;
			}
		}

		if ((host_int_status == 0) && !bLookAheadValid) {
			/* nothing to process, the caller can use this
			 * to break out of a loop
			 */
			*done = true;
			break;
		}

		if (bLookAheadValid) {
			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
				int fetched = 0;

				if (l_ahead[i] == 0)
					continue;
				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
						("mbox[%d],lookahead:0x%X\n",
						i, l_ahead[i]));
				/* Mailbox Interrupt, the HTC layer may issue
				 * async requests to empty the mailbox...
				 * When emptying the recv mailbox we use the
				 * async handler from the completion routine
				 * of the caller's read request.
				 * This can improve performance by reducing
				 * the  context switching when we rapidly
				 * pull packets
				 */
				status = hif_dev_recv_message_pending_handler(
							pdev, i,
							&l_ahead
							[i], 1,
							async_processing,
							&fetched);
				if (QDF_IS_STATUS_ERROR(status))
					break;

				if (!fetched) {
					/* HTC could not pull any messages out
					 * due to lack of resources force DSR
					 * handle to ack the interrupt
					 */
					*async_processing = false;
					pdev->RecheckIRQStatusCnt = 0;
				}
			}
		}

		/* now handle the rest of them */
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Valid source for OTHER interrupts: 0x%x\n",
				host_int_status));

		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
			/* CPU Interrupt */
			status = hif_dev_service_cpu_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
			/* Error Interrupt */
			status = hif_dev_service_error_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
			/* Counter Interrupt */
			status = hif_dev_service_counter_interrupt(pdev);
			if (QDF_IS_STATUS_ERROR(status))
				break;
		}

	} while (false);

	/* an optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can
	 * rely on taking another interrupt rather than re-checking
	 * the status registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that use the special
	 * GetPendingEventsFunc, this optimization cannot be used due to
	 * possible side-effects.  For example, SPI requires the host
	 * to drain all messages from the mailbox before exiting
	 * the ISR routine.
	 */
	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
				("Bypass IRQ Status re-check, forcing done\n"));
		*done = true;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
			 *done, *async_processing, status));

	return status;
}
1390 
/* True when the DSR has pulled at least the configured number of recv
 * packets and should yield control back to the HIF layer
 */
#define DEV_CHECK_RECV_YIELD(pdev) \
	((pdev)->CurrentDSRRecvCount >= \
	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
/**
 * hif_dev_dsr_handler() - Synchronous interrupt handler
 *
 * @context: hif sdio device context (struct hif_sdio_device *)
 *
 * Repeatedly invokes hif_dev_process_pending_irqs() until it reports
 * done, an error occurs, async processing takes over, or the yield
 * threshold is reached. Acks the interrupt only when processing
 * completed synchronously without error.
 *
 * Return: QDF_STATUS_SUCCESS for success, error code otherwise
 */
QDF_STATUS hif_dev_dsr_handler(void *context)
{
	struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool done = false;
	bool async_proc = false;

	/* reset the recv counter that tracks when we need
	 * to yield from the DSR
	 */
	pdev->CurrentDSRRecvCount = 0;
	/* reset counter used to flag a re-scan of IRQ
	 * status registers on the target
	 */
	pdev->RecheckIRQStatusCnt = 0;

	while (!done) {
		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
		if (QDF_IS_STATUS_ERROR(status))
			break;

		if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
			/* the HIF layer does not allow async IRQ processing,
			 * override the asyncProc flag
			 */
			async_proc = false;
			/* this will cause us to re-enter ProcessPendingIRQ()
			 * and re-read interrupt status registers.
			 * This has a nice side effect of blocking us until all
			 * async read requests are completed. This behavior is
			 * required as we  do not allow ASYNC processing
			 * in interrupt handlers (like Windows CE)
			 */

			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
				/* ProcessPendingIRQs() pulled enough recv
				 * messages to satisfy the yield count, stop
				 * checking for more messages and return
				 */
				break;
		}

		if (async_proc) {
			/* the function does some async I/O for performance,
			 * we need to exit the ISR immediately, the check below
			 * will prevent the interrupt from being
			 * Ack'd while we handle it asynchronously
			 */
			break;
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
		/* Ack the interrupt only if :
		 *  1. we did not get any errors in processing interrupts
		 *  2. there are no outstanding async processing requests
		 */
		if (pdev->DSRCanYield) {
			/* if the DSR can yield do not ACK the interrupt, there
			 * could be more pending messages. The HIF layer
			 * must ACK the interrupt on behalf of HTC
			 */
			hif_info("%s:  Yield (RX count: %d)",
				 __func__, pdev->CurrentDSRRecvCount);
		} else {
			hif_ack_interrupt(pdev->HIFDevice);
		}
	}

	return status;
}
1472 
1473 /**
1474  * hif_read_write() - queue a read/write request
1475  * @device: pointer to hif device structure
1476  * @address: address to read
1477  * @buffer: buffer to hold read/write data
1478  * @length: length to read/write
1479  * @request: read/write/sync/async request
1480  * @context: pointer to hold calling context
1481  *
1482  * Return: 0 on success, error number otherwise.
1483  */
1484 QDF_STATUS
1485 hif_read_write(struct hif_sdio_dev *device,
1486 	       unsigned long address,
1487 	       char *buffer, uint32_t length,
1488 	       uint32_t request, void *context)
1489 {
1490 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1491 	struct bus_request *busrequest;
1492 
1493 	AR_DEBUG_ASSERT(device);
1494 	AR_DEBUG_ASSERT(device->func);
1495 	hif_debug("%s: device 0x%pK addr 0x%lX buffer 0x%pK",
1496 		  __func__, device, address, buffer);
1497 	hif_debug("%s: len %d req 0x%X context 0x%pK",
1498 		  __func__, length, request, context);
1499 
1500 	/*sdio r/w action is not needed when suspend, so just return */
1501 	if ((device->is_suspend) &&
1502 	    (device->power_config == HIF_DEVICE_POWER_CUT)) {
1503 		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
1504 		return QDF_STATUS_SUCCESS;
1505 	}
1506 	do {
1507 		if ((request & HIF_ASYNCHRONOUS) ||
1508 		    (request & HIF_SYNCHRONOUS)) {
1509 			/* serialize all requests through the async thread */
1510 			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1511 					("%s: Execution mode: %s\n", __func__,
1512 					 (request & HIF_ASYNCHRONOUS) ? "Async"
1513 					 : "Synch"));
1514 			busrequest = hif_allocate_bus_request(device);
1515 			if (!busrequest) {
1516 				hif_err("%s:bus requests unavail", __func__);
1517 				hif_err("%s, addr:0x%lX, len:%d",
1518 					request & HIF_SDIO_READ ? "READ" :
1519 					"WRITE", address, length);
1520 				return QDF_STATUS_E_FAILURE;
1521 			}
1522 			busrequest->address = address;
1523 			busrequest->buffer = buffer;
1524 			busrequest->length = length;
1525 			busrequest->request = request;
1526 			busrequest->context = context;
1527 
1528 			add_to_async_list(device, busrequest);
1529 
1530 			if (request & HIF_SYNCHRONOUS) {
1531 				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1532 						("%s: queued sync req: 0x%lX\n",
1533 						 __func__,
1534 						 (unsigned long)busrequest));
1535 
1536 				/* wait for completion */
1537 				up(&device->sem_async);
1538 				if (down_interruptible(&busrequest->sem_req) ==
1539 				    0) {
1540 					QDF_STATUS status = busrequest->status;
1541 
1542 					hif_debug("%s: sync freeing 0x%lX:0x%X",
1543 						  __func__,
1544 						  (unsigned long)busrequest,
1545 						  busrequest->status);
1546 					hif_debug("%s: freeing req: 0x%X",
1547 						  __func__,
1548 						  (unsigned int)request);
1549 					hif_free_bus_request(device,
1550 							     busrequest);
1551 					return status;
1552 				} else {
1553 					/* interrupted, exit */
1554 					return QDF_STATUS_E_FAILURE;
1555 				}
1556 			} else {
1557 				hif_debug("%s: queued async req: 0x%lX",
1558 					  __func__, (unsigned long)busrequest);
1559 				up(&device->sem_async);
1560 				return QDF_STATUS_E_PENDING;
1561 			}
1562 		} else {
1563 			hif_err("%s: Invalid execution mode: 0x%08x",
1564 				__func__, (unsigned int)request);
1565 			status = QDF_STATUS_E_INVAL;
1566 			break;
1567 		}
1568 	} while (0);
1569 
1570 	return status;
1571 }
1572 
1573 /**
1574  * hif_sdio_func_enable() - Handle device enabling as per device
1575  * @ol_sc: HIF device object
1576  * @func: function pointer
1577  *
1578  * Return QDF_STATUS
1579  */
1580 static QDF_STATUS hif_sdio_func_enable(struct hif_softc *ol_sc,
1581 				       struct sdio_func *func)
1582 {
1583 	struct hif_sdio_dev *device = get_hif_device(ol_sc, func);
1584 
1585 	if (device->is_disabled) {
1586 		int ret = 0;
1587 
1588 		sdio_claim_host(func);
1589 
1590 		ret = hif_sdio_quirk_async_intr(ol_sc, func);
1591 		if (ret) {
1592 			hif_err("%s: Error setting async intr:%d",
1593 				__func__, ret);
1594 			sdio_release_host(func);
1595 			return QDF_STATUS_E_FAILURE;
1596 		}
1597 
1598 		func->enable_timeout = 100;
1599 		ret = sdio_enable_func(func);
1600 		if (ret) {
1601 			hif_err("%s: Unable to enable function: %d",
1602 				__func__, ret);
1603 			sdio_release_host(func);
1604 			return QDF_STATUS_E_FAILURE;
1605 		}
1606 
1607 		ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
1608 		if (ret) {
1609 			hif_err("%s: Unable to set block size 0x%X : %d\n",
1610 				__func__, HIF_BLOCK_SIZE, ret);
1611 			sdio_release_host(func);
1612 			return QDF_STATUS_E_FAILURE;
1613 		}
1614 
1615 		ret = hif_sdio_quirk_mod_strength(ol_sc, func);
1616 		if (ret) {
1617 			hif_err("%s: Error setting mod strength : %d\n",
1618 				__func__, ret);
1619 			sdio_release_host(func);
1620 			return QDF_STATUS_E_FAILURE;
1621 		}
1622 
1623 		sdio_release_host(func);
1624 	}
1625 
1626 	return QDF_STATUS_SUCCESS;
1627 }
1628 
/**
 * __hif_read_write() - sdio read/write wrapper
 * @device: pointer to hif device structure
 * @address: sdio address to read from / write to
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request flags
 * @context: pointer to hold calling context (unused here)
 *
 * Performs the actual CMD53 transfer on the caller's context. When
 * HIF_USE_DMA_BOUNCE_BUFFER is set, buffers that are not DMA-safe are
 * staged through the device's bounce buffer.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
 */
static QDF_STATUS
__hif_read_write(struct hif_sdio_dev *device,
		 uint32_t address, char *buffer,
		 uint32_t length, uint32_t request, void *context)
{
	uint8_t opcode;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int ret = A_OK;
	uint8_t *tbuffer;
	bool bounced = false;

	if (!device) {
		hif_err("%s: device null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!device->func) {
		hif_err("%s: func null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	hif_debug("%s: addr:0X%06X, len:%08d, %s, %s", __func__,
		  address, length,
		  request & HIF_SDIO_READ ? "Read " : "Write",
		  request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");

	do {
		if (request & HIF_EXTENDED_IO) {
			/* CMD53 (extended I/O) is the only supported
			 * command type
			 */
		} else {
			hif_err("%s: Invalid command type: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_BLOCK_BASIS) {
			/* round to whole block length size */
			length =
				(length / HIF_BLOCK_SIZE) *
				HIF_BLOCK_SIZE;
			hif_debug("%s: Block mode (BlockLen: %d)\n",
				  __func__, length);
		} else if (request & HIF_BYTE_BASIS) {
			hif_debug("%s: Byte mode (BlockLen: %d)\n",
				  __func__, length);
		} else {
			hif_err("%s: Invalid data mode: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}
		if (request & HIF_SDIO_WRITE) {
			/* may adjust length/address per device quirks */
			hif_fixup_write_param(device, request,
					      &length, &address);

			hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X\n",
				  address, length,
				  (request & HIF_DUMMY_SPACE_MASK) >> 16);
		}

		if (request & HIF_FIXED_ADDRESS) {
			opcode = CMD53_FIXED_ADDRESS;
			hif_debug("%s: Addr mode: fixed 0x%X\n",
				  __func__, address);
		} else if (request & HIF_INCREMENTAL_ADDRESS) {
			opcode = CMD53_INCR_ADDRESS;
			hif_debug("%s: Address mode: Incremental 0x%X\n",
				  __func__, address);
		} else {
			hif_err("%s: Invalid address mode: 0x%08x\n",
				__func__, request);
			status = QDF_STATUS_E_INVAL;
			break;
		}

		if (request & HIF_SDIO_WRITE) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				tbuffer = device->dma_buffer;
				/* copy the write data to the dma buffer */
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("%s: Invalid write len: %d\n",
						__func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				memcpy(tbuffer, buffer, length);
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			/* fixed address uses the FIFO (sb) variant,
			 * incremental uses memcpy_toio
			 */
			if (opcode == CMD53_FIXED_ADDRESS  && tbuffer) {
				ret = sdio_writesb(device->func, address,
						   tbuffer, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_toio(device->func, address,
						       tbuffer, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			}
		} else if (request & HIF_SDIO_READ) {
#if HIF_USE_DMA_BOUNCE_BUFFER
			if (BUFFER_NEEDS_BOUNCE(buffer)) {
				AR_DEBUG_ASSERT(device->dma_buffer);
				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
				if (length > HIF_DMA_BUFFER_SIZE) {
					hif_err("%s: Invalid read len: %d\n",
						__func__, length);
					status = QDF_STATUS_E_INVAL;
					break;
				}
				tbuffer = device->dma_buffer;
				bounced = true;
			} else {
				tbuffer = buffer;
			}
#else
			tbuffer = buffer;
#endif
			if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
				ret = sdio_readsb(device->func, tbuffer,
						  address, length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			} else if (tbuffer) {
				ret = sdio_memcpy_fromio(device->func,
							 tbuffer, address,
							 length);
				hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n",
					  __func__, ret, address, length,
					  *(int *)tbuffer);
			}
#if HIF_USE_DMA_BOUNCE_BUFFER
			/* copy the read data back out of the bounce buffer */
			if (bounced && tbuffer)
				memcpy(buffer, tbuffer, length);
#endif
		} else {
			hif_err("%s: Invalid dir: 0x%08x", __func__, request);
			status = QDF_STATUS_E_INVAL;
			return status;
		}

		if (ret) {
			hif_err("%s: SDIO bus operation failed!", __func__);
			hif_err("%s: MMC stack returned : %d", __func__, ret);
			hif_err("%s: addr:0X%06X, len:%08d, %s, %s",
				__func__, address, length,
				request & HIF_SDIO_READ ? "Read " : "Write",
				request & HIF_ASYNCHRONOUS ?
				"Async" : "Sync");
			status = QDF_STATUS_E_FAILURE;
		}
	} while (false);

	return status;
}
1807 
/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * Worker thread that drains the device's pending bus_request list.
 * Both sync and async callers funnel requests through this thread
 * (see hif_read_write()); it is woken via device->sem_async and exits
 * when async_shutdown is set or the semaphore wait is interrupted.
 *
 * Return: 0 on success, error number otherwise.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;
	bool claimed = false;

	device = (struct hif_sdio_dev *)param;
	set_current_state(TASK_INTERRUPTIBLE);
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq) {
			request = device->asyncreq;
			if (request->inusenext)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			/* drop the lock while performing the (blocking)
			 * transfer; re-acquired at the loop bottom
			 */
			qdf_spin_unlock_irqrestore(&device->asynclock);
			hif_debug("%s: processing req: 0x%lX",
				  __func__, (unsigned long)request);

			if (!claimed) {
				sdio_claim_host(device->func);
				claimed = true;
			}
			if (request->scatter_req) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously, note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call __hif_read_write in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->
							 request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					void *context = request->context;

					/* free the request before invoking
					 * the completion callback
					 */
					hif_free_bus_request(device, request);
					device->htc_callbacks.
					rw_compl_handler(context, status);
				} else {
					hif_debug("%s: upping req: 0x%lX",
						  __func__,
						  (unsigned long)request);
					/* sync caller blocks on sem_req in
					 * hif_read_write(); hand back the
					 * status and wake it. The caller
					 * frees the request.
					 */
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		if (claimed) {
			sdio_release_host(device->func);
			claimed = false;
		}
	}

	kthread_complete_and_exit(&device->async_completion, 0);

	return 0;
}
1906 
1907 /**
1908  * hif_disable_func() - Disable SDIO function
1909  *
1910  * @device: HIF device pointer
1911  * @func: SDIO function pointer
1912  * @reset: If this is called from resume or probe
1913  *
1914  * Return: 0 in case of success, else error value
1915  */
1916 QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
1917 			    struct sdio_func *func,
1918 			    bool reset)
1919 {
1920 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1921 
1922 	HIF_ENTER();
1923 	if (!IS_ERR(device->async_task)) {
1924 		init_completion(&device->async_completion);
1925 		device->async_shutdown = 1;
1926 		up(&device->sem_async);
1927 		wait_for_completion(&device->async_completion);
1928 		device->async_task = NULL;
1929 		sema_init(&device->sem_async, 0);
1930 	}
1931 
1932 	status = hif_sdio_func_disable(device, func, reset);
1933 	if (status == QDF_STATUS_SUCCESS)
1934 		device->is_disabled = true;
1935 
1936 	cleanup_hif_scatter_resources(device);
1937 
1938 	HIF_EXIT();
1939 
1940 	return status;
1941 }
1942 
1943 /**
1944  * hif_enable_func() - Enable SDIO function
1945  *
1946  * @ol_sc: HIF object pointer
1947  * @device: HIF device pointer
1948  * @func: SDIO function pointer
1949  * @resume: If this is called from resume or probe
1950  *
1951  * Return: 0 in case of success, else error value
1952  */
1953 QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
1954 			   struct sdio_func *func, bool resume)
1955 {
1956 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1957 
1958 	HIF_ENTER();
1959 
1960 	if (!device) {
1961 		hif_err("%s: HIF device is NULL", __func__);
1962 		return QDF_STATUS_E_INVAL;
1963 	}
1964 
1965 	if (hif_sdio_func_enable(ol_sc, func))
1966 		return QDF_STATUS_E_FAILURE;
1967 
1968 	/* create async I/O thread */
1969 	if (!device->async_task && device->is_disabled) {
1970 		device->async_shutdown = 0;
1971 		device->async_task = kthread_create(async_task,
1972 						    (void *)device,
1973 						    "AR6K Async");
1974 		if (IS_ERR(device->async_task)) {
1975 			hif_err("%s: Error creating async task",
1976 				__func__);
1977 			return QDF_STATUS_E_FAILURE;
1978 		}
1979 		device->is_disabled = false;
1980 		wake_up_process(device->async_task);
1981 	}
1982 
1983 	if (!resume)
1984 		ret = hif_sdio_probe(ol_sc, func, device);
1985 
1986 	HIF_EXIT();
1987 
1988 	return ret;
1989 }
1990 #endif /* CONFIG_SDIO_TRANSFER_MAILBOX */
1991