Lines Matching +full:no +full:- +full:sdio
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
24 #include <linux/mmc/sdio.h>
27 #include "hif-ops.h"
82 return ar->hif_priv; in ath6kl_sdio_priv()
86 * Macro to check if DMA buffer is WORD-aligned and DMA-able.
88 * bug-check otherwise (i.e. buffers on the stack); the virt_addr_valid check fails on stack memory.
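/*
 * Hedged sketch (assumed helper, not necessarily the exact upstream
 * definition): a bounce check consistent with the comment above. A
 * buffer must be staged through the driver's pre-allocated DMA buffer
 * when it is not 4-byte aligned or not DMA-able (e.g. allocated on the
 * stack, where virt_addr_valid() fails). Needs <linux/mm.h>.
 */
static inline bool ath6kl_buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}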
98 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; in ath6kl_sdio_set_mbox_info()
101 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; in ath6kl_sdio_set_mbox_info()
102 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; in ath6kl_sdio_set_mbox_info()
103 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; in ath6kl_sdio_set_mbox_info()
104 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; in ath6kl_sdio_set_mbox_info()
105 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; in ath6kl_sdio_set_mbox_info()
106 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; in ath6kl_sdio_set_mbox_info()
147 return mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath6kl_sdio_func0_cmd52_wr_byte()
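/*
 * Hedged sketch of a function-0 CMD52 (IO_RW_DIRECT) byte write built
 * on the raw struct mmc_command API, matching the mmc_wait_for_cmd()
 * call above. The argument layout follows the SDIO spec: bit 31 =
 * write flag, bits 30:28 = function number (0 here), bits 25:9 =
 * register address, bits 7:0 = data byte. The function name is
 * illustrative; needs <linux/mmc/core.h>, <linux/mmc/card.h>,
 * <linux/mmc/mmc.h> and <linux/mmc/sdio.h>.
 */
static int func0_cmd52_wr_byte_sketch(struct mmc_card *card,
				      unsigned int addr, u8 byte)
{
	struct mmc_command io_cmd = {};

	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.arg = 0x80000000 | ((addr & 0x1ffff) << 9) | byte;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}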
161 addr += (HIF_MBOX_WIDTH - len); in ath6kl_sdio_io()
165 addr += HIF_MBOX0_EXT_WIDTH - len; in ath6kl_sdio_io()
183 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); in ath6kl_sdio_io()
194 spin_lock_bh(&ar_sdio->lock); in ath6kl_sdio_alloc_busreq()
196 if (list_empty(&ar_sdio->bus_req_freeq)) { in ath6kl_sdio_alloc_busreq()
197 spin_unlock_bh(&ar_sdio->lock); in ath6kl_sdio_alloc_busreq()
201 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, in ath6kl_sdio_alloc_busreq()
203 list_del(&bus_req->list); in ath6kl_sdio_alloc_busreq()
205 spin_unlock_bh(&ar_sdio->lock); in ath6kl_sdio_alloc_busreq()
218 spin_lock_bh(&ar_sdio->lock); in ath6kl_sdio_free_bus_req()
219 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); in ath6kl_sdio_free_bus_req()
220 spin_unlock_bh(&ar_sdio->lock); in ath6kl_sdio_free_bus_req()
229 data->blksz = HIF_MBOX_BLOCK_SIZE; in ath6kl_sdio_setup_scat_data()
230 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; in ath6kl_sdio_setup_scat_data()
233 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", in ath6kl_sdio_setup_scat_data()
234 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, in ath6kl_sdio_setup_scat_data()
235 data->blksz, data->blocks, scat_req->len, in ath6kl_sdio_setup_scat_data()
236 scat_req->scat_entries); in ath6kl_sdio_setup_scat_data()
238 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : in ath6kl_sdio_setup_scat_data()
242 sg = scat_req->sgentries; in ath6kl_sdio_setup_scat_data()
243 sg_init_table(sg, scat_req->scat_entries); in ath6kl_sdio_setup_scat_data()
246 for (i = 0; i < scat_req->scat_entries; i++, sg++) { in ath6kl_sdio_setup_scat_data()
248 i, scat_req->scat_list[i].buf, in ath6kl_sdio_setup_scat_data()
249 scat_req->scat_list[i].len); in ath6kl_sdio_setup_scat_data()
251 sg_set_buf(sg, scat_req->scat_list[i].buf, in ath6kl_sdio_setup_scat_data()
252 scat_req->scat_list[i].len); in ath6kl_sdio_setup_scat_data()
255 /* set scatter-gather table for request */ in ath6kl_sdio_setup_scat_data()
256 data->sg = scat_req->sgentries; in ath6kl_sdio_setup_scat_data()
257 data->sg_len = scat_req->scat_entries; in ath6kl_sdio_setup_scat_data()
270 scat_req = req->scat_req; in ath6kl_sdio_scat_rw()
272 if (scat_req->virt_scat) { in ath6kl_sdio_scat_rw()
273 len = scat_req->len; in ath6kl_sdio_scat_rw()
274 if (scat_req->req & HIF_BLOCK_BASIS) in ath6kl_sdio_scat_rw()
277 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, in ath6kl_sdio_scat_rw()
278 scat_req->addr, scat_req->virt_dma_buf, in ath6kl_sdio_scat_rw()
289 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? in ath6kl_sdio_scat_rw()
292 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ; in ath6kl_sdio_scat_rw()
295 if (scat_req->req & HIF_WRITE) { in ath6kl_sdio_scat_rw()
296 if (scat_req->addr == HIF_MBOX_BASE_ADDR) in ath6kl_sdio_scat_rw()
297 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; in ath6kl_sdio_scat_rw()
300 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; in ath6kl_sdio_scat_rw()
304 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, in ath6kl_sdio_scat_rw()
305 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, in ath6kl_sdio_scat_rw()
314 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_scat_rw()
316 mmc_set_data_timeout(&data, ar_sdio->func->card); in ath6kl_sdio_scat_rw()
318 trace_ath6kl_sdio_scat(scat_req->addr, in ath6kl_sdio_scat_rw()
319 scat_req->req, in ath6kl_sdio_scat_rw()
320 scat_req->len, in ath6kl_sdio_scat_rw()
321 scat_req->scat_entries, in ath6kl_sdio_scat_rw()
322 scat_req->scat_list); in ath6kl_sdio_scat_rw()
325 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); in ath6kl_sdio_scat_rw()
327 sdio_release_host(ar_sdio->func); in ath6kl_sdio_scat_rw()
332 scat_req->status = status; in ath6kl_sdio_scat_rw()
334 if (scat_req->status) in ath6kl_sdio_scat_rw()
336 scat_req->status); in ath6kl_sdio_scat_rw()
338 if (scat_req->req & HIF_ASYNCHRONOUS) in ath6kl_sdio_scat_rw()
339 scat_req->complete(ar_sdio->ar->htc_target, scat_req); in ath6kl_sdio_scat_rw()
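/*
 * Hedged sketch of the CMD53 argument packing performed by the
 * ath6kl_sdio_set_cmd53_arg() call in ath6kl_sdio_scat_rw() above,
 * following the SDIO spec layout for IO_RW_EXTENDED: bit 31 = R/W
 * flag, bits 30:28 = function number, bit 27 = block mode, bit 26 =
 * incrementing-address opcode, bits 25:9 = register address,
 * bits 8:0 = block/byte count. The helper name and parameter order
 * here are illustrative only.
 */
static inline void cmd53_arg_pack_sketch(u32 *arg, unsigned int rw,
					 unsigned int func_num,
					 unsigned int block_mode,
					 unsigned int opcode,
					 unsigned int addr,
					 unsigned int count)
{
	*arg = ((rw & 0x1) << 31) | ((func_num & 0x7) << 28) |
	       ((block_mode & 0x1) << 27) | ((opcode & 0x1) << 26) |
	       ((addr & 0x1ffff) << 9) | (count & 0x1ff);
}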
366 return -ENOMEM; in ath6kl_sdio_alloc_prep_scat_req()
372 return -ENOMEM; in ath6kl_sdio_alloc_prep_scat_req()
375 s_req->virt_dma_buf = in ath6kl_sdio_alloc_prep_scat_req()
379 s_req->sgentries = kzalloc(size, GFP_KERNEL); in ath6kl_sdio_alloc_prep_scat_req()
381 if (!s_req->sgentries) { in ath6kl_sdio_alloc_prep_scat_req()
383 return -ENOMEM; in ath6kl_sdio_alloc_prep_scat_req()
390 kfree(s_req->sgentries); in ath6kl_sdio_alloc_prep_scat_req()
391 kfree(s_req->virt_dma_buf); in ath6kl_sdio_alloc_prep_scat_req()
393 return -ENOMEM; in ath6kl_sdio_alloc_prep_scat_req()
397 bus_req->scat_req = s_req; in ath6kl_sdio_alloc_prep_scat_req()
398 s_req->busrequest = bus_req; in ath6kl_sdio_alloc_prep_scat_req()
400 s_req->virt_scat = virt_scat; in ath6kl_sdio_alloc_prep_scat_req()
403 hif_scatter_req_add(ar_sdio->ar, s_req); in ath6kl_sdio_alloc_prep_scat_req()
421 if (!ar_sdio->dma_buffer) in ath6kl_sdio_read_write_sync()
422 return -ENOMEM; in ath6kl_sdio_read_write_sync()
423 mutex_lock(&ar_sdio->dma_buffer_mutex); in ath6kl_sdio_read_write_sync()
424 tbuf = ar_sdio->dma_buffer; in ath6kl_sdio_read_write_sync()
434 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); in ath6kl_sdio_read_write_sync()
439 mutex_unlock(&ar_sdio->dma_buffer_mutex); in ath6kl_sdio_read_write_sync()
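/*
 * Hedged sketch of the bounce-buffer path around the fragment above:
 * when the caller's buffer is not DMA-safe, data is staged through the
 * pre-allocated ar_sdio->dma_buffer under dma_buffer_mutex. The
 * HIF_WRITE/HIF_READ request flags and ath6kl_sdio_io() are taken from
 * the surrounding driver code; the exact upstream control flow may
 * differ.
 */
static int sdio_bounce_io_sketch(struct ath6kl_sdio *ar_sdio, u32 request,
				 u32 addr, u8 *buffer, u32 len)
{
	u8 *tbuf;
	int ret;

	mutex_lock(&ar_sdio->dma_buffer_mutex);
	tbuf = ar_sdio->dma_buffer;

	if (request & HIF_WRITE)
		memcpy(tbuf, buffer, len);	/* stage outgoing data */

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);

	if (!ret && (request & HIF_READ))
		memcpy(buffer, tbuf, len);	/* copy back incoming data */

	mutex_unlock(&ar_sdio->dma_buffer_mutex);
	return ret;
}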
447 if (req->scat_req) { in __ath6kl_sdio_write_async()
453 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, in __ath6kl_sdio_write_async()
454 req->buffer, req->length, in __ath6kl_sdio_write_async()
455 req->request); in __ath6kl_sdio_write_async()
456 context = req->packet; in __ath6kl_sdio_write_async()
469 spin_lock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async_work()
470 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath6kl_sdio_write_async_work()
471 list_del(&req->list); in ath6kl_sdio_write_async_work()
472 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async_work()
474 spin_lock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async_work()
476 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async_work()
487 atomic_set(&ar_sdio->irq_handling, 1); in ath6kl_sdio_irq_handler()
492 sdio_release_host(ar_sdio->func); in ath6kl_sdio_irq_handler()
494 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); in ath6kl_sdio_irq_handler()
495 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_irq_handler()
497 atomic_set(&ar_sdio->irq_handling, 0); in ath6kl_sdio_irq_handler()
498 wake_up(&ar_sdio->irq_wq); in ath6kl_sdio_irq_handler()
500 WARN_ON(status && status != -ECANCELED); in ath6kl_sdio_irq_handler()
506 struct sdio_func *func = ar_sdio->func; in ath6kl_sdio_power_on()
509 if (!ar_sdio->is_disabled) in ath6kl_sdio_power_on()
512 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); in ath6kl_sdio_power_on()
518 ath6kl_err("Unable to enable sdio func: %d)\n", ret); in ath6kl_sdio_power_on()
533 ath6kl_err("Failed to config sdio: %d\n", ret); in ath6kl_sdio_power_on()
537 ar_sdio->is_disabled = false; in ath6kl_sdio_power_on()
548 if (ar_sdio->is_disabled) in ath6kl_sdio_power_off()
551 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); in ath6kl_sdio_power_off()
554 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_power_off()
555 ret = sdio_disable_func(ar_sdio->func); in ath6kl_sdio_power_off()
556 sdio_release_host(ar_sdio->func); in ath6kl_sdio_power_off()
561 ar_sdio->is_disabled = true; in ath6kl_sdio_power_off()
576 return -ENOMEM; in ath6kl_sdio_write_async()
578 bus_req->address = address; in ath6kl_sdio_write_async()
579 bus_req->buffer = buffer; in ath6kl_sdio_write_async()
580 bus_req->length = length; in ath6kl_sdio_write_async()
581 bus_req->request = request; in ath6kl_sdio_write_async()
582 bus_req->packet = packet; in ath6kl_sdio_write_async()
584 spin_lock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async()
585 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); in ath6kl_sdio_write_async()
586 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_write_async()
587 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); in ath6kl_sdio_write_async()
597 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_irq_enable()
600 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); in ath6kl_sdio_irq_enable()
602 ath6kl_err("Failed to claim sdio irq: %d\n", ret); in ath6kl_sdio_irq_enable()
604 sdio_release_host(ar_sdio->func); in ath6kl_sdio_irq_enable()
611 return !atomic_read(&ar_sdio->irq_handling); in ath6kl_sdio_is_on_irq()
619 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_irq_disable()
621 if (atomic_read(&ar_sdio->irq_handling)) { in ath6kl_sdio_irq_disable()
622 sdio_release_host(ar_sdio->func); in ath6kl_sdio_irq_disable()
624 ret = wait_event_interruptible(ar_sdio->irq_wq, in ath6kl_sdio_irq_disable()
629 sdio_claim_host(ar_sdio->func); in ath6kl_sdio_irq_disable()
632 ret = sdio_release_irq(ar_sdio->func); in ath6kl_sdio_irq_disable()
634 ath6kl_err("Failed to release sdio irq: %d\n", ret); in ath6kl_sdio_irq_disable()
636 sdio_release_host(ar_sdio->func); in ath6kl_sdio_irq_disable()
644 spin_lock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_scatter_req_get()
646 if (!list_empty(&ar_sdio->scat_req)) { in ath6kl_sdio_scatter_req_get()
647 node = list_first_entry(&ar_sdio->scat_req, in ath6kl_sdio_scatter_req_get()
649 list_del(&node->list); in ath6kl_sdio_scatter_req_get()
651 node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req); in ath6kl_sdio_scatter_req_get()
654 spin_unlock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_scatter_req_get()
664 spin_lock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_scatter_req_add()
666 list_add_tail(&s_req->list, &ar_sdio->scat_req); in ath6kl_sdio_scatter_req_add()
668 spin_unlock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_scatter_req_add()
676 u32 request = scat_req->req; in ath6kl_sdio_async_rw_scatter()
679 if (!scat_req->len) in ath6kl_sdio_async_rw_scatter()
680 return -EINVAL; in ath6kl_sdio_async_rw_scatter()
683 "hif-scatter: total len: %d scatter entries: %d\n", in ath6kl_sdio_async_rw_scatter()
684 scat_req->len, scat_req->scat_entries); in ath6kl_sdio_async_rw_scatter()
687 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); in ath6kl_sdio_async_rw_scatter()
689 spin_lock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_async_rw_scatter()
690 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); in ath6kl_sdio_async_rw_scatter()
691 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_async_rw_scatter()
692 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); in ath6kl_sdio_async_rw_scatter()
705 spin_lock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_cleanup_scatter()
706 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { in ath6kl_sdio_cleanup_scatter()
707 list_del(&s_req->list); in ath6kl_sdio_cleanup_scatter()
708 spin_unlock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_cleanup_scatter()
712 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so in ath6kl_sdio_cleanup_scatter()
715 if (s_req->busrequest) { in ath6kl_sdio_cleanup_scatter()
716 s_req->busrequest->scat_req = NULL; in ath6kl_sdio_cleanup_scatter()
717 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); in ath6kl_sdio_cleanup_scatter()
719 kfree(s_req->virt_dma_buf); in ath6kl_sdio_cleanup_scatter()
720 kfree(s_req->sgentries); in ath6kl_sdio_cleanup_scatter()
723 spin_lock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_cleanup_scatter()
725 spin_unlock_bh(&ar_sdio->scat_lock); in ath6kl_sdio_cleanup_scatter()
727 ar_sdio->scatter_enabled = false; in ath6kl_sdio_cleanup_scatter()
734 struct htc_target *target = ar->htc_target; in ath6kl_sdio_enable_scatter()
738 if (ar_sdio->scatter_enabled) in ath6kl_sdio_enable_scatter()
741 ar_sdio->scatter_enabled = true; in ath6kl_sdio_enable_scatter()
744 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { in ath6kl_sdio_enable_scatter()
746 ar_sdio->func->card->host->max_segs, in ath6kl_sdio_enable_scatter()
758 "hif-scatter enabled requests %d entries %d\n", in ath6kl_sdio_enable_scatter()
762 target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; in ath6kl_sdio_enable_scatter()
763 target->max_xfer_szper_scatreq = in ath6kl_sdio_enable_scatter()
786 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; in ath6kl_sdio_enable_scatter()
787 target->max_xfer_szper_scatreq = in ath6kl_sdio_enable_scatter()
797 struct sdio_func *func = ar_sdio->func; in ath6kl_sdio_config()
802 if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) { in ath6kl_sdio_config()
803 /* enable 4-bit ASYNC interrupt on AR6003 or later */ in ath6kl_sdio_config()
804 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, in ath6kl_sdio_config()
808 ath6kl_err("Failed to enable 4-bit async irq mode %d\n", in ath6kl_sdio_config()
813 ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); in ath6kl_sdio_config()
817 func->enable_timeout = 100; in ath6kl_sdio_config()
821 ath6kl_err("Set sdio block size %d failed: %d)\n", in ath6kl_sdio_config()
835 struct sdio_func *func = ar_sdio->func; in ath6kl_set_sdio_pm_caps()
841 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); in ath6kl_set_sdio_pm_caps()
845 return -EINVAL; in ath6kl_set_sdio_pm_caps()
849 ath6kl_err("set sdio keep pwr flag failed: %d\n", ret); in ath6kl_set_sdio_pm_caps()
853 /* sdio irq wakes up host */ in ath6kl_set_sdio_pm_caps()
856 ath6kl_err("set sdio wake irq flag failed: %d\n", ret); in ath6kl_set_sdio_pm_caps()
864 struct sdio_func *func = ar_sdio->func; in ath6kl_sdio_suspend()
869 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || in ath6kl_sdio_suspend()
870 (!ar->suspend_mode && wow)) { in ath6kl_sdio_suspend()
876 if (ret && ret != -ENOTCONN) in ath6kl_sdio_suspend()
880 (!ar->wow_suspend_mode || in ath6kl_sdio_suspend()
881 ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP)) in ath6kl_sdio_suspend()
884 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR) in ath6kl_sdio_suspend()
890 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || in ath6kl_sdio_suspend()
891 !ar->suspend_mode || try_deepsleep) { in ath6kl_sdio_suspend()
922 if (func->card && func->card->host) in ath6kl_sdio_suspend()
923 func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER; in ath6kl_sdio_suspend()
930 switch (ar->state) { in ath6kl_sdio_resume()
934 "sdio resume configuring sdio\n"); in ath6kl_sdio_resume()
936 /* need to set sdio settings after power is cut from sdio */ in ath6kl_sdio_resume()
964 /* set the window address register (using 4-byte register access). */
984 * Hit each byte of the register address with a 4-byte in ath6kl_set_addrwin_reg()
1002 * 4-byte value. The effect here is that the LSB write causes the in ath6kl_set_addrwin_reg()
1003 * cycle to start, the extra 3-byte write to bytes 1,2,3 has no other effect since the same values are written again. in ath6kl_set_addrwin_reg()
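/*
 * Hedged sketch of the write ordering described in the comments above:
 * bytes 1..3 of the window register are each hit with a 4-byte
 * fixed-address write (keeping every HIF access 4-byte aligned), then
 * the whole 32-bit value is written so that the LSB write starts the
 * access cycle while the redundant writes to bytes 1..3 change
 * nothing. ath6kl_sdio_read_write_sync() and the HIF_WR_SYNC_BYTE_FIX
 * / HIF_WR_SYNC_BYTE_INC request flags are assumed from the driver's
 * HIF layer; exact upstream details may differ. Needs
 * <asm/unaligned.h> for put_unaligned_le32().
 */
static int addrwin_reg_write_sketch(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	u8 addr_val[4];
	int status, i;

	for (i = 1; i <= 3; i++) {
		/* replicate address byte i across a 4-byte buffer */
		memset(addr_val, (addr >> (i * 8)) & 0xff, 4);
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i,
						     addr_val, 4,
						     HIF_WR_SYNC_BYTE_FIX);
		if (status)
			return status;
	}

	/* full 4-byte write last: the LSB write triggers the cycle */
	put_unaligned_le32(addr, addr_val);
	return ath6kl_sdio_read_write_sync(ar, reg_addr, addr_val, 4,
					   HIF_WR_SYNC_BYTE_INC);
}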
1067 ar->bmi.cmd_credits = 0; in ath6kl_sdio_bmi_credits()
1073 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { in ath6kl_sdio_bmi_credits()
1075 * Hit the credit counter with a 4-byte access, the first byte in ath6kl_sdio_bmi_credits()
1077 * remaining 3 bytes have no effect. The rationale behind this in ath6kl_sdio_bmi_credits()
1078 * is to make all HIF accesses 4-byte aligned. in ath6kl_sdio_bmi_credits()
1081 (u8 *)&ar->bmi.cmd_credits, 4, in ath6kl_sdio_bmi_credits()
1092 ar->bmi.cmd_credits &= 0xFF; in ath6kl_sdio_bmi_credits()
1095 if (!ar->bmi.cmd_credits) { in ath6kl_sdio_bmi_credits()
1097 return -ETIMEDOUT; in ath6kl_sdio_bmi_credits()
1126 return -EINVAL; in ath6kl_bmi_get_rx_lkahd()
1141 addr = ar->mbox_info.htc_addr; in ath6kl_sdio_bmi_write()
1166 * In particular, this avoids SDIO timeouts and possibly garbage in ath6kl_sdio_bmi_read()
1168 * such as Compact Flash (as well as some SDIO masters) which in ath6kl_sdio_bmi_read()
1178 * not occur in practice -- they're supported for debug/development. in ath6kl_sdio_bmi_read()
1199 * If BMI_EXECUTE ever needs to support longer-latency execution, in ath6kl_sdio_bmi_read()
1210 addr = ar->mbox_info.htc_addr; in ath6kl_sdio_bmi_read()
1230 cancel_work_sync(&ar_sdio->wr_async_work); in ath6kl_sdio_stop()
1232 spin_lock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_stop()
1234 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath6kl_sdio_stop()
1235 list_del(&req->list); in ath6kl_sdio_stop()
1237 if (req->scat_req) { in ath6kl_sdio_stop()
1239 req->scat_req->status = -ECANCELED; in ath6kl_sdio_stop()
1240 req->scat_req->complete(ar_sdio->ar->htc_target, in ath6kl_sdio_stop()
1241 req->scat_req); in ath6kl_sdio_stop()
1243 context = req->packet; in ath6kl_sdio_stop()
1245 ath6kl_hif_rw_comp_handler(context, -ECANCELED); in ath6kl_sdio_stop()
1249 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath6kl_sdio_stop()
1251 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); in ath6kl_sdio_stop()
1283 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); in ath6kl_sdio_pm_suspend()
1290 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); in ath6kl_sdio_pm_resume()
1315 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", in ath6kl_sdio_probe()
1316 func->num, func->vendor, func->device, in ath6kl_sdio_probe()
1317 func->max_blksize, func->cur_blksize); in ath6kl_sdio_probe()
1321 return -ENOMEM; in ath6kl_sdio_probe()
1323 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); in ath6kl_sdio_probe()
1324 if (!ar_sdio->dma_buffer) { in ath6kl_sdio_probe()
1325 ret = -ENOMEM; in ath6kl_sdio_probe()
1329 ar_sdio->func = func; in ath6kl_sdio_probe()
1332 ar_sdio->id = id; in ath6kl_sdio_probe()
1333 ar_sdio->is_disabled = true; in ath6kl_sdio_probe()
1335 spin_lock_init(&ar_sdio->lock); in ath6kl_sdio_probe()
1336 spin_lock_init(&ar_sdio->scat_lock); in ath6kl_sdio_probe()
1337 spin_lock_init(&ar_sdio->wr_async_lock); in ath6kl_sdio_probe()
1338 mutex_init(&ar_sdio->dma_buffer_mutex); in ath6kl_sdio_probe()
1340 INIT_LIST_HEAD(&ar_sdio->scat_req); in ath6kl_sdio_probe()
1341 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); in ath6kl_sdio_probe()
1342 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); in ath6kl_sdio_probe()
1344 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); in ath6kl_sdio_probe()
1346 init_waitqueue_head(&ar_sdio->irq_wq); in ath6kl_sdio_probe()
1349 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); in ath6kl_sdio_probe()
1351 ar = ath6kl_core_create(&ar_sdio->func->dev); in ath6kl_sdio_probe()
1354 ret = -ENOMEM; in ath6kl_sdio_probe()
1358 ar_sdio->ar = ar; in ath6kl_sdio_probe()
1359 ar->hif_type = ATH6KL_HIF_TYPE_SDIO; in ath6kl_sdio_probe()
1360 ar->hif_priv = ar_sdio; in ath6kl_sdio_probe()
1361 ar->hif_ops = &ath6kl_sdio_ops; in ath6kl_sdio_probe()
1362 ar->bmi.max_data_size = 256; in ath6kl_sdio_probe()
1368 ath6kl_err("Failed to config sdio: %d\n", ret); in ath6kl_sdio_probe()
1381 ath6kl_core_destroy(ar_sdio->ar); in ath6kl_sdio_probe()
1383 kfree(ar_sdio->dma_buffer); in ath6kl_sdio_probe()
1395 "sdio removed func %d vendor 0x%x device 0x%x\n", in ath6kl_sdio_remove()
1396 func->num, func->vendor, func->device); in ath6kl_sdio_remove()
1400 ath6kl_stop_txrx(ar_sdio->ar); in ath6kl_sdio_remove()
1401 cancel_work_sync(&ar_sdio->wr_async_work); in ath6kl_sdio_remove()
1403 ath6kl_core_cleanup(ar_sdio->ar); in ath6kl_sdio_remove()
1404 ath6kl_core_destroy(ar_sdio->ar); in ath6kl_sdio_remove()
1406 kfree(ar_sdio->dma_buffer); in ath6kl_sdio_remove()
1421 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
1433 MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");