Lines matching "sdio" in drivers/net/wireless/ath/ath10k/sdio.c (non-contiguous excerpt; elided code is marked with "...")

// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/mmc/sdio.h>
...
#include "sdio.h"
/* in ath10k_sdio_calc_txrx_padded_len() */
	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
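/* Illustrative sketch, not driver code: what the padding above computes.
 * __ALIGN_MASK(x, mask) rounds x up to the next multiple of (mask + 1),
 * and block_mask is block_size - 1 (see ath10k_sdio_set_mbox_info()
 * below), so e.g. a 1500 byte message with 256 byte SDIO blocks is
 * padded to 1536 bytes.
 */
static inline unsigned int sdio_pad_to_block(unsigned int len,
					     unsigned int block_mask)
{
	return (len + block_mask) & ~block_mask;
}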
/* in ath10k_sdio_mbox_free_rx_pkt() */
	dev_kfree_skb(pkt->skb);
	pkt->skb = NULL;
	pkt->alloc_len = 0;
	pkt->act_len = 0;
	pkt->trailer_only = false;

/* in ath10k_sdio_mbox_alloc_rx_pkt() */
	pkt->skb = dev_alloc_skb(full_len);
	if (!pkt->skb)
		return -ENOMEM;

	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

/* in is_trailer_only_msg() */
	struct ath10k_htc_hdr *htc_hdr =
		(struct ath10k_htc_hdr *)pkt->skb->data;
	u16 len = __le16_to_cpu(htc_hdr->len);

	if (len == htc_hdr->trailer_len)
		...

/* sdio/mmc functions */
/* in ath10k_sdio_func0_cmd52_wr_byte() */
	return mmc_wait_for_cmd(card->host, &io_cmd, 0);

/* in ath10k_sdio_func0_cmd52_rd_byte() */
	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
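/* Illustrative sketch, not driver code: the standard SDIO CMD52
 * (IO_RW_DIRECT) argument layout from the SDIO spec, which the helpers
 * above fill in for function 0 before calling mmc_wait_for_cmd().  The
 * helper name is hypothetical; the RAW (read-after-write) bit 27 is
 * left clear here.
 */
#include <linux/types.h>

static u32 cmd52_arg(bool write, unsigned int fn, unsigned int addr, u8 data)
{
	u32 arg = 0;

	arg |= (write ? 1U : 0U) << 31;	/* bit 31: direction (1 = write) */
	arg |= (fn & 0x7) << 28;	/* bits 30:28: function number */
	arg |= (addr & 0x1ffff) << 9;	/* bits 25:9: register address */
	arg |= data;			/* bits 7:0: byte to write */

	return arg;
}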
/* in ath10k_sdio_config() */
	struct sdio_func *func = ar_sdio->func;
	...
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
	...
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, ...);
	if (ret)
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
	...
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, ...);
	...
	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, ...);
	...
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret)
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
/* in ath10k_sdio_write32() */
	struct sdio_func *func = ar_sdio->func;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
		   ...);

/* in ath10k_sdio_writesb32() */
	struct sdio_func *func = ar_sdio->func;
	...
		return -ENOMEM;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
		   ...);

/* in ath10k_sdio_read32() */
	struct sdio_func *func = ar_sdio->func;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
		   ...);

/* in ath10k_sdio_read() */
	struct sdio_func *func = ar_sdio->func;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
		   ...);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

/* in ath10k_sdio_write() */
	struct sdio_func *func = ar_sdio->func;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   ...);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

/* in ath10k_sdio_readsb() */
	struct sdio_func *func = ar_sdio->func;
	...
	len = round_down(len, ar_sdio->mbox_info.block_size);
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   ...);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
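/* Sketch (hypothetical helper): sdio_readsb() moves whole SDIO blocks,
 * which is why the length above is first rounded down to a block
 * multiple; for power-of-two block sizes round_down() reduces to a mask.
 */
static inline size_t trim_to_blocks(size_t len, size_t block_size)
{
	return len & ~(block_size - 1);
}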
/* in ath10k_sdio_mbox_rx_process_packet() */
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	...
		trailer = skb->data + skb->len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);
		...
					htc_hdr->trailer_len,
					...
		...
			pkt->trailer_only = true;
		...
		skb_trim(skb, skb->len - htc_hdr->trailer_len);
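/* Mailbox HTC frames are laid out as
 *
 *   [ struct ath10k_htc_hdr | payload | trailer (htc_hdr->trailer_len) ]
 *
 * A frame whose payload length equals its trailer length carries no data
 * at all (pkt->trailer_only above); otherwise the trailer is processed
 * and then trimmed off with skb_trim() before the skb is passed up.
 */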
/* in ath10k_sdio_mbox_rx_process_packets() */
	struct ath10k_htc *htc = &ar->htc;
	...
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		...
			  &lookaheads[lookahead_idx++])->eid;
		...
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    ...);
			ret = -ENOMEM;
			...

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			...
			ret = -ENOMEM;
			...
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			...
			lookahead_idx--;
			...
		}
		...
		if (!pkt->trailer_only) {
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			kfree_skb(pkt->skb);
		}
		...
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}
	...
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
/* in ath10k_sdio_mbox_alloc_bundle() */
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
	...
			    le16_to_cpu(htc_hdr->len),
			    ...);
		return -ENOMEM;
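/* Sketch under a loudly-labeled assumption: the bundle count is taken
 * here from the upper nibble of the HTC flags byte (GENMASK(7, 4)).
 * The driver's real accessor, ath10k_htc_get_bundle_count(), must also
 * cope with firmware that signals larger bundles, so treat this as
 * illustrative only.
 */
static u8 bundle_count_from_flags(u8 flags)
{
	return (flags & 0xf0) >> 4;
}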
/* in ath10k_sdio_mbox_rx_alloc() */
	...
		ret = -ENOMEM;
	...
		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "...",
				    le16_to_cpu(htc_hdr->len),
				    ...);
			ret = -ENOMEM;
			...
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		...
			ath10k_warn(ar, "...",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			...
		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			...
				&ar_sdio->rx_pkts[pkt_cnt],
				...
		}
		...
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			...

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    ...);
	...
	ar_sdio->n_rx_pkts = pkt_cnt;
	...
		if (!ar_sdio->rx_pkts[i].alloc_len)
			...
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
/* in ath10k_sdio_mbox_rx_fetch() */
	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
	struct sk_buff *skb = pkt->skb;
	...
	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	...
	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

	if (pkt->act_len > pkt->alloc_len) {
		ret = -EINVAL;
		...
	}

	skb_put(skb, pkt->act_len);
	...
	ar_sdio->n_rx_pkts = 0;
/* in ath10k_sdio_mbox_rx_fetch_bundle() */
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
		ret = -E2BIG;
		...
	}

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 ar_sdio->vsg_buffer, virt_pkt_len);
	...
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		pkt = &ar_sdio->rx_pkts[i];
		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

		if (pkt->act_len > pkt->alloc_len) {
			ret = -EINVAL;
			...
		}

		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
		pkt_offset += pkt->alloc_len;
	}
	...
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	ar_sdio->n_rx_pkts = 0;
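/* Note the pattern above: the whole bundle is pulled in with a single
 * sdio_readsb() into vsg_buffer and only then split into per-packet skbs
 * with skb_put_data().  One large bus transaction is far cheaper than
 * one transaction per bundled packet; the cost is one extra memcpy.
 * Packets sit inside the buffer at their padded alloc_len offsets, while
 * act_len (from each HTC header) gives the bytes that are actually valid.
 */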
/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump logs
 * over serial port can/will add a substantial overhead to the processing
 * (if enabled).
 */
/* in ath10k_sdio_mbox_rxmsg_pending_handler() */
	if (ar_sdio->n_rx_pkts >= 2)
		/* ... re-check again. */
		...

	if (ar_sdio->n_rx_pkts > 1)
		...

	/* ... flag that we should re-check IRQ status registers again ... */
	...
	if (ret && (ret != -ECANCELED))
		...
/* in ath10k_sdio_mbox_proc_counter_intr() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	...
	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;
	...
	mutex_unlock(&irq_data->mtx);
/* in ath10k_sdio_mbox_proc_err_intr() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	...
		return -EIO;
	...
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);
	...
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
	...
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
/* in ath10k_sdio_mbox_proc_cpu_intr() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	...
	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		...
		ret = -EIO;
		...
	}

	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* ... this is done to make the access 4-byte aligned to mitigate issues
	 * with host bus interconnects that restrict bus transfer lengths to
	 * be a multiple of 4-bytes.
	 */
	...
	mutex_unlock(&irq_data->mtx);
/* in ath10k_sdio_mbox_read_int_status() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	...
	mutex_lock(&irq_data->mtx);
	...
	if (!irq_en_reg->int_status_en) {
		...
	}
	...
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;
	...
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}
	...
	mutex_unlock(&irq_data->mtx);
1025 "sdio pending mailbox msg lookahead 0x%08x\n", in ath10k_sdio_mbox_proc_pending_irqs()
1037 "sdio host_int_status 0x%x\n", host_int_status); in ath10k_sdio_mbox_proc_pending_irqs()
1061 * unnecessarily which can re-wake the target, if upper layers in ath10k_sdio_mbox_proc_pending_irqs()
1062 * determine that we are in a low-throughput mode, we can rely on in ath10k_sdio_mbox_proc_pending_irqs()
1063 * taking another interrupt rather than re-checking the status in ath10k_sdio_mbox_proc_pending_irqs()
1064 * registers which can re-wake the target. in ath10k_sdio_mbox_proc_pending_irqs()
1073 "sdio pending irqs done %d status %d", in ath10k_sdio_mbox_proc_pending_irqs()
/* in ath10k_sdio_set_mbox_info() */
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
	...
		mbox_info->ext_info[0].htc_ext_sz =
			...;
	...
		mbox_info->ext_info[0].htc_ext_sz =
			...;
	...
		mbox_info->ext_info[0].htc_ext_sz =
			...;
	...
		mbox_info->ext_info[0].htc_ext_sz =
			...;
	...
	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		...;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
/* in ath10k_sdio_bmi_credits() */
	/* Hit the credit counter with a 4-byte access, the first byte
	 * read will hold the credit count, the remaining 3 bytes has no
	 * effect. The rationale behind this is to make all HIF accesses
	 * 4-byte aligned.
	 */
	...
			return -ETIMEDOUT;
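/* Sketch of the 4-byte-aligned credit read described above, assuming
 * ath10k_sdio_read32()'s u32-out signature; only the low byte of the
 * word is meaningful.
 */
static int read_credit_count(struct ath10k *ar, u32 addr, u8 *credits)
{
	u32 word;
	int ret;

	ret = ath10k_sdio_read32(ar, addr, &word);	/* aligned 4-byte access */
	if (ret)
		return ret;

	*credits = word & 0xff;	/* the counter is only 8 bits wide */
	return 0;
}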
/* in ath10k_sdio_bmi_get_rx_lookahead() */
		return -EINVAL;
/* in ath10k_sdio_bmi_exchange_msg() */
	addr = ar_sdio->mbox_info.htc_addr;

	memcpy(ar_sdio->bmi_buf, req, req_len);
	ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
	...
		/* No response expected */
	...
	/* ...
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * ... such as Compact Flash (as well as some SDIO masters) which
	 * ... not occur in practice -- they're supported for debug/development.
	 */
	...
	/* ... If BMI_EXECUTE ever needs to support longer-latency execution,
	 * ...
	 */
	...
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	...
	memcpy(resp, ar_sdio->bmi_buf, *resp_len);
/* sdio async handling functions */
/* in ath10k_sdio_alloc_busreq() */
	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		...
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   ...);
	list_del(&bus_req->list);
	...
	spin_unlock_bh(&ar_sdio->lock);

/* in ath10k_sdio_free_bus_req() */
	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
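/* Minimal sketch of the recycling scheme above: bus requests live on a
 * spinlock-protected free list and are reused instead of allocated per
 * transfer, keeping the hot TX path allocation-free.  Names here are
 * illustrative, not the driver's.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct bus_req {
	struct list_head list;
	/* ... transfer parameters ... */
};

static struct bus_req *bus_req_get(spinlock_t *lock, struct list_head *freeq)
{
	struct bus_req *req = NULL;

	spin_lock_bh(lock);
	if (!list_empty(freeq)) {
		req = list_first_entry(freeq, struct bus_req, list);
		list_del(&req->list);
	}
	spin_unlock_bh(lock);

	return req;	/* NULL means the pool is exhausted */
}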
/* in __ath10k_sdio_write_async() */
	skb = req->skb;
	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
	...
			    req->address, ret);

	if (req->htc_msg) {
		ep = &ar->htc.endpoint[req->eid];
		...
	} else if (req->comp) {
		complete(req->comp);
	}

/* ... this way SDIO bus is utilised much better. */
/* in ath10k_rx_indication_async_work() */
	struct ath10k *ar = ar_sdio->ar;
	...
		skb = skb_dequeue(&ar_sdio->rx_head);
		...
		ep = &ar->htc.endpoint[cb->eid];
		ep->ep_ops.ep_rx_complete(ar, skb);
	...
	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
		...
		napi_schedule(&ar->napi);
	}
/* in ath10k_sdio_read_rtc_state() */
	struct ath10k *ar = ar_sdio->ar;
	...
	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);

/* in ath10k_sdio_set_mbox_sleep() */
	sdio_claim_host(ar_sdio->func);
	...
		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
		...
		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
	...
			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
				   ...);
			...
			retry--;
	...
	sdio_release_host(ar_sdio->func);

/* in ath10k_sdio_sleep_timer_handler() */
	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
/* in ath10k_sdio_write_async_work() */
	struct ath10k *ar = ar_sdio->ar;
	...
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	...
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);

		if (req->address >= mbox_info->htc_addr &&
		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
			...
			mod_timer(&ar_sdio->sleep_timer, jiffies +
				  ...);
		}
		...
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
		...
/* in ath10k_sdio_prep_async_req() */
	/* ... SDIO workqueue. */
	...
		return -ENOMEM;
	...
	bus_req->skb = skb;
	bus_req->eid = eid;
	bus_req->address = addr;
	bus_req->htc_msg = htc_msg;
	bus_req->comp = comp;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
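/* Requests queued here are not written immediately: callers follow up
 * with queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work) (as
 * ath10k_sdio_hif_tx_sg() does below), and ath10k_sdio_write_async_work()
 * above drains wr_asyncq and performs the actual SDIO writes.
 */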
/* in ath10k_sdio_irq_handler() */
	struct ath10k *ar = ar_sdio->ar;
	...
	sdio_release_host(ar_sdio->func);
	...
	sdio_claim_host(ar_sdio->func);

	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);

/* sdio HIF functions */
/* in ath10k_sdio_disable_intrs() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	...
	mutex_lock(&irq_data->mtx);
	...
	ret = ath10k_sdio_write(ar, ...,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);

	mutex_unlock(&irq_data->mtx);
/* in ath10k_sdio_hif_power_up() */
	struct sdio_func *func = ar_sdio->func;
	...
	if (!ar_sdio->is_disabled)
		...

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
	...
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
	...
		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
	...
	ar_sdio->is_disabled = false;
/* in ath10k_sdio_hif_power_down() */
	if (ar_sdio->is_disabled)
		...

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	del_timer_sync(&ar_sdio->sleep_timer);
	...
	sdio_claim_host(ar_sdio->func);

	ret = sdio_disable_func(ar_sdio->func);
	if (ret) {
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		...
	}

	ret = mmc_hw_reset(ar_sdio->func->card);
	if (ret)
		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);

	sdio_release_host(ar_sdio->func);

	ar_sdio->is_disabled = true;
/* in ath10k_sdio_hif_tx_sg() */
	...
		       skb->len);
	...
	address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
		  skb->len;
	...
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
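/* The address computed above places the *last* byte of the frame on the
 * last byte of the endpoint's mailbox window.  Atheros mailboxes treat a
 * write to the final mailbox address as the end-of-message marker, which
 * is how the target learns that a complete frame has arrived.
 */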
/* in ath10k_sdio_enable_intrs() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	...
	mutex_lock(&irq_data->mtx);
	...
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      ...;
	...
	regs->int_status_en |=
		...;
	...
	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
	...
	regs->err_int_status_en =
		...;
	...
	regs->cntr_int_status_en =
		...;
	...
	ret = ath10k_sdio_write(ar, ...,
				&regs->int_status_en, sizeof(*regs));
	...
	mutex_unlock(&irq_data->mtx);
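/* Illustrative, not driver code: FIELD_PREP() from <linux/bitfield.h>
 * shifts a value into the bit positions named by a mask, so a register
 * image can be composed one field at a time.  The mask below is
 * hypothetical.
 */
#include <linux/bitfield.h>

#define DEMO_ENABLE_MASK	GENMASK(4, 1)

static u8 demo_compose(u8 val)
{
	return FIELD_PREP(DEMO_ENABLE_MASK, val);	/* val = 1 -> 0x02 */
}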
/* in ath10k_sdio_hif_diag_read() */
		return -ENOMEM;

/* in ath10k_sdio_diag_read32() */
		return -ENOMEM;

/* in ath10k_sdio_hif_start_post() */
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service enabled\n");
		ar_sdio->swap_mbox = true;
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service disabled\n");
		ar_sdio->swap_mbox = false;
	}

/* in ath10k_sdio_get_htt_tx_complete() */
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
		   ...);
/* in ath10k_sdio_hif_start() */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	sdio_claim_host(ar_sdio->func);
	...
	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		...
	}

	sdio_release_host(ar_sdio->func);
	...
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
/* in ath10k_sdio_irq_disable() */
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	...
	mutex_lock(&irq_data->mtx);
	...
	memcpy(skb->data, regs, sizeof(*regs));
	...
	mutex_unlock(&irq_data->mtx);
	...
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
	...
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);
/* in ath10k_sdio_hif_stop() */
	cancel_work_sync(&ar_sdio->async_work_rx);

	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
		...

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	...
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		...
		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);
/* in ath10k_sdio_hif_resume() */
	switch (ar->state) {
	...
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		...
	}
/* in ath10k_sdio_hif_map_service_to_pipe() */
	struct ath10k_htc *htc = &ar->htc;
	...
	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id ...
	 */
	...
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			...
		}
	...
		return -EINVAL;
	...
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}
	...
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		...
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		...
		return -EINVAL;
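/* Net effect of the swap above (indices into mbox_info.ext_info[]):
 *
 *                      swap_mbox        default
 *   HTT data service   ext mailbox 0    ext mailbox 1
 *   WMI ctrl service   ext mailbox 1    ext mailbox 0
 *
 * The firmware's mailbox-swap service (see ath10k_sdio_hif_start_post()
 * above) decides which layout is in force.
 */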
/* in ath10k_sdio_hif_get_default_pipe() */
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

/* in ath10k_sdio_pm_suspend() */
	struct ath10k *ar = ar_sdio->ar;
	...
	if (!device_may_wakeup(ar->dev))
		...
	...
		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
			    ...);

/* in ath10k_sdio_is_fast_dump_supported() */
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);

/* in ath10k_sdio_dump_registers() */
		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
/* in ath10k_sdio_dump_memory_section() */
	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		...
			    mem_region->start, cur_section->start);
		...
	}

	skip_size = cur_section->start - mem_region->start;
	...
		section_size = cur_section->end - cur_section->start;
		...
				    cur_section->start,
				    cur_section->end);
		...
		if (++i == mem_region->section_table.size) {
			...
		}
		...
		if (cur_section->end > next_section->start) {
			...
				    next_section->start,
				    cur_section->end);
			...
		}

		skip_size = next_section->start - cur_section->end;
		...
		buf_len -= skip_size + section_size;
		...
		ret = ath10k_sdio_read_mem(ar, cur_section->start,
					   ...);
		...
			    cur_section->start, ret);
/* in ath10k_sdio_dump_memory_generic() */
	if (current_region->section_table.size > 0)
		...
			current_region->len);
	...
	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	...
			current_region->start,
			...
			current_region->len);
	...
			current_region->start,
			...
			current_region->len);
	...
			current_region->name, ret);
	...
	return current_region->len;
/* in ath10k_sdio_dump_memory() */
	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	...
	for (i = 0; i < mem_layout->region_table.size; i++) {
		...
		if (current_region->len > buf_len) {
			...
				    current_region->name,
				    current_region->len,
				    ...);
			...
		}
		...
		buf_len -= sizeof(*hdr);
		...
		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);
		...
		buf_len -= count;
	}
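/* Resulting dump buffer layout, built one region at a time above: a
 * fixed header (region type, start address, length) is reserved first,
 * the region contents are read in after it, and the header's length
 * field is filled with the byte count actually captured:
 *
 *   [ hdr | region data ][ hdr | region data ] ...
 */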
/* in ath10k_sdio_fw_crashed_dump() */
	ar->stats.fw_crash_counter++;
	...
	scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
/* in ath10k_sdio_probe() */
	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If newer chipsets ever use sdio.c, this assumption is no longer
	 * valid and hw_rev must be setup differently.
	 */
	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				...);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}
	netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);
	...
	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		...
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		...
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		...
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		...
	}

	ar_sdio->func = func;
	...
	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		...
	}
	...
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	dev_id_base = (id->device & 0x0F00);
	...
		ret = -ENODEV;
		...
			   dev_id_base, id->device);
	...
	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;
	...
	/* TODO: don't know yet how to get chip_id with SDIO */
	...
	ar->hw->max_mtu = ETH_DATA_LEN;
	...
	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
	...
	destroy_workqueue(ar_sdio->workqueue);
/* in ath10k_sdio_remove() */
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);
	...
	netif_napi_del(&ar->napi);
	...
	destroy_workqueue(ar_sdio->workqueue);

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");