Lines Matching +full:no +full:- +full:sdio
1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2004-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
14 #include <linux/mmc/sdio.h>
25 #include "sdio.h"
37 return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask); in ath10k_sdio_calc_txrx_padded_len()
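The helper above pads a transfer length up to a whole number of mailbox blocks. A minimal standalone sketch of the same arithmetic, assuming a 256-byte block size (block_mask is block_size - 1, and block sizes are powers of two):

    #include <stdio.h>

    /* userspace equivalent of the kernel's __ALIGN_MASK() */
    #define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        unsigned int block_size = 256;        /* assumed SDIO block size */
        unsigned int block_mask = block_size - 1;

        /* 1500 bytes pads up to 1536, i.e. six full 256-byte blocks */
        printf("%u -> %u\n", 1500u, ALIGN_MASK(1500u, block_mask));
        return 0;
    }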
47 dev_kfree_skb(pkt->skb); in ath10k_sdio_mbox_free_rx_pkt()
48 pkt->skb = NULL; in ath10k_sdio_mbox_free_rx_pkt()
49 pkt->alloc_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
50 pkt->act_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
51 pkt->trailer_only = false; in ath10k_sdio_mbox_free_rx_pkt()
59 pkt->skb = dev_alloc_skb(full_len); in ath10k_sdio_mbox_alloc_rx_pkt()
60 if (!pkt->skb) in ath10k_sdio_mbox_alloc_rx_pkt()
61 return -ENOMEM; in ath10k_sdio_mbox_alloc_rx_pkt()
63 pkt->act_len = act_len; in ath10k_sdio_mbox_alloc_rx_pkt()
64 pkt->alloc_len = full_len; in ath10k_sdio_mbox_alloc_rx_pkt()
65 pkt->part_of_bundle = part_of_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
66 pkt->last_in_bundle = last_in_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
67 pkt->trailer_only = false; in ath10k_sdio_mbox_alloc_rx_pkt()
76 (struct ath10k_htc_hdr *)pkt->skb->data; in is_trailer_only_msg()
77 u16 len = __le16_to_cpu(htc_hdr->len); in is_trailer_only_msg()
79 if (len == htc_hdr->trailer_len) in is_trailer_only_msg()
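In the check above, the HTC header's len field covers the payload including any trailer, so a message is trailer-only exactly when len equals trailer_len. A standalone sketch of that predicate (field layout abbreviated and assumed; the real struct ath10k_htc_hdr is defined in htc.h):

    #include <stdbool.h>
    #include <stdint.h>

    /* abbreviated header, for illustration only */
    struct htc_hdr_sketch {
        uint16_t len;         /* payload length, trailer included */
        uint8_t  trailer_len; /* trailer bytes at the payload's end */
    };

    static bool is_trailer_only(const struct htc_hdr_sketch *hdr)
    {
        /* trailer-only: the whole payload is consumed by the trailer */
        return hdr->len == hdr->trailer_len;
    }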
85 /* sdio/mmc functions */
110 return mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_wr_byte()
125 ret = mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_rd_byte()
135 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_config()
139 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n"); in ath10k_sdio_config()
144 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
152 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
158 func->card, in ath10k_sdio_config()
166 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
175 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
181 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
185 ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n", in ath10k_sdio_config()
191 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
198 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
203 func->enable_timeout = 100; in ath10k_sdio_config()
205 ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size); in ath10k_sdio_config()
207 ath10k_warn(ar, "failed to set sdio block size to %d: %d\n", in ath10k_sdio_config()
208 ar_sdio->mbox_info.block_size, ret); in ath10k_sdio_config()
220 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write32()
232 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n", in ath10k_sdio_write32()
244 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_writesb32()
250 return -ENOMEM; in ath10k_sdio_writesb32()
263 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n", in ath10k_sdio_writesb32()
277 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read32()
288 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n", in ath10k_sdio_read32()
300 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read()
312 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_read()
314 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len); in ath10k_sdio_read()
325 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write()
340 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_write()
342 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len); in ath10k_sdio_write()
353 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_readsb()
358 len = round_down(len, ar_sdio->mbox_info.block_size); in ath10k_sdio_readsb()
367 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_readsb()
369 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len); in ath10k_sdio_readsb()
384 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packet()
385 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_process_packet()
386 struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_process_packet()
387 bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; in ath10k_sdio_mbox_rx_process_packet()
393 trailer = skb->data + skb->len - htc_hdr->trailer_len; in ath10k_sdio_mbox_rx_process_packet()
395 eid = pipe_id_to_eid(htc_hdr->eid); in ath10k_sdio_mbox_rx_process_packet()
399 htc_hdr->trailer_len, in ath10k_sdio_mbox_rx_process_packet()
407 pkt->trailer_only = true; in ath10k_sdio_mbox_rx_process_packet()
409 skb_trim(skb, skb->len - htc_hdr->trailer_len); in ath10k_sdio_mbox_rx_process_packet()
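A worked example of the trailer handling above, assuming a 100-byte HTC message carrying a 12-byte trailer: the trailer starts at offset 88, and trimming leaves an 88-byte payload for HTC.

    #include <stdio.h>

    int main(void)
    {
        unsigned char msg[100];        /* assumed 100-byte HTC message */
        unsigned int len = sizeof(msg);
        unsigned int trailer_len = 12; /* from htc_hdr->trailer_len */

        /* the trailer occupies the last trailer_len bytes */
        unsigned char *trailer = msg + len - trailer_len;

        /* the skb_trim() step: shrink the logical length to payload only */
        len -= trailer_len;

        printf("trailer at offset %td, payload %u bytes\n",
               trailer - msg, len);    /* offset 88, 88 bytes */
        return 0;
    }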
422 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packets()
431 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_process_packets()
436 &lookaheads[lookahead_idx++])->eid; in ath10k_sdio_mbox_rx_process_packets()
439 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", in ath10k_sdio_mbox_rx_process_packets()
441 ret = -ENOMEM; in ath10k_sdio_mbox_rx_process_packets()
445 ep = &htc->endpoint[id]; in ath10k_sdio_mbox_rx_process_packets()
447 if (ep->service_id == 0) { in ath10k_sdio_mbox_rx_process_packets()
449 ret = -ENOMEM; in ath10k_sdio_mbox_rx_process_packets()
453 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_process_packets()
455 if (pkt->part_of_bundle && !pkt->last_in_bundle) { in ath10k_sdio_mbox_rx_process_packets()
459 lookahead_idx--; in ath10k_sdio_mbox_rx_process_packets()
471 if (!pkt->trailer_only) { in ath10k_sdio_mbox_rx_process_packets()
472 cb = ATH10K_SKB_RXCB(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
473 cb->eid = id; in ath10k_sdio_mbox_rx_process_packets()
475 skb_queue_tail(&ar_sdio->rx_head, pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
476 queue_work(ar->workqueue_aux, in ath10k_sdio_mbox_rx_process_packets()
477 &ar_sdio->async_work_rx); in ath10k_sdio_mbox_rx_process_packets()
479 kfree_skb(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
483 pkt->skb = NULL; in ath10k_sdio_mbox_rx_process_packets()
484 pkt->alloc_len = 0; in ath10k_sdio_mbox_rx_process_packets()
493 for (; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_process_packets()
494 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_process_packets()
506 u8 max_msgs = ar->htc.max_msgs_per_htc_bundle; in ath10k_sdio_mbox_alloc_bundle()
508 *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags); in ath10k_sdio_mbox_alloc_bundle()
513 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_alloc_bundle()
515 return -ENOMEM; in ath10k_sdio_mbox_alloc_bundle()
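ath10k_htc_get_bundle_count() above derives how many additional messages ride in one bundle from the HTC flags byte. A hedged sketch, assuming the count occupies the upper nibble of flags (mask 0xF0); the authoritative helper and flag masks live in htc.h:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed flag layout, for illustration only */
    #define HTC_FLAG_BUNDLE_MASK 0xF0u

    static unsigned int bundle_count_sketch(uint8_t flags)
    {
        return (flags & HTC_FLAG_BUNDLE_MASK) >> 4;
    }

    int main(void)
    {
        uint8_t flags = 0x30; /* hypothetical: three more packets bundled */
        printf("bundled msgs: %u\n", bundle_count_sketch(flags));
        return 0;
    }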
550 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
558 if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) { in ath10k_sdio_mbox_rx_alloc()
560 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_rx_alloc()
562 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
564 queue_work(ar->workqueue, &ar->restart_work); in ath10k_sdio_mbox_rx_alloc()
570 act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_alloc()
575 htc_hdr->eid, htc_hdr->flags, in ath10k_sdio_mbox_rx_alloc()
576 le16_to_cpu(htc_hdr->len)); in ath10k_sdio_mbox_rx_alloc()
577 ret = -EINVAL; in ath10k_sdio_mbox_rx_alloc()
582 ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) { in ath10k_sdio_mbox_rx_alloc()
590 &ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
612 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) in ath10k_sdio_mbox_rx_alloc()
615 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
628 ar_sdio->n_rx_pkts = pkt_cnt; in ath10k_sdio_mbox_rx_alloc()
634 if (!ar_sdio->rx_pkts[i].alloc_len) in ath10k_sdio_mbox_rx_alloc()
636 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_alloc()
645 struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0]; in ath10k_sdio_mbox_rx_fetch()
646 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_fetch()
650 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch()
651 skb->data, pkt->alloc_len); in ath10k_sdio_mbox_rx_fetch()
655 htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_fetch()
656 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch()
658 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch()
659 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch()
663 skb_put(skb, pkt->act_len); in ath10k_sdio_mbox_rx_fetch()
667 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch()
682 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
683 virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
686 ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
687 ret = -E2BIG; in ath10k_sdio_mbox_rx_fetch_bundle()
691 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch_bundle()
692 ar_sdio->vsg_buffer, virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
699 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_fetch_bundle()
700 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_fetch_bundle()
701 htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset); in ath10k_sdio_mbox_rx_fetch_bundle()
702 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch_bundle()
704 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch_bundle()
705 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch_bundle()
709 skb_put_data(pkt->skb, htc_hdr, pkt->act_len); in ath10k_sdio_mbox_rx_fetch_bundle()
710 pkt_offset += pkt->alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
717 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
718 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_fetch_bundle()
720 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch_bundle()
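The fetch loop above unpacks one bulk SDIO read into per-packet skbs: bundled packets sit back to back in vsg_buffer at alloc_len (block-padded) strides, while only the first act_len bytes of each slot are valid. A standalone sketch of that walk, with hypothetical lengths:

    #include <stdio.h>

    int main(void)
    {
        /* padded slot sizes vs. actual message lengths (assumed) */
        unsigned int alloc_len[] = { 256, 512, 256 };
        unsigned int act_len[]   = { 180, 400,  64 };
        unsigned int offset = 0;

        for (int i = 0; i < 3; i++) {
            /* copy act_len[i] bytes from vsg_buffer + offset into the
             * packet's skb, then advance by the padded slot size */
            printf("pkt %d: %u valid bytes at offset %u\n",
                   i, act_len[i], offset);
            offset += alloc_len[i];
        }
        return 0;
    }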
725 /* This is the timeout for mailbox processing done in the sdio irq
726 * handler. The timeout is deliberately set quite high since SDIO dump logs
758 if (ar_sdio->n_rx_pkts >= 2) in ath10k_sdio_mbox_rxmsg_pending_handler()
760 * re-check. in ath10k_sdio_mbox_rxmsg_pending_handler()
764 if (ar_sdio->n_rx_pkts > 1) in ath10k_sdio_mbox_rxmsg_pending_handler()
783 * flag that we should re-check IRQ status registers again in ath10k_sdio_mbox_rxmsg_pending_handler()
790 if (ret && (ret != -ECANCELED)) in ath10k_sdio_mbox_rxmsg_pending_handler()
818 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_counter_intr()
822 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
823 counter_int_status = irq_data->irq_proc_reg->counter_int_status & in ath10k_sdio_mbox_proc_counter_intr()
824 irq_data->irq_en_reg->cntr_int_status_en; in ath10k_sdio_mbox_proc_counter_intr()
835 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
843 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_err_intr()
847 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n"); in ath10k_sdio_mbox_proc_err_intr()
849 error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F; in ath10k_sdio_mbox_proc_err_intr()
853 return -EIO; in ath10k_sdio_mbox_proc_err_intr()
857 "sdio error_int_status 0x%x\n", error_int_status); in ath10k_sdio_mbox_proc_err_intr()
861 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n"); in ath10k_sdio_mbox_proc_err_intr()
872 irq_data->irq_proc_reg->error_int_status &= ~error_int_status; in ath10k_sdio_mbox_proc_err_intr()
889 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_cpu_intr()
893 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
894 cpu_int_status = irq_data->irq_proc_reg->cpu_int_status & in ath10k_sdio_mbox_proc_cpu_intr()
895 irq_data->irq_en_reg->cpu_int_status_en; in ath10k_sdio_mbox_proc_cpu_intr()
898 ret = -EIO; in ath10k_sdio_mbox_proc_cpu_intr()
903 irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status; in ath10k_sdio_mbox_proc_cpu_intr()
906 * this is done to make the access 4-byte aligned to mitigate issues in ath10k_sdio_mbox_proc_cpu_intr()
908 * be a multiple of 4-bytes. in ath10k_sdio_mbox_proc_cpu_intr()
921 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
933 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_read_int_status()
934 struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg; in ath10k_sdio_mbox_read_int_status()
935 struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg; in ath10k_sdio_mbox_read_int_status()
939 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
950 if (!irq_en_reg->int_status_en) { in ath10k_sdio_mbox_read_int_status()
963 queue_work(ar->workqueue, &ar->restart_work); in ath10k_sdio_mbox_read_int_status()
969 *host_int_status = irq_proc_reg->host_int_status & in ath10k_sdio_mbox_read_int_status()
970 irq_en_reg->int_status_en; in ath10k_sdio_mbox_read_int_status()
983 if (irq_proc_reg->rx_lookahead_valid & htc_mbox) { in ath10k_sdio_mbox_read_int_status()
985 irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]); in ath10k_sdio_mbox_read_int_status()
987 ath10k_warn(ar, "sdio mbox lookahead is zero\n"); in ath10k_sdio_mbox_read_int_status()
991 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
1024 "sdio pending mailbox msg lookahead 0x%08x\n", in ath10k_sdio_mbox_proc_pending_irqs()
1036 "sdio host_int_status 0x%x\n", host_int_status); in ath10k_sdio_mbox_proc_pending_irqs()
1060 * unnecessarily, which can re-wake the target: if upper layers in ath10k_sdio_mbox_proc_pending_irqs()
1061 * determine that we are in a low-throughput mode, we can rely on in ath10k_sdio_mbox_proc_pending_irqs()
1062 * taking another interrupt rather than re-checking the status in ath10k_sdio_mbox_proc_pending_irqs()
1063 * registers which can re-wake the target. in ath10k_sdio_mbox_proc_pending_irqs()
1072 "sdio pending irqs done %d status %d", in ath10k_sdio_mbox_proc_pending_irqs()
1081 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_set_mbox_info()
1082 u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev; in ath10k_sdio_set_mbox_info()
1084 mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1085 mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE; in ath10k_sdio_set_mbox_info()
1086 mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1; in ath10k_sdio_set_mbox_info()
1087 mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1088 mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH; in ath10k_sdio_set_mbox_info()
1090 mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1097 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1103 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1107 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1111 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1115 mbox_info->ext_info[1].htc_ext_addr = in ath10k_sdio_set_mbox_info()
1116 mbox_info->ext_info[0].htc_ext_addr + in ath10k_sdio_set_mbox_info()
1117 mbox_info->ext_info[0].htc_ext_sz + in ath10k_sdio_set_mbox_info()
1119 mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH; in ath10k_sdio_set_mbox_info()
1136 /* Hit the credit counter with a 4-byte access, the first byte in ath10k_sdio_bmi_credits()
1138 * remaining 3 bytes have no effect. The rationale behind this in ath10k_sdio_bmi_credits()
1139 * is to make all HIF accesses 4-byte aligned. in ath10k_sdio_bmi_credits()
1157 return -ETIMEDOUT; in ath10k_sdio_bmi_credits()
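A sketch of what the comment above describes: the credit counter is a byte-wide register read with a 4-byte access to keep HIF transfers aligned, so only byte 0 of the returned word is meaningful (the masking below is an assumption consistent with that comment).

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t credits_from_word(uint32_t word)
    {
        /* the counter is 8 bits wide; the upper 3 bytes are ignored */
        return word & 0xff;
    }

    int main(void)
    {
        uint32_t raw = 0xdeadbe05; /* hypothetical 32-bit register read */
        printf("credits: %u\n", credits_from_word(raw)); /* 5 */
        return 0;
    }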
1187 return -EINVAL; in ath10k_sdio_bmi_get_rx_lookahead()
1206 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1208 memcpy(ar_sdio->bmi_buf, req, req_len); in ath10k_sdio_bmi_exchange_msg()
1209 ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len); in ath10k_sdio_bmi_exchange_msg()
1219 /* No response expected */ in ath10k_sdio_bmi_exchange_msg()
1229 * In particular, this avoids SDIO timeouts and possibly garbage in ath10k_sdio_bmi_exchange_msg()
1231 * such as Compact Flash (as well as some SDIO masters) which in ath10k_sdio_bmi_exchange_msg()
1241 * not occur in practice -- they're supported for debug/development. in ath10k_sdio_bmi_exchange_msg()
1262 * If BMI_EXECUTE ever needs to support longer-latency execution, in ath10k_sdio_bmi_exchange_msg()
1272 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1273 ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
1281 memcpy(resp, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
1286 /* sdio async handling functions */
1294 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1296 if (list_empty(&ar_sdio->bus_req_freeq)) { in ath10k_sdio_alloc_busreq()
1301 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, in ath10k_sdio_alloc_busreq()
1303 list_del(&bus_req->list); in ath10k_sdio_alloc_busreq()
1306 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1317 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1318 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); in ath10k_sdio_free_bus_req()
1319 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1329 skb = req->skb; in __ath10k_sdio_write_async()
1330 ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len); in __ath10k_sdio_write_async()
1333 req->address, ret); in __ath10k_sdio_write_async()
1335 if (req->htc_msg) { in __ath10k_sdio_write_async()
1336 ep = &ar->htc.endpoint[req->eid]; in __ath10k_sdio_write_async()
1338 } else if (req->comp) { in __ath10k_sdio_write_async()
1339 complete(req->comp); in __ath10k_sdio_write_async()
1346 * this way the SDIO bus is utilised much better.
1352 struct ath10k *ar = ar_sdio->ar; in ath10k_rx_indication_async_work()
1358 skb = skb_dequeue(&ar_sdio->rx_head); in ath10k_rx_indication_async_work()
1362 ep = &ar->htc.endpoint[cb->eid]; in ath10k_rx_indication_async_work()
1363 ep->ep_ops.ep_rx_complete(ar, skb); in ath10k_rx_indication_async_work()
1366 if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) in ath10k_rx_indication_async_work()
1367 napi_schedule(&ar->napi); in ath10k_rx_indication_async_work()
1372 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_read_rtc_state()
1376 rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret); in ath10k_sdio_read_rtc_state()
1394 sdio_claim_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1405 ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE; in ath10k_sdio_set_mbox_sleep()
1408 ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE; in ath10k_sdio_set_mbox_sleep()
1427 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n", in ath10k_sdio_set_mbox_sleep()
1434 retry--; in ath10k_sdio_set_mbox_sleep()
1439 sdio_release_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1448 ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE; in ath10k_sdio_sleep_timer_handler()
1449 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_sleep_timer_handler()
1456 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_write_async_work()
1458 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_write_async_work()
1460 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1462 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_write_async_work()
1463 list_del(&req->list); in ath10k_sdio_write_async_work()
1464 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1466 if (req->address >= mbox_info->htc_addr && in ath10k_sdio_write_async_work()
1467 ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) { in ath10k_sdio_write_async_work()
1469 mod_timer(&ar_sdio->sleep_timer, jiffies + in ath10k_sdio_write_async_work()
1474 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1477 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1479 if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE) in ath10k_sdio_write_async_work()
1492 * SDIO workqueue. in ath10k_sdio_prep_async_req()
1498 return -ENOMEM; in ath10k_sdio_prep_async_req()
1501 bus_req->skb = skb; in ath10k_sdio_prep_async_req()
1502 bus_req->eid = eid; in ath10k_sdio_prep_async_req()
1503 bus_req->address = addr; in ath10k_sdio_prep_async_req()
1504 bus_req->htc_msg = htc_msg; in ath10k_sdio_prep_async_req()
1505 bus_req->comp = comp; in ath10k_sdio_prep_async_req()
1507 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
1508 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); in ath10k_sdio_prep_async_req()
1509 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
1519 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_irq_handler()
1527 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1538 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1540 if (ret && ret != -ECANCELED) in ath10k_sdio_irq_handler()
1541 ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n", in ath10k_sdio_irq_handler()
1545 /* sdio HIF functions */
1550 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_disable_intrs()
1551 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_disable_intrs()
1554 mutex_lock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1558 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_disable_intrs()
1560 ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret); in ath10k_sdio_disable_intrs()
1562 mutex_unlock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1571 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_hif_power_up()
1574 if (!ar_sdio->is_disabled) in ath10k_sdio_hif_power_up()
1577 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); in ath10k_sdio_hif_power_up()
1581 ath10k_err(ar, "failed to config sdio: %d\n", ret); in ath10k_sdio_hif_power_up()
1589 ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret); in ath10k_sdio_hif_power_up()
1601 ar_sdio->is_disabled = false; in ath10k_sdio_hif_power_up()
1615 if (ar_sdio->is_disabled) in ath10k_sdio_hif_power_down()
1618 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); in ath10k_sdio_hif_power_down()
1620 del_timer_sync(&ar_sdio->sleep_timer); in ath10k_sdio_hif_power_down()
1624 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1626 ret = sdio_disable_func(ar_sdio->func); in ath10k_sdio_hif_power_down()
1628 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); in ath10k_sdio_hif_power_down()
1629 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1633 ret = mmc_hw_reset(ar_sdio->func->card->host); in ath10k_sdio_hif_power_down()
1635 ath10k_warn(ar, "unable to reset sdio: %d\n", ret); in ath10k_sdio_hif_power_down()
1637 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1639 ar_sdio->is_disabled = true; in ath10k_sdio_hif_power_down()
1658 skb->len); in ath10k_sdio_hif_tx_sg()
1662 address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] - in ath10k_sdio_hif_tx_sg()
1663 skb->len; in ath10k_sdio_hif_tx_sg()
1670 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_hif_tx_sg()
1678 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_enable_intrs()
1679 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_enable_intrs()
1682 mutex_lock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
1685 regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) | in ath10k_sdio_enable_intrs()
1692 regs->int_status_en |= in ath10k_sdio_enable_intrs()
1698 regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1); in ath10k_sdio_enable_intrs()
1701 regs->err_int_status_en = in ath10k_sdio_enable_intrs()
1708 regs->cntr_int_status_en = in ath10k_sdio_enable_intrs()
1713 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_enable_intrs()
1719 mutex_unlock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
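The interrupt-enable image above is built with FIELD_PREP(), which shifts a value into the bit positions selected by a mask. A simplified userspace approximation for the single-bit enable fields used here (the kernel macro also compile-time checks the mask; the masks below are hypothetical stand-ins for MBOX_INT_STATUS_ENABLE_*):

    #include <stdint.h>
    #include <stdio.h>

    /* simplified FIELD_PREP: place val at the mask's lowest set bit */
    #define FIELD_PREP_SKETCH(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
        uint32_t err_en  = FIELD_PREP_SKETCH(0x80u, 1); /* bit 7 */
        uint32_t mbox_en = FIELD_PREP_SKETCH(0x01u, 1); /* bit 0 */

        printf("int_status_en = 0x%02x\n",
               (unsigned int)(err_en | mbox_en)); /* 0x81 */
        return 0;
    }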
1733 return -ENOMEM; in ath10k_sdio_hif_diag_read()
1766 return -ENOMEM; in ath10k_sdio_diag_read32()
1820 "sdio mailbox swap service enabled\n"); in ath10k_sdio_hif_start_post()
1821 ar_sdio->swap_mbox = true; in ath10k_sdio_hif_start_post()
1824 "sdio mailbox swap service disabled\n"); in ath10k_sdio_hif_start_post()
1825 ar_sdio->swap_mbox = false; in ath10k_sdio_hif_start_post()
1849 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n", in ath10k_sdio_get_htt_tx_complete()
1862 napi_enable(&ar->napi); in ath10k_sdio_hif_start()
1876 ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_start()
1877 ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_start()
1879 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_start()
1882 ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler); in ath10k_sdio_hif_start()
1884 ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret); in ath10k_sdio_hif_start()
1885 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1889 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1893 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); in ath10k_sdio_hif_start()
1915 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_irq_disable()
1916 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_irq_disable()
1925 mutex_lock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1928 memcpy(skb->data, regs, sizeof(*regs)); in ath10k_sdio_irq_disable()
1931 mutex_unlock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1939 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_irq_disable()
1947 ath10k_warn(ar, "sdio irq disable request timed out\n"); in ath10k_sdio_irq_disable()
1949 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1951 ret = sdio_release_irq(ar_sdio->func); in ath10k_sdio_irq_disable()
1953 ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret); in ath10k_sdio_irq_disable()
1955 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1968 cancel_work_sync(&ar_sdio->wr_async_work); in ath10k_sdio_hif_stop()
1970 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
1973 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_hif_stop()
1976 list_del(&req->list); in ath10k_sdio_hif_stop()
1978 if (req->htc_msg) { in ath10k_sdio_hif_stop()
1979 ep = &ar->htc.endpoint[req->eid]; in ath10k_sdio_hif_stop()
1980 ath10k_htc_notify_tx_completion(ep, req->skb); in ath10k_sdio_hif_stop()
1981 } else if (req->skb) { in ath10k_sdio_hif_stop()
1982 kfree_skb(req->skb); in ath10k_sdio_hif_stop()
1987 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
1989 napi_synchronize(&ar->napi); in ath10k_sdio_hif_stop()
1990 napi_disable(&ar->napi); in ath10k_sdio_hif_stop()
2002 switch (ar->state) { in ath10k_sdio_hif_resume()
2005 "sdio resume configuring sdio\n"); in ath10k_sdio_hif_resume()
2007 /* need to set sdio settings after power is cut from sdio */ in ath10k_sdio_hif_resume()
2025 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_hif_map_service_to_pipe()
2031 /* For sdio, we are interested in the mapping between eid in ath10k_sdio_hif_map_service_to_pipe()
2037 if (htc->endpoint[i].service_id == service_id) { in ath10k_sdio_hif_map_service_to_pipe()
2038 eid = htc->endpoint[i].eid; in ath10k_sdio_hif_map_service_to_pipe()
2045 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
2057 if (ar_sdio->swap_mbox) { in ath10k_sdio_hif_map_service_to_pipe()
2058 htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2059 wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2060 htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2061 wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2063 htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2064 wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2065 htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2066 wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2076 ar_sdio->mbox_addr[eid] = wmi_addr; in ath10k_sdio_hif_map_service_to_pipe()
2077 ar_sdio->mbox_size[eid] = wmi_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2079 "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2080 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2083 ar_sdio->mbox_addr[eid] = htt_addr; in ath10k_sdio_hif_map_service_to_pipe()
2084 ar_sdio->mbox_size[eid] = htt_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2086 "sdio htt data mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2087 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2092 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
2101 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n"); in ath10k_sdio_hif_get_default_pipe()
2138 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_pm_suspend()
2142 if (!device_may_wakeup(ar->dev)) in ath10k_sdio_pm_suspend()
2152 ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n", in ath10k_sdio_pm_suspend()
2232 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param); in ath10k_sdio_is_fast_dump_supported()
2277 crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]); in ath10k_sdio_dump_registers()
2291 cur_section = &mem_region->section_table.sections[0]; in ath10k_sdio_dump_memory_section()
2293 if (mem_region->start > cur_section->start) { in ath10k_sdio_dump_memory_section()
2295 mem_region->start, cur_section->start); in ath10k_sdio_dump_memory_section()
2299 skip_size = cur_section->start - mem_region->start; in ath10k_sdio_dump_memory_section()
2312 section_size = cur_section->end - cur_section->start; in ath10k_sdio_dump_memory_section()
2316 cur_section->start, in ath10k_sdio_dump_memory_section()
2317 cur_section->end); in ath10k_sdio_dump_memory_section()
2321 if ((i + 1) == mem_region->section_table.size) { in ath10k_sdio_dump_memory_section()
2328 if (cur_section->end > next_section->start) { in ath10k_sdio_dump_memory_section()
2330 next_section->start, in ath10k_sdio_dump_memory_section()
2331 cur_section->end); in ath10k_sdio_dump_memory_section()
2335 skip_size = next_section->start - cur_section->end; in ath10k_sdio_dump_memory_section()
2343 buf_len -= skip_size + section_size; in ath10k_sdio_dump_memory_section()
2346 ret = ath10k_sdio_read_mem(ar, cur_section->start, in ath10k_sdio_dump_memory_section()
2350 cur_section->start, ret); in ath10k_sdio_dump_memory_section()
2383 if (current_region->section_table.size > 0) in ath10k_sdio_dump_memory_generic()
2388 current_region->len); in ath10k_sdio_dump_memory_generic()
2390 /* No individual memory sections defined so we can in ath10k_sdio_dump_memory_generic()
2395 current_region->start, in ath10k_sdio_dump_memory_generic()
2397 current_region->len); in ath10k_sdio_dump_memory_generic()
2400 current_region->start, in ath10k_sdio_dump_memory_generic()
2402 current_region->len); in ath10k_sdio_dump_memory_generic()
2406 current_region->name, ret); in ath10k_sdio_dump_memory_generic()
2410 return current_region->len; in ath10k_sdio_dump_memory_generic()
2432 current_region = &mem_layout->region_table.regions[0]; in ath10k_sdio_dump_memory()
2434 buf = crash_data->ramdump_buf; in ath10k_sdio_dump_memory()
2435 buf_len = crash_data->ramdump_buf_len; in ath10k_sdio_dump_memory()
2439 for (i = 0; i < mem_layout->region_table.size; i++) { in ath10k_sdio_dump_memory()
2442 if (current_region->len > buf_len) { in ath10k_sdio_dump_memory()
2444 current_region->name, in ath10k_sdio_dump_memory()
2445 current_region->len, in ath10k_sdio_dump_memory()
2453 buf_len -= sizeof(*hdr); in ath10k_sdio_dump_memory()
2460 hdr->region_type = cpu_to_le32(current_region->type); in ath10k_sdio_dump_memory()
2461 hdr->start = cpu_to_le32(current_region->start); in ath10k_sdio_dump_memory()
2462 hdr->length = cpu_to_le32(count); in ath10k_sdio_dump_memory()
2469 buf_len -= count; in ath10k_sdio_dump_memory()
2486 ar->stats.fw_crash_counter++; in ath10k_sdio_fw_crashed_dump()
2493 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); in ath10k_sdio_fw_crashed_dump()
2504 queue_work(ar->workqueue, &ar->restart_work); in ath10k_sdio_fw_crashed_dump()
2517 /* Assumption: All SDIO-based chipsets (so far) are QCA6174-based. in ath10k_sdio_probe()
2520 * assumption is no longer valid and hw_rev must be set up differently in ath10k_sdio_probe()
2525 ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO, in ath10k_sdio_probe()
2528 dev_err(&func->dev, "failed to allocate core\n"); in ath10k_sdio_probe()
2529 return -ENOMEM; in ath10k_sdio_probe()
2532 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll, in ath10k_sdio_probe()
2536 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", in ath10k_sdio_probe()
2537 func->num, func->vendor, func->device, in ath10k_sdio_probe()
2538 func->max_blksize, func->cur_blksize); in ath10k_sdio_probe()
2542 ar_sdio->irq_data.irq_proc_reg = in ath10k_sdio_probe()
2543 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), in ath10k_sdio_probe()
2545 if (!ar_sdio->irq_data.irq_proc_reg) { in ath10k_sdio_probe()
2546 ret = -ENOMEM; in ath10k_sdio_probe()
2550 ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2551 if (!ar_sdio->vsg_buffer) { in ath10k_sdio_probe()
2552 ret = -ENOMEM; in ath10k_sdio_probe()
2556 ar_sdio->irq_data.irq_en_reg = in ath10k_sdio_probe()
2557 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), in ath10k_sdio_probe()
2559 if (!ar_sdio->irq_data.irq_en_reg) { in ath10k_sdio_probe()
2560 ret = -ENOMEM; in ath10k_sdio_probe()
2564 ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2565 if (!ar_sdio->bmi_buf) { in ath10k_sdio_probe()
2566 ret = -ENOMEM; in ath10k_sdio_probe()
2570 ar_sdio->func = func; in ath10k_sdio_probe()
2573 ar_sdio->is_disabled = true; in ath10k_sdio_probe()
2574 ar_sdio->ar = ar; in ath10k_sdio_probe()
2576 spin_lock_init(&ar_sdio->lock); in ath10k_sdio_probe()
2577 spin_lock_init(&ar_sdio->wr_async_lock); in ath10k_sdio_probe()
2578 mutex_init(&ar_sdio->irq_data.mtx); in ath10k_sdio_probe()
2580 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); in ath10k_sdio_probe()
2581 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); in ath10k_sdio_probe()
2583 INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work); in ath10k_sdio_probe()
2584 ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); in ath10k_sdio_probe()
2585 if (!ar_sdio->workqueue) { in ath10k_sdio_probe()
2586 ret = -ENOMEM; in ath10k_sdio_probe()
2591 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); in ath10k_sdio_probe()
2593 skb_queue_head_init(&ar_sdio->rx_head); in ath10k_sdio_probe()
2594 INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work); in ath10k_sdio_probe()
2596 dev_id_base = (id->device & 0x0F00); in ath10k_sdio_probe()
2599 ret = -ENODEV; in ath10k_sdio_probe()
2601 dev_id_base, id->device); in ath10k_sdio_probe()
2605 ar->dev_id = QCA9377_1_0_DEVICE_ID; in ath10k_sdio_probe()
2606 ar->id.vendor = id->vendor; in ath10k_sdio_probe()
2607 ar->id.device = id->device; in ath10k_sdio_probe()
2612 /* TODO: don't know yet how to get chip_id with SDIO */ in ath10k_sdio_probe()
2616 ar->hw->max_mtu = ETH_DATA_LEN; in ath10k_sdio_probe()
2624 timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0); in ath10k_sdio_probe()
2629 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_probe()
2639 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_remove()
2642 "sdio removed func %d vendor 0x%x device 0x%x\n", in ath10k_sdio_remove()
2643 func->num, func->vendor, func->device); in ath10k_sdio_remove()
2647 netif_napi_del(&ar->napi); in ath10k_sdio_remove()
2651 flush_workqueue(ar_sdio->workqueue); in ath10k_sdio_remove()
2652 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_remove()
2661 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2680 pr_err("sdio driver registration failed: %d\n", ret); in ath10k_sdio_init()
2694 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");