Lines Matching +full:rx +full:- +full:num +full:- +full:evt

1 // SPDX-License-Identifier: GPL-2.0-or-later
89 * @ver: Version used for driver-firmware compatibility
91 * @num_buf: Number of allocated debug buffers
112 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT; in btintel_pcie_setup_dbgc()
113 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count, in btintel_pcie_setup_dbgc()
115 if (!data->dbgc.bufs) in btintel_pcie_setup_dbgc()
116 return -ENOMEM; in btintel_pcie_setup_dbgc()
118 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_dbgc()
119 data->dbgc.count * in btintel_pcie_setup_dbgc()
121 &data->dbgc.buf_p_addr, in btintel_pcie_setup_dbgc()
123 if (!data->dbgc.buf_v_addr) in btintel_pcie_setup_dbgc()
124 return -ENOMEM; in btintel_pcie_setup_dbgc()
126 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_dbgc()
128 &data->dbgc.frag_p_addr, in btintel_pcie_setup_dbgc()
130 if (!data->dbgc.frag_v_addr) in btintel_pcie_setup_dbgc()
131 return -ENOMEM; in btintel_pcie_setup_dbgc()
133 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt); in btintel_pcie_setup_dbgc()
140 for (i = 0; i < data->dbgc.count; i++) { in btintel_pcie_setup_dbgc()
141 buf = &data->dbgc.bufs[i]; in btintel_pcie_setup_dbgc()
142 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; in btintel_pcie_setup_dbgc()
143 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; in btintel_pcie_setup_dbgc()
144 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr); in btintel_pcie_setup_dbgc()
145 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr); in btintel_pcie_setup_dbgc()
149 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag)); in btintel_pcie_setup_dbgc()
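
A minimal standalone sketch of the carving idiom used in btintel_pcie_setup_dbgc() above: one contiguous DMA-coherent allocation is split into count fixed-size buffers, with the CPU pointer and the bus address advancing by the same stride so slot i names the same bytes through either view. The names and sizes below are illustrative stand-ins, not the driver's constants.

#include <stddef.h>
#include <stdint.h>

#define DBG_BUF_COUNT 2         /* stand-in for BTINTEL_PCIE_DBGC_BUFFER_COUNT */
#define DBG_BUF_SIZE  4096      /* stand-in for BTINTEL_PCIE_DBGC_BUFFER_SIZE */

struct dbg_buf {
        uint64_t data_p_addr;   /* device-visible (DMA) address of this slot */
        void *data;             /* CPU virtual address of the same slot */
};

static void carve_dbg_bufs(struct dbg_buf bufs[DBG_BUF_COUNT],
                           void *v_base, uint64_t p_base)
{
        /* Both views advance by one fixed stride per slot, so slot i
         * always refers to the same physical bytes through either address.
         */
        for (int i = 0; i < DBG_BUF_COUNT; i++) {
                bufs[i].data_p_addr = p_base + (uint64_t)i * DBG_BUF_SIZE;
                bufs[i].data = (uint8_t *)v_base + (size_t)i * DBG_BUF_SIZE;
        }
}
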
156 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u", in ipc_print_ia_ring()
158 ia->tr_hia[queue_num], ia->tr_tia[queue_num], in ipc_print_ia_ring()
159 ia->cr_hia[queue_num], ia->cr_tia[queue_num]); in ipc_print_ia_ring()
166 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed); in ipc_print_urbd1()
171 u8 queue = entry->entry; in btintel_pcie_get_data()
172 struct msix_entry *entries = entry - queue; in btintel_pcie_get_data()
177 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
199 tfd = &txq->tfds[tfd_index]; in btintel_pcie_prepare_tx()
202 buf = &txq->bufs[tfd_index]; in btintel_pcie_prepare_tx()
204 tfd->size = skb->len; in btintel_pcie_prepare_tx()
205 tfd->addr = buf->data_p_addr; in btintel_pcie_prepare_tx()
208 memcpy(buf->data, skb->data, tfd->size); in btintel_pcie_prepare_tx()
216 struct txq *txq = &data->txq; in btintel_pcie_send_sync()
218 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_send_sync()
220 if (tfd_index > txq->count) in btintel_pcie_send_sync()
221 return -ERANGE; in btintel_pcie_send_sync()
228 tfd_index = (tfd_index + 1) % txq->count; in btintel_pcie_send_sync()
229 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index; in btintel_pcie_send_sync()
232 data->tx_wait_done = false; in btintel_pcie_send_sync()
237 /* Wait for the completion interrupt - URBD0 */ in btintel_pcie_send_sync()
238 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done, in btintel_pcie_send_sync()
241 return -ETIME; in btintel_pcie_send_sync()
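
The TX path above is a produce/doorbell/wait sequence: stage the frame in the slot at the ring head, advance the head with wraparound, ring the doorbell, then sleep until the completion interrupt (URBD0) arrives. A hedged, compilable model of that control flow; the two helpers are placeholders for the MMIO doorbell write and wait_event_timeout(), not the driver's API:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct tx_ring {
        unsigned int head, count;
        unsigned char (*slots)[4096];   /* one DMA buffer per descriptor */
};

static void ring_txq_doorbell(unsigned int next_head) { (void)next_head; }
static bool wait_for_urbd0(unsigned int timeout_ms) { (void)timeout_ms; return true; }

/* Caller guarantees len fits in one slot, as the driver's setup does. */
static int tx_send_sync(struct tx_ring *txq, const void *frame, size_t len)
{
        unsigned int idx = txq->head;

        if (idx >= txq->count)
                return -1;                      /* -ERANGE in the driver */

        memcpy(txq->slots[idx], frame, len);    /* prepare_tx(): fill the TFD slot */
        txq->head = (idx + 1) % txq->count;     /* producer advances, wraps */

        ring_txq_doorbell(txq->head);           /* tell the device data is ready */

        if (!wait_for_urbd0(2000))              /* block for the completion */
                return -1;                      /* -ETIME in the driver */
        return 0;
}
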
246 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
268 buf = &rxq->bufs[frbd_index]; in btintel_pcie_prepare_rx()
270 frbd = &rxq->frbds[frbd_index]; in btintel_pcie_prepare_rx()
274 frbd->tag = frbd_index; in btintel_pcie_prepare_rx()
275 frbd->addr = buf->data_p_addr; in btintel_pcie_prepare_rx()
281 struct rxq *rxq = &data->rxq; in btintel_pcie_submit_rx()
283 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_submit_rx()
285 if (frbd_index > rxq->count) in btintel_pcie_submit_rx()
286 return -ERANGE; in btintel_pcie_submit_rx()
288 /* Prepare for RX submit. It updates the FRBD with the address of DMA in btintel_pcie_submit_rx()
293 frbd_index = (frbd_index + 1) % rxq->count; in btintel_pcie_submit_rx()
294 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index; in btintel_pcie_submit_rx()
295 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); in btintel_pcie_submit_rx()
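
RX submission mirrors TX: each free receive buffer descriptor (FRBD) is stamped with its tag and the DMA address of the matching data buffer before the head index advances. A small illustrative model of one refill step (struct shapes assumed from the excerpt, not taken from the driver headers):

#include <stdint.h>

struct frbd { uint16_t tag; uint64_t addr; };           /* assumed shape */
struct rx_buf { uint64_t data_p_addr; void *data; };

/* Hand buffer `head` back to the device. The tag lets the completion
 * (URBD1) name the buffer it consumed. Returns the new head (tr_hia).
 */
static unsigned int rx_refill_one(struct frbd *frbds, const struct rx_buf *bufs,
                                  unsigned int head, unsigned int count)
{
        frbds[head].tag = (uint16_t)head;
        frbds[head].addr = bufs[head].data_p_addr;
        return (head + 1) % count;
}
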
318 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
319 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
320 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
321 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
344 } while (--retry > 0); in btintel_pcie_reset_bt()
357 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg); in btintel_pcie_reset_bt()
364 return reg == 0 ? 0 : -ENODEV; in btintel_pcie_reset_bt()
388 return -ENOMEM; in btintel_pcie_add_dmp_data()
421 } while (--retry > 0); in btintel_pcie_get_mac_access()
423 return -ETIME; in btintel_pcie_get_mac_access()
450 tlv->type = type; in btintel_pcie_copy_tlv()
451 tlv->len = size; in btintel_pcie_copy_tlv()
452 memcpy(tlv->val, data, tlv->len); in btintel_pcie_copy_tlv()
458 struct btintel_pcie_dbgc *dbgc = &data->dbgc; in btintel_pcie_read_dram_buffers()
460 struct hci_dev *hdev = data->hdev; in btintel_pcie_read_dram_buffers()
473 if (buf_idx > dbgc->count) { in btintel_pcie_read_dram_buffers()
475 return -EINVAL; in btintel_pcie_read_dram_buffers()
480 data->dmp_hdr.write_ptr = prev_size + offset; in btintel_pcie_read_dram_buffers()
482 return -EINVAL; in btintel_pcie_read_dram_buffers()
486 dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d", in btintel_pcie_read_dram_buffers()
490 fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len, in btintel_pcie_read_dram_buffers()
492 2000 + (data->dmp_hdr.fw_timestamp >> 8), in btintel_pcie_read_dram_buffers()
493 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type, in btintel_pcie_read_dram_buffers()
494 data->dmp_hdr.fw_build_num); in btintel_pcie_read_dram_buffers()
496 hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) + in btintel_pcie_read_dram_buffers()
497 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) + in btintel_pcie_read_dram_buffers()
498 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) + in btintel_pcie_read_dram_buffers()
499 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) + in btintel_pcie_read_dram_buffers()
500 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) + in btintel_pcie_read_dram_buffers()
501 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) + in btintel_pcie_read_dram_buffers()
502 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) + in btintel_pcie_read_dram_buffers()
510 return -ENOMEM; in btintel_pcie_read_dram_buffers()
513 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count; in btintel_pcie_read_dram_buffers()
524 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt, in btintel_pcie_read_dram_buffers()
525 sizeof(data->dmp_hdr.cnvi_bt)); in btintel_pcie_read_dram_buffers()
527 btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr, in btintel_pcie_read_dram_buffers()
528 sizeof(data->dmp_hdr.write_ptr)); in btintel_pcie_read_dram_buffers()
530 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data, in btintel_pcie_read_dram_buffers()
533 btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr, in btintel_pcie_read_dram_buffers()
534 sizeof(data->dmp_hdr.wrap_ctr)); in btintel_pcie_read_dram_buffers()
536 btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason, in btintel_pcie_read_dram_buffers()
537 sizeof(data->dmp_hdr.trigger_reason)); in btintel_pcie_read_dram_buffers()
539 btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1, in btintel_pcie_read_dram_buffers()
540 sizeof(data->dmp_hdr.fw_git_sha1)); in btintel_pcie_read_dram_buffers()
542 btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top, in btintel_pcie_read_dram_buffers()
543 sizeof(data->dmp_hdr.cnvr_top)); in btintel_pcie_read_dram_buffers()
545 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top, in btintel_pcie_read_dram_buffers()
546 sizeof(data->dmp_hdr.cnvi_top)); in btintel_pcie_read_dram_buffers()
556 for (i = 0; i < dbgc->count; i++) { in btintel_pcie_read_dram_buffers()
557 ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data, in btintel_pcie_read_dram_buffers()
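
The coredump above is framed as TLV records: every field costs sizeof(*tlv) of header plus its payload, which is exactly how hdr_len is summed before the skb is sized. A minimal model of the record layout and append step; the struct shape is inferred from usage here, not copied from the driver headers:

#include <stdint.h>
#include <string.h>

/* Assumed TLV framing: 1-byte type, 2-byte little-endian length, payload. */
struct tlv_rec {
        uint8_t type;
        uint16_t len;
        uint8_t val[];
} __attribute__((packed));

/* Append one record at dst; returns bytes consumed so a caller can
 * budget a header the same way hdr_len is accumulated above.
 */
static size_t tlv_append(uint8_t *dst, uint8_t type,
                         const void *payload, uint16_t len)
{
        struct tlv_rec *rec = (struct tlv_rec *)dst;

        rec->type = type;
        rec->len = len;                 /* le16 conversion elided in this model */
        memcpy(rec->val, payload, len);
        return sizeof(*rec) + len;
}
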
590 u16 len = skb->len; in btintel_pcie_dump_hdr()
597 INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt)); in btintel_pcie_dump_hdr()
601 data->dmp_hdr.fw_build_num); in btintel_pcie_dump_hdr()
604 snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name); in btintel_pcie_dump_hdr()
610 *hdrlen_ptr = skb->len - len; in btintel_pcie_dump_hdr()
619 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE; in btintel_pcie_dump_notify()
622 data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE; in btintel_pcie_dump_notify()
627 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE; in btintel_pcie_dump_notify()
633 * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
643 data->gp0_received = false; in btintel_pcie_enable_bt()
647 data->ci_p_addr & 0xffffffff); in btintel_pcie_enable_bt()
649 (u64)data->ci_p_addr >> 32); in btintel_pcie_enable_bt()
651 /* Reset the cached value of the boot stage. It is updated by the MSI-X in btintel_pcie_enable_bt()
654 data->boot_stage_cache = 0x0; in btintel_pcie_enable_bt()
675 data->alive_intr_ctxt = BTINTEL_PCIE_ROM; in btintel_pcie_enable_bt()
676 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received, in btintel_pcie_enable_bt()
679 return -ETIME; in btintel_pcie_enable_bt()
682 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM) in btintel_pcie_enable_bt()
683 return -ENODEV; in btintel_pcie_enable_bt()
690 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW; in btintel_pcie_in_op()
695 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML && in btintel_pcie_in_iml()
696 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW); in btintel_pcie_in_iml()
701 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY; in btintel_pcie_in_d3()
706 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY); in btintel_pcie_in_d0()
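
The four predicates above are plain bit tests against the cached boot-stage CSR: OP firmware, IML and D3-ready each have their own bit, and "in IML" additionally requires that OP firmware is not yet running. Sketched with illustrative bit positions (not the real register layout):

#include <stdbool.h>
#include <stdint.h>

#define BOOT_STAGE_ROM  (1u << 0)       /* illustrative bits only */
#define BOOT_STAGE_IML  (1u << 1)
#define BOOT_STAGE_OPFW (1u << 2)
#define BOOT_STAGE_D3   (1u << 3)

static bool in_iml(uint32_t stage)
{
        /* IML only counts while operational firmware is not yet up. */
        return (stage & BOOT_STAGE_IML) && !(stage & BOOT_STAGE_OPFW);
}

static bool in_d0(uint32_t stage)
{
        return !(stage & BOOT_STAGE_D3);        /* D0 is simply "not D3-ready" */
}
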
712 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate); in btintel_pcie_wr_sleep_cntrl()
747 bt_dev_err(data->hdev, "Failed to get mac access %d", err); in btintel_pcie_read_device_mem()
751 for (; len > 0; len -= 4, dev_addr += 4, val++) in btintel_pcie_read_device_mem()
759 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
772 if (reg != data->boot_stage_cache) in btintel_pcie_msix_gp0_handler()
773 data->boot_stage_cache = reg; in btintel_pcie_msix_gp0_handler()
775 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x", in btintel_pcie_msix_gp0_handler()
776 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt), in btintel_pcie_msix_gp0_handler()
777 data->boot_stage_cache, reg); in btintel_pcie_msix_gp0_handler()
779 if (reg != data->img_resp_cache) in btintel_pcie_msix_gp0_handler()
780 data->img_resp_cache = reg; in btintel_pcie_msix_gp0_handler()
782 data->gp0_received = true; in btintel_pcie_msix_gp0_handler()
784 old_ctxt = data->alive_intr_ctxt; in btintel_pcie_msix_gp0_handler()
788 switch (data->alive_intr_ctxt) { in btintel_pcie_msix_gp0_handler()
790 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL; in btintel_pcie_msix_gp0_handler()
806 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL; in btintel_pcie_msix_gp0_handler()
811 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) { in btintel_pcie_msix_gp0_handler()
812 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0); in btintel_pcie_msix_gp0_handler()
813 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
818 data->alive_intr_ctxt = BTINTEL_PCIE_D3; in btintel_pcie_msix_gp0_handler()
825 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
832 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
837 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x", in btintel_pcie_msix_gp0_handler()
838 data->alive_intr_ctxt); in btintel_pcie_msix_gp0_handler()
848 bt_dev_dbg(data->hdev, "wake up gp0 wait_q"); in btintel_pcie_msix_gp0_handler()
849 wake_up(&data->gp0_wait_q); in btintel_pcie_msix_gp0_handler()
852 if (old_ctxt != data->alive_intr_ctxt) in btintel_pcie_msix_gp0_handler()
853 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s", in btintel_pcie_msix_gp0_handler()
855 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_msix_gp0_handler()
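
The gp0 handler is a small state machine over data->alive_intr_ctxt: each alive interrupt maps the current context to the next one (ROM and HCI reset lead to firmware download, D0 and D3 toggle on power transitions) and then wakes gp0_wait_q. A compressed model of one transition step, abbreviating the states visible in the excerpt:

#include <stdbool.h>

enum alive_ctxt { CTX_ROM, CTX_FW_DL, CTX_HCI_RESET, CTX_D0, CTX_D3 };

/* One gp0 interrupt: derive the next context from the current one.
 * Unknown contexts are left unchanged (the driver also logs them).
 */
static enum alive_ctxt gp0_step(enum alive_ctxt cur, bool entered_d3)
{
        switch (cur) {
        case CTX_ROM:
        case CTX_HCI_RESET:
                return CTX_FW_DL;       /* firmware download may start */
        case CTX_D0:
                return entered_d3 ? CTX_D3 : CTX_D0;
        case CTX_D3:
                return CTX_D0;          /* resumed */
        default:
                return cur;
        }
}
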
858 /* This function handles the MSI-X interrupt for rx queue 0 which is for TX
866 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_msix_tx_handle()
867 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_msix_tx_handle()
872 txq = &data->txq; in btintel_pcie_msix_tx_handle()
875 data->tx_wait_done = true; in btintel_pcie_msix_tx_handle()
876 wake_up(&data->tx_wait_q); in btintel_pcie_msix_tx_handle()
878 urbd0 = &txq->urbd0s[cr_tia]; in btintel_pcie_msix_tx_handle()
880 if (urbd0->tfd_index > txq->count) in btintel_pcie_msix_tx_handle()
883 cr_tia = (cr_tia + 1) % txq->count; in btintel_pcie_msix_tx_handle()
884 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia; in btintel_pcie_msix_tx_handle()
885 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM); in btintel_pcie_msix_tx_handle()
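
TX acknowledgements are consumed by walking the completion tail (cr_tia) toward the head (cr_hia): each URBD0 names the TFD it completes, the synchronous sender is woken, and the tail advances with wrap until it meets the head. The same loop in miniature:

struct urbd0 { unsigned int tfd_index; };       /* completion descriptor (assumed) */

/* Drain acknowledged TX descriptors; returns the new tail (cr_tia). */
static unsigned int drain_tx_acks(const struct urbd0 *urbd0s,
                                  unsigned int tail, unsigned int head,
                                  unsigned int count)
{
        while (tail != head) {
                if (urbd0s[tail].tfd_index >= count)
                        break;          /* corrupted completion index */
                /* the driver sets tx_wait_done and wakes tx_wait_q here */
                tail = (tail + 1) % count;
        }
        return tail;
}
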
891 struct hci_event_hdr *hdr = (void *)skb->data; in btintel_pcie_recv_event()
895 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && in btintel_pcie_recv_event()
896 hdr->plen > 0) { in btintel_pcie_recv_event()
897 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; in btintel_pcie_recv_event()
898 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; in btintel_pcie_recv_event()
901 switch (skb->data[2]) { in btintel_pcie_recv_event()
915 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2; in btintel_pcie_recv_event()
929 if (btintel_test_and_clear_flag(data->hdev, in btintel_pcie_recv_event()
931 btintel_wake_up_flag(data->hdev, in btintel_pcie_recv_event()
952 memcmp(&skb->data[2], diagnostics_hdr, in btintel_pcie_recv_event()
960 if (skb->data[2] == 0x97) { in btintel_pcie_recv_event()
968 /* Process the received RX data
980 struct hci_dev *hdev = data->hdev; in btintel_pcie_recv_frame()
982 spin_lock(&data->hci_rx_lock); in btintel_pcie_recv_frame()
988 ret = -EILSEQ; in btintel_pcie_recv_frame()
996 if (skb->len >= HCI_ACL_HDR_SIZE) { in btintel_pcie_recv_frame()
997 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen); in btintel_pcie_recv_frame()
1001 ret = -EILSEQ; in btintel_pcie_recv_frame()
1007 if (skb->len >= HCI_SCO_HDR_SIZE) { in btintel_pcie_recv_frame()
1008 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen; in btintel_pcie_recv_frame()
1012 ret = -EILSEQ; in btintel_pcie_recv_frame()
1018 if (skb->len >= HCI_EVENT_HDR_SIZE) { in btintel_pcie_recv_frame()
1019 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen; in btintel_pcie_recv_frame()
1023 ret = -EILSEQ; in btintel_pcie_recv_frame()
1029 if (skb->len >= HCI_ISO_HDR_SIZE) { in btintel_pcie_recv_frame()
1030 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen); in btintel_pcie_recv_frame()
1034 ret = -EILSEQ; in btintel_pcie_recv_frame()
1042 ret = -EINVAL; in btintel_pcie_recv_frame()
1046 if (skb->len < plen) { in btintel_pcie_recv_frame()
1049 ret = -EILSEQ; in btintel_pcie_recv_frame()
1056 hdev->stat.byte_rx += plen; in btintel_pcie_recv_frame()
1070 hdev->stat.err_rx++; in btintel_pcie_recv_frame()
1072 spin_unlock(&data->hci_rx_lock); in btintel_pcie_recv_frame()
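
Each HCI packet type stores its payload length at a fixed offset, so the expected total is always the header size plus the decoded length field, and a shorter skb is rejected with -EILSEQ. A condensed, standalone version of that length check using the standard HCI header sizes:

#include <stddef.h>
#include <stdint.h>

/* Returns the expected total packet length, or 0 when the buffer cannot
 * even hold the header for its type. Length fields are little-endian.
 */
static size_t hci_expected_len(uint8_t type, const uint8_t *buf, size_t len)
{
        switch (type) {
        case 0x02: /* ACL: 4-byte header, 16-bit dlen at offset 2 */
                return len >= 4 ? 4 + (size_t)(buf[2] | (buf[3] << 8)) : 0;
        case 0x03: /* SCO: 3-byte header, 8-bit dlen at offset 2 */
                return len >= 3 ? 3 + (size_t)buf[2] : 0;
        case 0x04: /* Event: 2-byte header, 8-bit plen at offset 1 */
                return len >= 2 ? 2 + (size_t)buf[1] : 0;
        case 0x05: /* ISO: 4-byte header, 14-bit dlen at offset 2 */
                return len >= 4 ? 4 + (size_t)((buf[2] | (buf[3] << 8)) & 0x3fff) : 0;
        default:
                return 0;               /* unknown type: -EINVAL in the driver */
        }
}
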
1093 switch (data->dmp_hdr.cnvi_top & 0xfff) { in btintel_pcie_read_hwexp()
1097 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01) in btintel_pcie_read_hwexp()
1107 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top); in btintel_pcie_read_hwexp()
1123 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x", in btintel_pcie_read_hwexp()
1128 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev)); in btintel_pcie_read_hwexp()
1132 pending = len - offset; in btintel_pcie_read_hwexp()
1138 if (!tlv->type) { in btintel_pcie_read_hwexp()
1139 bt_dev_dbg(data->hdev, "Invalid TLV type 0"); in btintel_pcie_read_hwexp()
1142 pkt_len = le16_to_cpu(tlv->len); in btintel_pcie_read_hwexp()
1144 pending = len - offset; in btintel_pcie_read_hwexp()
1153 if (tlv->type != 1) in btintel_pcie_read_hwexp()
1156 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len); in btintel_pcie_read_hwexp()
1163 skb_put_data(skb, tlv->val, pkt_len); in btintel_pcie_read_hwexp()
1171 tlv->val, pkt_len, false); in btintel_pcie_read_hwexp()
1182 bt_dev_err(data->hdev, "Received hw exception interrupt"); in btintel_pcie_msix_hw_exp_handler()
1184 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1187 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1191 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1192 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT; in btintel_pcie_msix_hw_exp_handler()
1194 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_msix_hw_exp_handler()
1203 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { in btintel_pcie_rx_work()
1212 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags); in btintel_pcie_rx_work()
1215 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) { in btintel_pcie_rx_work()
1216 btintel_pcie_dump_traces(data->hdev); in btintel_pcie_rx_work()
1217 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags); in btintel_pcie_rx_work()
1221 while ((skb = skb_dequeue(&data->rx_skb_q))) { in btintel_pcie_rx_work()
1226 /* Create an sk_buff with the data, add it to the queue and start RX work */
1236 len = rfh_hdr->packet_len; in btintel_pcie_submit_rx_work()
1238 ret = -EINVAL; in btintel_pcie_submit_rx_work()
1250 skb_queue_tail(&data->rx_skb_q, skb); in btintel_pcie_submit_rx_work()
1251 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_submit_rx_work()
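
RX frames arrive in interrupt context, so the function above only validates the RFH header, copies the payload into an skb, queues it, and defers all HCI processing to the ordered workqueue. A kernel-flavored sketch of that split; struct rx_ctx is a placeholder for the relevant slice of struct btintel_pcie_data:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct rx_ctx {
        struct sk_buff_head rx_skb_q;           /* IRQ-safe FIFO of raw frames */
        struct workqueue_struct *workqueue;
        struct work_struct rx_work;
};

/* Interrupt side: stash the frame and kick the worker. */
static void rx_submit(struct rx_ctx *ctx, struct sk_buff *skb)
{
        skb_queue_tail(&ctx->rx_skb_q, skb);
        queue_work(ctx->workqueue, &ctx->rx_work);
}

/* Process side: drain the FIFO outside interrupt context. */
static void rx_work_fn(struct work_struct *work)
{
        struct rx_ctx *ctx = container_of(work, struct rx_ctx, rx_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&ctx->rx_skb_q)))
                kfree_skb(skb);         /* the driver calls btintel_pcie_recv_frame() */
}
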
1259 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1267 struct hci_dev *hdev = data->hdev; in btintel_pcie_msix_rx_handle()
1269 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_msix_rx_handle()
1270 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_msix_rx_handle()
1278 rxq = &data->rxq; in btintel_pcie_msix_rx_handle()
1280 /* The firmware sends multiple CDs in a single MSI-X and it needs to in btintel_pcie_msix_rx_handle()
1284 urbd1 = &rxq->urbd1s[cr_tia]; in btintel_pcie_msix_rx_handle()
1285 ipc_print_urbd1(data->hdev, urbd1, cr_tia); in btintel_pcie_msix_rx_handle()
1287 buf = &rxq->bufs[urbd1->frbd_tag]; in btintel_pcie_msix_rx_handle()
1290 urbd1->frbd_tag); in btintel_pcie_msix_rx_handle()
1294 ret = btintel_pcie_submit_rx_work(data, urbd1->status, in btintel_pcie_msix_rx_handle()
1295 buf->data); in btintel_pcie_msix_rx_handle()
1297 bt_dev_err(hdev, "RXQ: failed to submit rx request"); in btintel_pcie_msix_rx_handle()
1301 cr_tia = (cr_tia + 1) % rxq->count; in btintel_pcie_msix_rx_handle()
1302 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia; in btintel_pcie_msix_rx_handle()
1303 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); in btintel_pcie_msix_rx_handle()
1314 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_is_rxq_empty()
1319 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_is_txackq_empty()
1328 spin_lock(&data->irq_lock); in btintel_pcie_irq_msix_handler()
1335 spin_unlock(&data->irq_lock); in btintel_pcie_irq_msix_handler()
1359 /* For RX */ in btintel_pcie_irq_msix_handler()
1370 * re-enabled by clearing this bit. This register is defined as write 1 in btintel_pcie_irq_msix_handler()
1375 BIT(entry->entry)); in btintel_pcie_irq_msix_handler()
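
The automatic-clear cause register acknowledged above is write-1-to-clear: the handler writes back only its own vector bit, which re-arms that cause and leaves every other bit untouched. Modeled on a plain register value:

#include <stdint.h>

/* W1C semantics: every 1 bit written clears (re-arms) that cause;
 * 0 bits leave the other causes exactly as they were.
 */
static uint32_t msix_autoclear_ack(uint32_t reg, uint32_t written)
{
        return reg & ~written;
}

/* e.g. reg = msix_autoclear_ack(reg, 1u << vector) after servicing `vector` */
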
1380 /* This function requests the irq for MSI-X and registers the handlers per irq.
1389 data->msix_entries[i].entry = i; in btintel_pcie_setup_irq()
1391 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN, in btintel_pcie_setup_irq()
1396 data->alloc_vecs = num_irqs; in btintel_pcie_setup_irq()
1397 data->msix_enabled = 1; in btintel_pcie_setup_irq()
1398 data->def_irq = 0; in btintel_pcie_setup_irq()
1401 for (i = 0; i < data->alloc_vecs; i++) { in btintel_pcie_setup_irq()
1404 msix_entry = &data->msix_entries[i]; in btintel_pcie_setup_irq()
1405 msix_entry->vector = pci_irq_vector(data->pdev, i); in btintel_pcie_setup_irq()
1407 err = devm_request_threaded_irq(&data->pdev->dev, in btintel_pcie_setup_irq()
1408 msix_entry->vector, in btintel_pcie_setup_irq()
1415 pci_free_irq_vectors(data->pdev); in btintel_pcie_setup_irq()
1416 data->alloc_vecs = 0; in btintel_pcie_setup_irq()
1446 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE; in btintel_pcie_config_msix()
1459 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK); in btintel_pcie_config_msix()
1460 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK); in btintel_pcie_config_msix()
1474 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in btintel_pcie_config_pcie()
1476 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in btintel_pcie_config_pcie()
1481 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); in btintel_pcie_config_pcie()
1482 if (IS_ERR(data->base_addr)) in btintel_pcie_config_pcie()
1483 return PTR_ERR(data->base_addr); in btintel_pcie_config_pcie()
1489 /* Configure MSI-X with causes list */ in btintel_pcie_config_pcie()
1498 ci->version = 0x1; in btintel_pcie_init_ci()
1499 ci->size = sizeof(*ci); in btintel_pcie_init_ci()
1500 ci->config = 0x0000; in btintel_pcie_init_ci()
1501 ci->addr_cr_hia = data->ia.cr_hia_p_addr; in btintel_pcie_init_ci()
1502 ci->addr_tr_tia = data->ia.tr_tia_p_addr; in btintel_pcie_init_ci()
1503 ci->addr_cr_tia = data->ia.cr_tia_p_addr; in btintel_pcie_init_ci()
1504 ci->addr_tr_hia = data->ia.tr_hia_p_addr; in btintel_pcie_init_ci()
1505 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_init_ci()
1506 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_init_ci()
1507 ci->addr_urbdq0 = data->txq.urbd0s_p_addr; in btintel_pcie_init_ci()
1508 ci->addr_tfdq = data->txq.tfds_p_addr; in btintel_pcie_init_ci()
1509 ci->num_tfdq = data->txq.count; in btintel_pcie_init_ci()
1510 ci->num_urbdq0 = data->txq.count; in btintel_pcie_init_ci()
1511 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM; in btintel_pcie_init_ci()
1512 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM; in btintel_pcie_init_ci()
1513 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K; in btintel_pcie_init_ci()
1514 ci->addr_frbdq = data->rxq.frbds_p_addr; in btintel_pcie_init_ci()
1515 ci->num_frbdq = data->rxq.count; in btintel_pcie_init_ci()
1516 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; in btintel_pcie_init_ci()
1517 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr; in btintel_pcie_init_ci()
1518 ci->num_urbdq1 = data->rxq.count; in btintel_pcie_init_ci()
1519 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; in btintel_pcie_init_ci()
1521 ci->dbg_output_mode = 0x01; in btintel_pcie_init_ci()
1522 ci->dbgc_addr = data->dbgc.frag_p_addr; in btintel_pcie_init_ci()
1523 ci->dbgc_size = data->dbgc.frag_size; in btintel_pcie_init_ci()
1524 ci->dbg_preset = 0x00; in btintel_pcie_init_ci()
1531 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_free_txq_bufs()
1532 txq->buf_v_addr, txq->buf_p_addr); in btintel_pcie_free_txq_bufs()
1533 kfree(txq->bufs); in btintel_pcie_free_txq_bufs()
1543 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL); in btintel_pcie_setup_txq_bufs()
1544 if (!txq->bufs) in btintel_pcie_setup_txq_bufs()
1545 return -ENOMEM; in btintel_pcie_setup_txq_bufs()
1550 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_txq_bufs()
1551 txq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_setup_txq_bufs()
1552 &txq->buf_p_addr, in btintel_pcie_setup_txq_bufs()
1554 if (!txq->buf_v_addr) { in btintel_pcie_setup_txq_bufs()
1555 kfree(txq->bufs); in btintel_pcie_setup_txq_bufs()
1556 return -ENOMEM; in btintel_pcie_setup_txq_bufs()
1562 for (i = 0; i < txq->count; i++) { in btintel_pcie_setup_txq_bufs()
1563 buf = &txq->bufs[i]; in btintel_pcie_setup_txq_bufs()
1564 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_txq_bufs()
1565 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_txq_bufs()
1575 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_free_rxq_bufs()
1576 rxq->buf_v_addr, rxq->buf_p_addr); in btintel_pcie_free_rxq_bufs()
1577 kfree(rxq->bufs); in btintel_pcie_free_rxq_bufs()
1587 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL); in btintel_pcie_setup_rxq_bufs()
1588 if (!rxq->bufs) in btintel_pcie_setup_rxq_bufs()
1589 return -ENOMEM; in btintel_pcie_setup_rxq_bufs()
1594 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_rxq_bufs()
1595 rxq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_setup_rxq_bufs()
1596 &rxq->buf_p_addr, in btintel_pcie_setup_rxq_bufs()
1598 if (!rxq->buf_v_addr) { in btintel_pcie_setup_rxq_bufs()
1599 kfree(rxq->bufs); in btintel_pcie_setup_rxq_bufs()
1600 return -ENOMEM; in btintel_pcie_setup_rxq_bufs()
1606 for (i = 0; i < rxq->count; i++) { in btintel_pcie_setup_rxq_bufs()
1607 buf = &rxq->bufs[i]; in btintel_pcie_setup_rxq_bufs()
1608 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_rxq_bufs()
1609 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_rxq_bufs()
1620 ia->tr_hia_p_addr = p_addr; in btintel_pcie_setup_ia()
1621 ia->tr_hia = v_addr; in btintel_pcie_setup_ia()
1624 ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_setup_ia()
1625 ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_setup_ia()
1628 ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); in btintel_pcie_setup_ia()
1629 ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); in btintel_pcie_setup_ia()
1632 ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); in btintel_pcie_setup_ia()
1633 ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); in btintel_pcie_setup_ia()
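
The index array (IA) region is one allocation carved into four equal u16 arrays, TR head, TR tail, CR head and CR tail, at a stride of sizeof(u16) * NUM_QUEUES, with the virtual and DMA views advancing in lockstep just like the debug buffers earlier. The layout, sketched with an illustrative queue count:

#include <stdint.h>

#define NUM_QUEUES 2                    /* stand-in for BTINTEL_PCIE_NUM_QUEUES */
#define IA_STRIDE  (sizeof(uint16_t) * NUM_QUEUES)

struct ia_view {
        uint16_t *tr_hia, *tr_tia, *cr_hia, *cr_tia;
};

static void ia_carve(struct ia_view *ia, void *v_base)
{
        uint8_t *p = v_base;

        /* Four consecutive u16[NUM_QUEUES] arrays; the DMA-side
         * addresses (not shown) advance by the same three strides.
         */
        ia->tr_hia = (uint16_t *)(p + 0 * IA_STRIDE);
        ia->tr_tia = (uint16_t *)(p + 1 * IA_STRIDE);
        ia->cr_hia = (uint16_t *)(p + 2 * IA_STRIDE);
        ia->cr_tia = (uint16_t *)(p + 3 * IA_STRIDE);
}
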
1638 btintel_pcie_free_rxq_bufs(data, &data->rxq); in btintel_pcie_free()
1639 btintel_pcie_free_txq_bufs(data, &data->txq); in btintel_pcie_free()
1641 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); in btintel_pcie_free()
1642 dma_pool_destroy(data->dma_pool); in btintel_pcie_free()
1645 /* Allocate tx and rx queues, any related data structures and buffers.
1674 data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev, in btintel_pcie_alloc()
1676 if (!data->dma_pool) { in btintel_pcie_alloc()
1677 err = -ENOMEM; in btintel_pcie_alloc()
1681 v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN, in btintel_pcie_alloc()
1684 dma_pool_destroy(data->dma_pool); in btintel_pcie_alloc()
1685 err = -ENOMEM; in btintel_pcie_alloc()
1689 data->dma_p_addr = p_addr; in btintel_pcie_alloc()
1690 data->dma_v_addr = v_addr; in btintel_pcie_alloc()
1693 data->txq.count = BTINTEL_DESCS_COUNT; in btintel_pcie_alloc()
1694 data->rxq.count = BTINTEL_DESCS_COUNT; in btintel_pcie_alloc()
1697 data->txq.tfds_p_addr = p_addr; in btintel_pcie_alloc()
1698 data->txq.tfds = v_addr; in btintel_pcie_alloc()
1704 data->txq.urbd0s_p_addr = p_addr; in btintel_pcie_alloc()
1705 data->txq.urbd0s = v_addr; in btintel_pcie_alloc()
1711 data->rxq.frbds_p_addr = p_addr; in btintel_pcie_alloc()
1712 data->rxq.frbds = v_addr; in btintel_pcie_alloc()
1718 data->rxq.urbd1s_p_addr = p_addr; in btintel_pcie_alloc()
1719 data->rxq.urbd1s = v_addr; in btintel_pcie_alloc()
1725 err = btintel_pcie_setup_txq_bufs(data, &data->txq); in btintel_pcie_alloc()
1730 err = btintel_pcie_setup_rxq_bufs(data, &data->rxq); in btintel_pcie_alloc()
1735 btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia); in btintel_pcie_alloc()
1746 data->ci = v_addr; in btintel_pcie_alloc()
1747 data->ci_p_addr = p_addr; in btintel_pcie_alloc()
1750 btintel_pcie_init_ci(data, data->ci); in btintel_pcie_alloc()
1755 btintel_pcie_free_txq_bufs(data, &data->txq); in btintel_pcie_alloc()
1757 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); in btintel_pcie_alloc()
1758 dma_pool_destroy(data->dma_pool); in btintel_pcie_alloc()
1781 struct hci_ev_cmd_complete *evt; in btintel_pcie_inject_cmd_complete() local
1783 skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); in btintel_pcie_inject_cmd_complete()
1785 return -ENOMEM; in btintel_pcie_inject_cmd_complete()
1788 hdr->evt = HCI_EV_CMD_COMPLETE; in btintel_pcie_inject_cmd_complete()
1789 hdr->plen = sizeof(*evt) + 1; in btintel_pcie_inject_cmd_complete()
1791 evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt)); in btintel_pcie_inject_cmd_complete()
1792 evt->ncmd = 0x01; in btintel_pcie_inject_cmd_complete()
1793 evt->opcode = cpu_to_le16(opcode); in btintel_pcie_inject_cmd_complete()
1829 cmd = (void *)skb->data; in btintel_pcie_send_frame()
1830 opcode = le16_to_cpu(cmd->opcode); in btintel_pcie_send_frame()
1832 struct hci_command_hdr *cmd = (void *)skb->data; in btintel_pcie_send_frame()
1833 __u16 opcode = le16_to_cpu(cmd->opcode); in btintel_pcie_send_frame()
1845 data->gp0_received = false; in btintel_pcie_send_frame()
1847 hdev->stat.cmd_tx++; in btintel_pcie_send_frame()
1851 hdev->stat.acl_tx++; in btintel_pcie_send_frame()
1855 hdev->stat.sco_tx++; in btintel_pcie_send_frame()
1862 return -EILSEQ; in btintel_pcie_send_frame()
1869 hdev->stat.err_tx++; in btintel_pcie_send_frame()
1876 old_ctxt = data->alive_intr_ctxt; in btintel_pcie_send_frame()
1877 data->alive_intr_ctxt = in btintel_pcie_send_frame()
1880 bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s", in btintel_pcie_send_frame()
1882 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_send_frame()
1884 ret = wait_event_timeout(data->gp0_wait_q, in btintel_pcie_send_frame()
1885 data->gp0_received, in btintel_pcie_send_frame()
1888 hdev->stat.err_tx++; in btintel_pcie_send_frame()
1890 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_send_frame()
1891 ret = -ETIME; in btintel_pcie_send_frame()
1896 hdev->stat.byte_tx += skb->len; in btintel_pcie_send_frame()
1907 hdev = data->hdev; in btintel_pcie_release_hdev()
1910 data->hdev = NULL; in btintel_pcie_release_hdev()
1921 BT_DBG("%s", hdev->name); in btintel_pcie_setup_internal()
1931 if (skb->data[0]) { in btintel_pcie_setup_internal()
1933 skb->data[0]); in btintel_pcie_setup_internal()
1934 err = -EIO; in btintel_pcie_setup_internal()
1939 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); in btintel_pcie_setup_internal()
1940 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); in btintel_pcie_setup_internal()
1941 set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); in btintel_pcie_setup_internal()
1944 hdev->set_quality_report = btintel_set_quality_report; in btintel_pcie_setup_internal()
1960 err = -EINVAL; in btintel_pcie_setup_internal()
1981 set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); in btintel_pcie_setup_internal()
1994 err = -EINVAL; in btintel_pcie_setup_internal()
1999 data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top; in btintel_pcie_setup_internal()
2000 data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top; in btintel_pcie_setup_internal()
2001 data->dmp_hdr.fw_timestamp = ver_tlv.timestamp; in btintel_pcie_setup_internal()
2002 data->dmp_hdr.fw_build_type = ver_tlv.build_type; in btintel_pcie_setup_internal()
2003 data->dmp_hdr.fw_build_num = ver_tlv.build_num; in btintel_pcie_setup_internal()
2004 data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt; in btintel_pcie_setup_internal()
2007 data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1; in btintel_pcie_setup_internal()
2056 return -ENOMEM; in btintel_pcie_setup_hdev()
2058 hdev->bus = HCI_PCI; in btintel_pcie_setup_hdev()
2061 data->hdev = hdev; in btintel_pcie_setup_hdev()
2062 SET_HCIDEV_DEV(hdev, &data->pdev->dev); in btintel_pcie_setup_hdev()
2064 hdev->manufacturer = 2; in btintel_pcie_setup_hdev()
2065 hdev->open = btintel_pcie_open; in btintel_pcie_setup_hdev()
2066 hdev->close = btintel_pcie_close; in btintel_pcie_setup_hdev()
2067 hdev->send = btintel_pcie_send_frame; in btintel_pcie_setup_hdev()
2068 hdev->setup = btintel_pcie_setup; in btintel_pcie_setup_hdev()
2069 hdev->shutdown = btintel_shutdown_combined; in btintel_pcie_setup_hdev()
2070 hdev->hw_error = btintel_hw_error; in btintel_pcie_setup_hdev()
2071 hdev->set_diag = btintel_set_diag; in btintel_pcie_setup_hdev()
2072 hdev->set_bdaddr = btintel_set_bdaddr; in btintel_pcie_setup_hdev()
2080 data->dmp_hdr.driver_name = KBUILD_MODNAME; in btintel_pcie_setup_hdev()
2095 return -ENODEV; in btintel_pcie_probe()
2097 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); in btintel_pcie_probe()
2099 return -ENOMEM; in btintel_pcie_probe()
2101 data->pdev = pdev; in btintel_pcie_probe()
2103 spin_lock_init(&data->irq_lock); in btintel_pcie_probe()
2104 spin_lock_init(&data->hci_rx_lock); in btintel_pcie_probe()
2106 init_waitqueue_head(&data->gp0_wait_q); in btintel_pcie_probe()
2107 data->gp0_received = false; in btintel_pcie_probe()
2109 init_waitqueue_head(&data->tx_wait_q); in btintel_pcie_probe()
2110 data->tx_wait_done = false; in btintel_pcie_probe()
2112 data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI); in btintel_pcie_probe()
2113 if (!data->workqueue) in btintel_pcie_probe()
2114 return -ENOMEM; in btintel_pcie_probe()
2116 skb_queue_head_init(&data->rx_skb_q); in btintel_pcie_probe()
2117 INIT_WORK(&data->rx_work, btintel_pcie_rx_work); in btintel_pcie_probe()
2119 data->boot_stage_cache = 0x00; in btintel_pcie_probe()
2120 data->img_resp_cache = 0x00; in btintel_pcie_probe()
2137 data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG); in btintel_pcie_probe()
2139 data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG); in btintel_pcie_probe()
2149 bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi, in btintel_pcie_probe()
2150 data->cnvr); in btintel_pcie_probe()
2171 for (int i = 0; i < data->alloc_vecs; i++) { in btintel_pcie_remove()
2174 msix_entry = &data->msix_entries[i]; in btintel_pcie_remove()
2175 free_irq(msix_entry->vector, msix_entry); in btintel_pcie_remove()
2182 flush_work(&data->rx_work); in btintel_pcie_remove()
2184 destroy_workqueue(data->workqueue); in btintel_pcie_remove()
2199 if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) in btintel_pcie_coredump()
2202 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER; in btintel_pcie_coredump()
2203 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_coredump()
2218 MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");