
8  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
10 * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
45 * * Neither the name Intel Corporation nor the names of its
73 #include "iwl-drv.h"
74 #include "iwl-trans.h"
75 #include "iwl-csr.h"
76 #include "iwl-prph.h"
77 #include "iwl-scd.h"
78 #include "iwl-agn-hw.h"
79 #include "fw/error-dump.h"
83 #include "iwl-fh.h"
84 #include "iwl-context-info-gen3.h"
97 struct pci_dev *pdev = trans_pcie->pci_dev; in iwl_trans_pcie_dump_regs()
101 if (trans_pcie->pcie_dbg_dumped_once) in iwl_trans_pcie_dump_regs()
118 prefix = (char *)buf + alloc_size - PREFIX_LEN; in iwl_trans_pcie_dump_regs()
146 if (!pdev->bus->self) in iwl_trans_pcie_dump_regs()
149 pdev = pdev->bus->self; in iwl_trans_pcie_dump_regs()
180 trans_pcie->pcie_dbg_dumped_once = 1; in iwl_trans_pcie_dump_regs()
186 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ in iwl_trans_pcie_sw_reset()
193 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; in iwl_pcie_free_fw_monitor()
195 if (!fw_mon->size) in iwl_pcie_free_fw_monitor()
198 dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block, in iwl_pcie_free_fw_monitor()
199 fw_mon->physical); in iwl_pcie_free_fw_monitor()
201 fw_mon->block = NULL; in iwl_pcie_free_fw_monitor()
202 fw_mon->physical = 0; in iwl_pcie_free_fw_monitor()
203 fw_mon->size = 0; in iwl_pcie_free_fw_monitor()
209 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; in iwl_pcie_alloc_fw_monitor_block()
215 if (fw_mon->size) in iwl_pcie_alloc_fw_monitor_block()
218 for (power = max_power; power >= min_power; power--) { in iwl_pcie_alloc_fw_monitor_block()
220 block = dma_alloc_coherent(trans->dev, size, &physical, in iwl_pcie_alloc_fw_monitor_block()
236 "Sorry - debug buffer is only %luK while you requested %luK\n", in iwl_pcie_alloc_fw_monitor_block()
237 (unsigned long)BIT(power - 10), in iwl_pcie_alloc_fw_monitor_block()
238 (unsigned long)BIT(max_power - 10)); in iwl_pcie_alloc_fw_monitor_block()
240 fw_mon->block = block; in iwl_pcie_alloc_fw_monitor_block()
241 fw_mon->physical = physical; in iwl_pcie_alloc_fw_monitor_block()
242 fw_mon->size = size; in iwl_pcie_alloc_fw_monitor_block()
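The allocation path in iwl_pcie_alloc_fw_monitor_block() asks dma_alloc_coherent() for the largest power-of-two buffer it is allowed and steps down one power at a time until an allocation succeeds or min_power is reached. A minimal userspace sketch of that back-off, with malloc() standing in for the DMA allocator and the power bounds picked only for illustration:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_backoff(unsigned int max_power, unsigned int min_power,
			   size_t *out_size)
{
	unsigned int power;

	for (power = max_power; power >= min_power; power--) {
		size_t size = (size_t)1 << power;
		void *block = malloc(size);

		if (block) {		/* first size that fits wins */
			*out_size = size;
			return block;
		}
	}
	*out_size = 0;
	return NULL;
}

int main(void)
{
	size_t size;
	void *buf = alloc_backoff(26, 11, &size);	/* 64 MiB down to 2 KiB */

	printf("got %zu bytes\n", size);
	free(buf);
	return 0;
}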
259 if (trans->dbg.fw_mon.size) in iwl_pcie_alloc_fw_monitor()
281 if (trans->cfg->apmg_not_supported) in iwl_pcie_set_pwr()
284 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) in iwl_pcie_set_pwr()
310 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); in iwl_pcie_apm_config()
311 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); in iwl_pcie_apm_config()
313 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); in iwl_pcie_apm_config()
314 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; in iwl_pcie_apm_config()
315 IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", in iwl_pcie_apm_config()
317 trans->ltr_enabled ? "En" : "Dis"); in iwl_pcie_apm_config()
337 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) in iwl_pcie_apm_init()
353 * wake device's PCI Express link L1a -> L0s in iwl_pcie_apm_init()
360 /* Configure analog phase-lock-loop before activating to D0A */ in iwl_pcie_apm_init()
361 if (trans->trans_cfg->base_params->pll_cfg) in iwl_pcie_apm_init()
364 ret = iwl_finish_nic_init(trans, trans->trans_cfg); in iwl_pcie_apm_init()
368 if (trans->cfg->host_interrupt_operation_mode) { in iwl_pcie_apm_init()
370 * This is a bit of an abuse - This is needed for 7260 / 3160 in iwl_pcie_apm_init()
375 * consumes slightly more power (100uA) - but allows us to be sure in iwl_pcie_apm_init()
397 if (!trans->cfg->apmg_not_supported) { in iwl_pcie_apm_init()
402 /* Disable L1-Active */ in iwl_pcie_apm_init()
411 set_bit(STATUS_DEVICE_ENABLED, &trans->status); in iwl_pcie_apm_init()
436 ret = iwl_finish_nic_init(trans, trans->trans_cfg); in iwl_pcie_apm_lp_xtal_enable()
453 * caused by APMG idle state. in iwl_pcie_apm_lp_xtal_enable()
483 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. in iwl_pcie_apm_lp_xtal_enable()
520 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); in iwl_pcie_apm_stop()
523 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) in iwl_pcie_apm_stop()
527 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) in iwl_pcie_apm_stop()
530 else if (trans->trans_cfg->device_family >= in iwl_pcie_apm_stop()
544 clear_bit(STATUS_DEVICE_ENABLED, &trans->status); in iwl_pcie_apm_stop()
549 if (trans->cfg->lp_xtal_workaround) { in iwl_pcie_apm_stop()
558 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. in iwl_pcie_apm_stop()
569 spin_lock(&trans_pcie->irq_lock); in iwl_pcie_nic_init()
571 spin_unlock(&trans_pcie->irq_lock); in iwl_pcie_nic_init()
578 iwl_op_mode_nic_config(trans->op_mode); in iwl_pcie_nic_init()
585 return -ENOMEM; in iwl_pcie_nic_init()
587 if (trans->trans_cfg->base_params->shadow_reg_enable) { in iwl_pcie_nic_init()
619 /* Note: returns standard 0/-ERROR code */
697 trans_pcie->ucode_write_complete = false; in iwl_pcie_load_firmware_chunk()
700 return -EIO; in iwl_pcie_load_firmware_chunk()
706 ret = wait_event_timeout(trans_pcie->ucode_write_waitq, in iwl_pcie_load_firmware_chunk()
707 trans_pcie->ucode_write_complete, 5 * HZ); in iwl_pcie_load_firmware_chunk()
711 return -ETIMEDOUT; in iwl_pcie_load_firmware_chunk()
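iwl_pcie_load_firmware_chunk() clears a completion flag, hands the chunk to the DMA engine, and then sleeps on a wait queue until the interrupt handler raises the flag, giving up after five seconds. A kernel-module-style sketch of that handshake; write_waitq, write_complete and the function names here are illustrative, not the driver's own symbols:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(write_waitq);
static bool write_complete;

static int wait_for_chunk_written(void)
{
	write_complete = false;
	/* ...program the DMA channel with the chunk here... */
	if (!wait_event_timeout(write_waitq, write_complete, 5 * HZ))
		return -ETIMEDOUT;	/* the ISR never signalled completion */
	return 0;
}

/* called from the interrupt handler once the write has finished */
static void chunk_written_irq(void)
{
	write_complete = true;
	wake_up(&write_waitq);
}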
722 u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); in iwl_pcie_load_section()
728 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, in iwl_pcie_load_section()
733 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, in iwl_pcie_load_section()
736 return -ENOMEM; in iwl_pcie_load_section()
739 for (offset = 0; offset < section->len; offset += chunk_sz) { in iwl_pcie_load_section()
743 copy_size = min_t(u32, chunk_sz, section->len - offset); in iwl_pcie_load_section()
744 dst_addr = section->offset + offset; in iwl_pcie_load_section()
754 memcpy(v_addr, (u8 *)section->data + offset, copy_size); in iwl_pcie_load_section()
770 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); in iwl_pcie_load_section()
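iwl_pcie_load_section() streams each firmware section through a single bounce buffer of at most FH_MEM_TB_MAX_LENGTH bytes, clamping the last piece with min_t(). A userspace sketch of just that chunking arithmetic; CHUNK_SZ and the memcpy() destination are stand-ins, where the driver instead points the DMA engine at each piece:

#include <stddef.h>
#include <string.h>

#define CHUNK_SZ 4096u		/* stand-in for FH_MEM_TB_MAX_LENGTH */

static void copy_in_chunks(unsigned char *dst, const unsigned char *src,
			   size_t len)
{
	size_t offset;

	for (offset = 0; offset < len; offset += CHUNK_SZ) {
		/* clamp the final piece, like min_t(u32, chunk_sz, len - offset) */
		size_t copy_size = len - offset < CHUNK_SZ ?
				   len - offset : CHUNK_SZ;

		memcpy(dst + offset, src + offset, copy_size);
	}
}

int main(void)
{
	static unsigned char src[10000] = { 1, 2, 3 }, dst[10000];

	copy_in_chunks(dst, src, sizeof(src));
	return dst[2] == 3 ? 0 : 1;
}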
791 for (i = *first_ucode_section; i < image->num_sec; i++) { in iwl_pcie_load_cpu_sections_8000()
795 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between in iwl_pcie_load_cpu_sections_8000()
797 * PAGING_SEPARATOR_SECTION delimiter - separate between in iwl_pcie_load_cpu_sections_8000()
800 if (!image->sec[i].data || in iwl_pcie_load_cpu_sections_8000()
801 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || in iwl_pcie_load_cpu_sections_8000()
802 image->sec[i].offset == PAGING_SEPARATOR_SECTION) { in iwl_pcie_load_cpu_sections_8000()
809 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); in iwl_pcie_load_cpu_sections_8000()
825 if (trans->trans_cfg->use_tfh) { in iwl_pcie_load_cpu_sections_8000()
857 for (i = *first_ucode_section; i < image->num_sec; i++) { in iwl_pcie_load_cpu_sections()
861 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between in iwl_pcie_load_cpu_sections()
863 * PAGING_SEPARATOR_SECTION delimiter - separate between in iwl_pcie_load_cpu_sections()
866 if (!image->sec[i].data || in iwl_pcie_load_cpu_sections()
867 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || in iwl_pcie_load_cpu_sections()
868 image->sec[i].offset == PAGING_SEPARATOR_SECTION) { in iwl_pcie_load_cpu_sections()
875 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); in iwl_pcie_load_cpu_sections()
889 &trans->dbg.fw_mon_cfg[alloc_id]; in iwl_pcie_apply_destination_ini()
895 if (le32_to_cpu(fw_mon_cfg->buf_location) == in iwl_pcie_apply_destination_ini()
897 IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); in iwl_pcie_apply_destination_ini()
905 if (le32_to_cpu(fw_mon_cfg->buf_location) != in iwl_pcie_apply_destination_ini()
907 !trans->dbg.fw_mon_ini[alloc_id].num_frags) in iwl_pcie_apply_destination_ini()
910 frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; in iwl_pcie_apply_destination_ini()
916 frag->physical >> MON_BUFF_SHIFT_VER2); in iwl_pcie_apply_destination_ini()
918 (frag->physical + frag->size - 256) >> in iwl_pcie_apply_destination_ini()
924 const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; in iwl_pcie_apply_destination()
925 const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; in iwl_pcie_apply_destination()
934 get_fw_dbg_mode_string(dest->monitor_mode)); in iwl_pcie_apply_destination()
936 if (dest->monitor_mode == EXTERNAL_MODE) in iwl_pcie_apply_destination()
937 iwl_pcie_alloc_fw_monitor(trans, dest->size_power); in iwl_pcie_apply_destination()
941 for (i = 0; i < trans->dbg.n_dest_reg; i++) { in iwl_pcie_apply_destination()
942 u32 addr = le32_to_cpu(dest->reg_ops[i].addr); in iwl_pcie_apply_destination()
943 u32 val = le32_to_cpu(dest->reg_ops[i].val); in iwl_pcie_apply_destination()
945 switch (dest->reg_ops[i].op) { in iwl_pcie_apply_destination()
973 IWL_ERR(trans, "FW debug - unknown OP %d\n", in iwl_pcie_apply_destination()
974 dest->reg_ops[i].op); in iwl_pcie_apply_destination()
980 if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { in iwl_pcie_apply_destination()
981 iwl_write_prph(trans, le32_to_cpu(dest->base_reg), in iwl_pcie_apply_destination()
982 fw_mon->physical >> dest->base_shift); in iwl_pcie_apply_destination()
983 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) in iwl_pcie_apply_destination()
984 iwl_write_prph(trans, le32_to_cpu(dest->end_reg), in iwl_pcie_apply_destination()
985 (fw_mon->physical + fw_mon->size - in iwl_pcie_apply_destination()
986 256) >> dest->end_shift); in iwl_pcie_apply_destination()
988 iwl_write_prph(trans, le32_to_cpu(dest->end_reg), in iwl_pcie_apply_destination()
989 (fw_mon->physical + fw_mon->size) >> in iwl_pcie_apply_destination()
990 dest->end_shift); in iwl_pcie_apply_destination()
1001 image->is_dual_cpus ? "Dual" : "Single"); in iwl_pcie_load_given_ucode()
1008 if (image->is_dual_cpus) { in iwl_pcie_load_given_ucode()
1039 image->is_dual_cpus ? "Dual" : "Single"); in iwl_pcie_load_given_ucode_8000()
1073 bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_check_hw_rf_kill()
1077 set_bit(STATUS_RFKILL_HW, &trans->status); in iwl_pcie_check_hw_rf_kill()
1078 set_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_check_hw_rf_kill()
1080 clear_bit(STATUS_RFKILL_HW, &trans->status); in iwl_pcie_check_hw_rf_kill()
1081 if (trans_pcie->opmode_down) in iwl_pcie_check_hw_rf_kill()
1082 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_check_hw_rf_kill()
1085 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_check_hw_rf_kill()
1120 int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; in iwl_pcie_map_non_rx_causes()
1127 * the first interrupt vector will serve non-RX and FBQ causes. in iwl_pcie_map_non_rx_causes()
1140 trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; in iwl_pcie_map_rx_causes()
1144 * The first RX queue - fallback queue, which is designated for in iwl_pcie_map_rx_causes()
1147 * the other (N - 2) interrupt vectors. in iwl_pcie_map_rx_causes()
1150 for (idx = 1; idx < trans->num_rx_queues; idx++) { in iwl_pcie_map_rx_causes()
1152 MSIX_FH_INT_CAUSES_Q(idx - offset)); in iwl_pcie_map_rx_causes()
1158 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) in iwl_pcie_map_rx_causes()
1162 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) in iwl_pcie_map_rx_causes()
1168 struct iwl_trans *trans = trans_pcie->trans; in iwl_pcie_conf_msix_hw()
1170 if (!trans_pcie->msix_enabled) { in iwl_pcie_conf_msix_hw()
1171 if (trans->trans_cfg->mq_rx_supported && in iwl_pcie_conf_msix_hw()
1172 test_bit(STATUS_DEVICE_ENABLED, &trans->status)) in iwl_pcie_conf_msix_hw()
1182 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) in iwl_pcie_conf_msix_hw()
1199 struct iwl_trans *trans = trans_pcie->trans; in iwl_pcie_init_msix()
1203 if (!trans_pcie->msix_enabled) in iwl_pcie_init_msix()
1206 trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); in iwl_pcie_init_msix()
1207 trans_pcie->fh_mask = trans_pcie->fh_init_mask; in iwl_pcie_init_msix()
1208 trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); in iwl_pcie_init_msix()
1209 trans_pcie->hw_mask = trans_pcie->hw_init_mask; in iwl_pcie_init_msix()
1216 lockdep_assert_held(&trans_pcie->mutex); in _iwl_trans_pcie_stop_device()
1218 if (trans_pcie->is_down) in _iwl_trans_pcie_stop_device()
1221 trans_pcie->is_down = true; in _iwl_trans_pcie_stop_device()
1236 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { in _iwl_trans_pcie_stop_device()
1242 /* Power-down device's busmaster DMA clocks */ in _iwl_trans_pcie_stop_device()
1243 if (!trans->cfg->apmg_not_supported) { in _iwl_trans_pcie_stop_device()
1254 /* Stop the device, and put it in low power state */ in _iwl_trans_pcie_stop_device()
1260 * Upon stop, the IVAR table gets erased, so msi-x won't in _iwl_trans_pcie_stop_device()
1261 * work. This causes a bug in RF-KILL flows, since the interrupt in _iwl_trans_pcie_stop_device()
1273 * should be masked. Re-ACK all the interrupts here. in _iwl_trans_pcie_stop_device()
1278 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in _iwl_trans_pcie_stop_device()
1279 clear_bit(STATUS_INT_ENABLED, &trans->status); in _iwl_trans_pcie_stop_device()
1280 clear_bit(STATUS_TPOWER_PMI, &trans->status); in _iwl_trans_pcie_stop_device()
1288 /* re-take ownership to prevent other users from stealing the device */ in _iwl_trans_pcie_stop_device()
1296 if (trans_pcie->msix_enabled) { in iwl_pcie_synchronize_irqs()
1299 for (i = 0; i < trans_pcie->alloc_vecs; i++) in iwl_pcie_synchronize_irqs()
1300 synchronize_irq(trans_pcie->msix_entries[i].vector); in iwl_pcie_synchronize_irqs()
1302 synchronize_irq(trans_pcie->pci_dev->irq); in iwl_pcie_synchronize_irqs()
1316 ret = -EIO; in iwl_trans_pcie_start_fw()
1325 * We enabled the RF-Kill interrupt and the handler may very in iwl_trans_pcie_start_fw()
1334 mutex_lock(&trans_pcie->mutex); in iwl_trans_pcie_start_fw()
1339 ret = -ERFKILL; in iwl_trans_pcie_start_fw()
1344 if (trans_pcie->is_down) { in iwl_trans_pcie_start_fw()
1347 ret = -EIO; in iwl_trans_pcie_start_fw()
1367 * by the RF-Kill interrupt (hence mask all the interrupt besides the in iwl_trans_pcie_start_fw()
1369 * RF-Kill switch is toggled, we will find out after having loaded in iwl_trans_pcie_start_fw()
1379 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) in iwl_trans_pcie_start_fw()
1384 /* re-check RF-Kill state since we may have missed the interrupt */ in iwl_trans_pcie_start_fw()
1387 ret = -ERFKILL; in iwl_trans_pcie_start_fw()
1390 mutex_unlock(&trans_pcie->mutex); in iwl_trans_pcie_start_fw()
1406 * Check again since the RF kill state may have changed while in iwl_trans_pcie_handle_stop_rfkill()
1408 * receive the RF kill interrupt and update the state in the in iwl_trans_pcie_handle_stop_rfkill()
1410 * Don't call the op_mode if the rfkill state hasn't changed. in iwl_trans_pcie_handle_stop_rfkill()
1414 * state changed exactly now while we were called from stop_device. in iwl_trans_pcie_handle_stop_rfkill()
1419 set_bit(STATUS_RFKILL_HW, &trans->status); in iwl_trans_pcie_handle_stop_rfkill()
1420 set_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_trans_pcie_handle_stop_rfkill()
1422 clear_bit(STATUS_RFKILL_HW, &trans->status); in iwl_trans_pcie_handle_stop_rfkill()
1423 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_trans_pcie_handle_stop_rfkill()
1434 mutex_lock(&trans_pcie->mutex); in iwl_trans_pcie_stop_device()
1435 trans_pcie->opmode_down = true; in iwl_trans_pcie_stop_device()
1436 was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_trans_pcie_stop_device()
1439 mutex_unlock(&trans_pcie->mutex); in iwl_trans_pcie_stop_device()
1442 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) in iwl_trans_pcie_rf_kill() argument
1447 lockdep_assert_held(&trans_pcie->mutex); in iwl_trans_pcie_rf_kill()
1450 state ? "disabled" : "enabled"); in iwl_trans_pcie_rf_kill()
1451 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { in iwl_trans_pcie_rf_kill()
1452 if (trans->trans_cfg->gen2) in iwl_trans_pcie_rf_kill()
1481 * reset TX queues -- some of their registers reset during S3 in iwl_pcie_d3_complete_suspend()
1502 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_trans_pcie_d3_suspend()
1506 ret = wait_event_timeout(trans_pcie->sx_waitq, in iwl_trans_pcie_d3_suspend()
1507 trans_pcie->sx_complete, 2 * HZ); in iwl_trans_pcie_d3_suspend()
1511 trans_pcie->sx_complete = false; in iwl_trans_pcie_d3_suspend()
1515 return -ETIMEDOUT; in iwl_trans_pcie_d3_suspend()
1540 ret = iwl_finish_nic_init(trans, trans->trans_cfg); in iwl_trans_pcie_d3_resume()
1547 * Also enables interrupts - none will happen as in iwl_trans_pcie_d3_resume()
1552 if (!trans_pcie->msix_enabled) in iwl_trans_pcie_d3_resume()
1583 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_trans_pcie_d3_resume()
1584 trans_pcie->sx_complete = false; in iwl_trans_pcie_d3_resume()
1588 ret = wait_event_timeout(trans_pcie->sx_waitq, in iwl_trans_pcie_d3_resume()
1589 trans_pcie->sx_complete, 2 * HZ); in iwl_trans_pcie_d3_resume()
1593 trans_pcie->sx_complete = false; in iwl_trans_pcie_d3_resume()
1597 return -ETIMEDOUT; in iwl_trans_pcie_d3_resume()
1613 if (!cfg_trans->mq_rx_supported) in iwl_pcie_set_interrupt_capa()
1616 if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) in iwl_pcie_set_interrupt_capa()
1621 trans_pcie->msix_entries[i].entry = i; in iwl_pcie_set_interrupt_capa()
1623 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, in iwl_pcie_set_interrupt_capa()
1628 "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", in iwl_pcie_set_interrupt_capa()
1632 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; in iwl_pcie_set_interrupt_capa()
1635 "MSI-X enabled. %d interrupt vectors were allocated\n", in iwl_pcie_set_interrupt_capa()
1645 if (num_irqs <= max_irqs - 2) { in iwl_pcie_set_interrupt_capa()
1646 trans_pcie->trans->num_rx_queues = num_irqs + 1; in iwl_pcie_set_interrupt_capa()
1647 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | in iwl_pcie_set_interrupt_capa()
1649 } else if (num_irqs == max_irqs - 1) { in iwl_pcie_set_interrupt_capa()
1650 trans_pcie->trans->num_rx_queues = num_irqs; in iwl_pcie_set_interrupt_capa()
1651 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; in iwl_pcie_set_interrupt_capa()
1653 trans_pcie->trans->num_rx_queues = num_irqs - 1; in iwl_pcie_set_interrupt_capa()
1655 WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); in iwl_pcie_set_interrupt_capa()
1657 trans_pcie->alloc_vecs = num_irqs; in iwl_pcie_set_interrupt_capa()
1658 trans_pcie->msix_enabled = true; in iwl_pcie_set_interrupt_capa()
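Once pci_enable_msix_range() reports how many vectors were granted, iwl_pcie_set_interrupt_capa() decides how much the first vector has to be shared: two or more vectors short of the maximum, it carries both the non-RX causes and the first RSS queue; exactly one short, only the non-RX causes; otherwise nothing is shared, and the RX queue count is derived accordingly. A userspace sketch of that budgeting (struct vec_plan and its field names are illustrative):

#include <stdio.h>

struct vec_plan {
	int num_rx_queues;
	int share_non_rx;	/* first vector also takes non-RX causes */
	int share_first_rss;	/* first vector also takes the first RSS queue */
};

static struct vec_plan plan_vectors(int num_irqs, int max_irqs)
{
	struct vec_plan p = { 0, 0, 0 };

	if (num_irqs <= max_irqs - 2) {
		p.num_rx_queues = num_irqs + 1;
		p.share_non_rx = 1;
		p.share_first_rss = 1;
	} else if (num_irqs == max_irqs - 1) {
		p.num_rx_queues = num_irqs;
		p.share_non_rx = 1;
	} else {
		p.num_rx_queues = num_irqs - 1;
	}
	return p;
}

int main(void)
{
	struct vec_plan p = plan_vectors(14, 16);

	printf("%d RX queues, share_non_rx=%d, share_rss=%d\n",
	       p.num_rx_queues, p.share_non_rx, p.share_first_rss);
	return 0;
}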
1664 dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); in iwl_pcie_set_interrupt_capa()
1679 i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; in iwl_pcie_irq_set_affinity()
1680 iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; in iwl_pcie_irq_set_affinity()
1685 * (i.e. return will be > i - 1). in iwl_pcie_irq_set_affinity()
1687 cpu = cpumask_next(i - offset, cpu_online_mask); in iwl_pcie_irq_set_affinity()
1688 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); in iwl_pcie_irq_set_affinity()
1689 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, in iwl_pcie_irq_set_affinity()
1690 &trans_pcie->affinity_mask[i]); in iwl_pcie_irq_set_affinity()
1692 IWL_ERR(trans_pcie->trans, in iwl_pcie_irq_set_affinity()
1703 for (i = 0; i < trans_pcie->alloc_vecs; i++) { in iwl_pcie_init_msix_handler()
1706 const char *qname = queue_name(&pdev->dev, trans_pcie, i); in iwl_pcie_init_msix_handler()
1709 return -ENOMEM; in iwl_pcie_init_msix_handler()
1711 msix_entry = &trans_pcie->msix_entries[i]; in iwl_pcie_init_msix_handler()
1712 ret = devm_request_threaded_irq(&pdev->dev, in iwl_pcie_init_msix_handler()
1713 msix_entry->vector, in iwl_pcie_init_msix_handler()
1715 (i == trans_pcie->def_irq) ? in iwl_pcie_init_msix_handler()
1722 IWL_ERR(trans_pcie->trans, in iwl_pcie_init_msix_handler()
1728 iwl_pcie_irq_set_affinity(trans_pcie->trans); in iwl_pcie_init_msix_handler()
1737 switch (trans->trans_cfg->device_family) { in iwl_trans_pcie_clear_persistence_bit()
1755 return -EPERM; in iwl_trans_pcie_clear_persistence_bit()
1768 ret = iwl_finish_nic_init(trans, trans->trans_cfg); in iwl_pcie_gen2_force_power_gating()
1792 lockdep_assert_held(&trans_pcie->mutex); in _iwl_trans_pcie_start_hw()
1806 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && in _iwl_trans_pcie_start_hw()
1807 trans->trans_cfg->integrated) { in _iwl_trans_pcie_start_hw()
1819 /* From now on, the op_mode will be kept updated about RF kill state */ in _iwl_trans_pcie_start_hw()
1822 trans_pcie->opmode_down = false; in _iwl_trans_pcie_start_hw()
1825 trans_pcie->is_down = false; in _iwl_trans_pcie_start_hw()
1838 mutex_lock(&trans_pcie->mutex); in iwl_trans_pcie_start_hw()
1840 mutex_unlock(&trans_pcie->mutex); in iwl_trans_pcie_start_hw()
1849 mutex_lock(&trans_pcie->mutex); in iwl_trans_pcie_op_mode_leave()
1851 /* disable interrupts - don't enable HW RF kill interrupt */ in iwl_trans_pcie_op_mode_leave()
1860 mutex_unlock(&trans_pcie->mutex); in iwl_trans_pcie_op_mode_leave()
1867 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); in iwl_trans_pcie_write8()
1872 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); in iwl_trans_pcie_write32()
1877 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); in iwl_trans_pcie_read32()
1882 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_trans_pcie_prph_msk()
1912 trans->txqs.cmd.q_id = trans_cfg->cmd_queue; in iwl_trans_pcie_configure()
1913 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; in iwl_trans_pcie_configure()
1914 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; in iwl_trans_pcie_configure()
1915 trans->txqs.page_offs = trans_cfg->cb_data_offs; in iwl_trans_pcie_configure()
1916 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); in iwl_trans_pcie_configure()
1918 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) in iwl_trans_pcie_configure()
1919 trans_pcie->n_no_reclaim_cmds = 0; in iwl_trans_pcie_configure()
1921 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; in iwl_trans_pcie_configure()
1922 if (trans_pcie->n_no_reclaim_cmds) in iwl_trans_pcie_configure()
1923 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, in iwl_trans_pcie_configure()
1924 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); in iwl_trans_pcie_configure()
1926 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; in iwl_trans_pcie_configure()
1927 trans_pcie->rx_page_order = in iwl_trans_pcie_configure()
1928 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); in iwl_trans_pcie_configure()
1929 trans_pcie->rx_buf_bytes = in iwl_trans_pcie_configure()
1930 iwl_trans_get_rb_size(trans_pcie->rx_buf_size); in iwl_trans_pcie_configure()
1931 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); in iwl_trans_pcie_configure()
1932 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_trans_pcie_configure()
1933 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); in iwl_trans_pcie_configure()
1935 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; in iwl_trans_pcie_configure()
1936 trans_pcie->scd_set_active = trans_cfg->scd_set_active; in iwl_trans_pcie_configure()
1937 trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; in iwl_trans_pcie_configure()
1939 trans->command_groups = trans_cfg->command_groups; in iwl_trans_pcie_configure()
1940 trans->command_groups_size = trans_cfg->command_groups_size; in iwl_trans_pcie_configure()
1942 /* Initialize NAPI here - it should be before registering to mac80211 in iwl_trans_pcie_configure()
1947 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) in iwl_trans_pcie_configure()
1948 init_dummy_netdev(&trans_pcie->napi_dev); in iwl_trans_pcie_configure()
1958 if (trans->trans_cfg->gen2) in iwl_trans_pcie_free()
1964 if (trans_pcie->rba.alloc_wq) { in iwl_trans_pcie_free()
1965 destroy_workqueue(trans_pcie->rba.alloc_wq); in iwl_trans_pcie_free()
1966 trans_pcie->rba.alloc_wq = NULL; in iwl_trans_pcie_free()
1969 if (trans_pcie->msix_enabled) { in iwl_trans_pcie_free()
1970 for (i = 0; i < trans_pcie->alloc_vecs; i++) { in iwl_trans_pcie_free()
1972 trans_pcie->msix_entries[i].vector, in iwl_trans_pcie_free()
1976 trans_pcie->msix_enabled = false; in iwl_trans_pcie_free()
1983 if (trans_pcie->pnvm_dram.size) in iwl_trans_pcie_free()
1984 dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, in iwl_trans_pcie_free()
1985 trans_pcie->pnvm_dram.block, in iwl_trans_pcie_free()
1986 trans_pcie->pnvm_dram.physical); in iwl_trans_pcie_free()
1988 mutex_destroy(&trans_pcie->mutex); in iwl_trans_pcie_free()
1992 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) in iwl_trans_pcie_set_pmi() argument
1994 if (state) in iwl_trans_pcie_set_pmi()
1995 set_bit(STATUS_TPOWER_PMI, &trans->status); in iwl_trans_pcie_set_pmi()
1997 clear_bit(STATUS_TPOWER_PMI, &trans->status); in iwl_trans_pcie_set_pmi()
2009 struct pci_dev *pdev = removal->pdev; in iwl_trans_pcie_removal_wk()
2012 dev_err(&pdev->dev, "Device gone - attempting removal\n"); in iwl_trans_pcie_removal_wk()
2013 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); in iwl_trans_pcie_removal_wk()
2029 spin_lock_irqsave(&trans_pcie->reg_lock, *flags); in iwl_trans_pcie_grab_nic_access()
2031 if (trans_pcie->cmd_hold_nic_awake) in iwl_trans_pcie_grab_nic_access()
2037 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) in iwl_trans_pcie_grab_nic_access()
2045 * host DRAM when sleeping/waking for power-saving. in iwl_trans_pcie_grab_nic_access()
2057 * 5000 series and later (including 1000 series) have non-volatile SRAM, in iwl_trans_pcie_grab_nic_access()
2076 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_pcie_grab_nic_access()
2079 IWL_ERR(trans, "Device gone - scheduling removal!\n"); in iwl_trans_pcie_grab_nic_access()
2089 "Module is being unloaded - abort\n"); in iwl_trans_pcie_grab_nic_access()
2102 set_bit(STATUS_TRANS_DEAD, &trans->status); in iwl_trans_pcie_grab_nic_access()
2104 removal->pdev = to_pci_dev(trans->dev); in iwl_trans_pcie_grab_nic_access()
2105 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); in iwl_trans_pcie_grab_nic_access()
2106 pci_dev_get(removal->pdev); in iwl_trans_pcie_grab_nic_access()
2107 schedule_work(&removal->work); in iwl_trans_pcie_grab_nic_access()
2114 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); in iwl_trans_pcie_grab_nic_access()
2120 * Fool sparse by faking we release the lock - sparse will in iwl_trans_pcie_grab_nic_access()
2123 __release(&trans_pcie->reg_lock); in iwl_trans_pcie_grab_nic_access()
2132 lockdep_assert_held(&trans_pcie->reg_lock); in iwl_trans_pcie_release_nic_access()
2135 * Fool sparse by faking we acquire the lock - sparse will in iwl_trans_pcie_release_nic_access()
2138 __acquire(&trans_pcie->reg_lock); in iwl_trans_pcie_release_nic_access()
2140 if (trans_pcie->cmd_hold_nic_awake) in iwl_trans_pcie_release_nic_access()
2152 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); in iwl_trans_pcie_release_nic_access()
2184 return -EBUSY; in iwl_trans_pcie_read_mem()
2205 ret = -EBUSY; in iwl_trans_pcie_write_mem()
2213 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, in iwl_trans_pcie_read_config32()
2224 struct iwl_txq *txq = trans->txqs.txq[queue]; in iwl_trans_pcie_freeze_txq_timer()
2227 spin_lock_bh(&txq->lock); in iwl_trans_pcie_freeze_txq_timer()
2231 if (txq->frozen == freeze) in iwl_trans_pcie_freeze_txq_timer()
2237 txq->frozen = freeze; in iwl_trans_pcie_freeze_txq_timer()
2239 if (txq->read_ptr == txq->write_ptr) in iwl_trans_pcie_freeze_txq_timer()
2244 txq->stuck_timer.expires))) { in iwl_trans_pcie_freeze_txq_timer()
2252 txq->frozen_expiry_remainder = in iwl_trans_pcie_freeze_txq_timer()
2253 txq->stuck_timer.expires - now; in iwl_trans_pcie_freeze_txq_timer()
2254 del_timer(&txq->stuck_timer); in iwl_trans_pcie_freeze_txq_timer()
2259 * Wake a non-empty queue -> arm timer with the in iwl_trans_pcie_freeze_txq_timer()
2262 mod_timer(&txq->stuck_timer, in iwl_trans_pcie_freeze_txq_timer()
2263 now + txq->frozen_expiry_remainder); in iwl_trans_pcie_freeze_txq_timer()
2266 spin_unlock_bh(&txq->lock); in iwl_trans_pcie_freeze_txq_timer()
2274 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_trans_pcie_block_txq_ptrs()
2275 struct iwl_txq *txq = trans->txqs.txq[i]; in iwl_trans_pcie_block_txq_ptrs()
2277 if (i == trans->txqs.cmd.q_id) in iwl_trans_pcie_block_txq_ptrs()
2280 spin_lock_bh(&txq->lock); in iwl_trans_pcie_block_txq_ptrs()
2282 if (!block && !(WARN_ON_ONCE(!txq->block))) { in iwl_trans_pcie_block_txq_ptrs()
2283 txq->block--; in iwl_trans_pcie_block_txq_ptrs()
2284 if (!txq->block) { in iwl_trans_pcie_block_txq_ptrs()
2286 txq->write_ptr | (i << 8)); in iwl_trans_pcie_block_txq_ptrs()
2289 txq->block++; in iwl_trans_pcie_block_txq_ptrs()
2292 spin_unlock_bh(&txq->lock); in iwl_trans_pcie_block_txq_ptrs()
2303 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) in iwl_trans_pcie_rxq_dma_data()
2304 return -EINVAL; in iwl_trans_pcie_rxq_dma_data()
2306 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; in iwl_trans_pcie_rxq_dma_data()
2307 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; in iwl_trans_pcie_rxq_dma_data()
2308 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; in iwl_trans_pcie_rxq_dma_data()
2309 data->fr_bd_wid = 0; in iwl_trans_pcie_rxq_dma_data()
2322 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_pcie_wait_txq_empty()
2323 return -ENODEV; in iwl_trans_pcie_wait_txq_empty()
2325 if (!test_bit(txq_idx, trans->txqs.queue_used)) in iwl_trans_pcie_wait_txq_empty()
2326 return -EINVAL; in iwl_trans_pcie_wait_txq_empty()
2329 txq = trans->txqs.txq[txq_idx]; in iwl_trans_pcie_wait_txq_empty()
2331 spin_lock_bh(&txq->lock); in iwl_trans_pcie_wait_txq_empty()
2332 overflow_tx = txq->overflow_tx || in iwl_trans_pcie_wait_txq_empty()
2333 !skb_queue_empty(&txq->overflow_q); in iwl_trans_pcie_wait_txq_empty()
2334 spin_unlock_bh(&txq->lock); in iwl_trans_pcie_wait_txq_empty()
2336 wr_ptr = READ_ONCE(txq->write_ptr); in iwl_trans_pcie_wait_txq_empty()
2338 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || in iwl_trans_pcie_wait_txq_empty()
2342 u8 write_ptr = READ_ONCE(txq->write_ptr); in iwl_trans_pcie_wait_txq_empty()
2350 "WR pointer moved while flushing %d -> %d\n", in iwl_trans_pcie_wait_txq_empty()
2352 return -ETIMEDOUT; in iwl_trans_pcie_wait_txq_empty()
2357 spin_lock_bh(&txq->lock); in iwl_trans_pcie_wait_txq_empty()
2358 overflow_tx = txq->overflow_tx || in iwl_trans_pcie_wait_txq_empty()
2359 !skb_queue_empty(&txq->overflow_q); in iwl_trans_pcie_wait_txq_empty()
2360 spin_unlock_bh(&txq->lock); in iwl_trans_pcie_wait_txq_empty()
2363 if (txq->read_ptr != txq->write_ptr) { in iwl_trans_pcie_wait_txq_empty()
2367 return -ETIMEDOUT; in iwl_trans_pcie_wait_txq_empty()
2382 cnt < trans->trans_cfg->base_params->num_of_queues; in iwl_trans_pcie_wait_txqs_empty()
2385 if (cnt == trans->txqs.cmd.q_id) in iwl_trans_pcie_wait_txqs_empty()
2387 if (!test_bit(cnt, trans->txqs.queue_used)) in iwl_trans_pcie_wait_txqs_empty()
2406 spin_lock_irqsave(&trans_pcie->reg_lock, flags); in iwl_trans_pcie_set_bits_mask()
2408 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); in iwl_trans_pcie_set_bits_mask()
2524 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; in iwl_dbgfs_tx_queue_seq_start()
2525 struct iwl_dbgfs_tx_queue_state *state; in iwl_dbgfs_tx_queue_seq_start() local
2527 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) in iwl_dbgfs_tx_queue_seq_start()
2530 state = kmalloc(sizeof(*state), GFP_KERNEL); in iwl_dbgfs_tx_queue_seq_start()
2531 if (!state) in iwl_dbgfs_tx_queue_seq_start()
2533 state->pos = *pos; in iwl_dbgfs_tx_queue_seq_start()
2534 return state; in iwl_dbgfs_tx_queue_seq_start()
2540 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; in iwl_dbgfs_tx_queue_seq_next()
2541 struct iwl_dbgfs_tx_queue_state *state = v; in iwl_dbgfs_tx_queue_seq_next() local
2543 *pos = ++state->pos; in iwl_dbgfs_tx_queue_seq_next()
2545 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) in iwl_dbgfs_tx_queue_seq_next()
2548 return state; in iwl_dbgfs_tx_queue_seq_next()
2558 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; in iwl_dbgfs_tx_queue_seq_show()
2559 struct iwl_dbgfs_tx_queue_state *state = v; in iwl_dbgfs_tx_queue_seq_show() local
2560 struct iwl_trans *trans = priv->trans; in iwl_dbgfs_tx_queue_seq_show()
2561 struct iwl_txq *txq = trans->txqs.txq[state->pos]; in iwl_dbgfs_tx_queue_seq_show()
2564 (unsigned int)state->pos, in iwl_dbgfs_tx_queue_seq_show()
2565 !!test_bit(state->pos, trans->txqs.queue_used), in iwl_dbgfs_tx_queue_seq_show()
2566 !!test_bit(state->pos, trans->txqs.queue_stopped)); in iwl_dbgfs_tx_queue_seq_show()
2570 txq->read_ptr, txq->write_ptr, in iwl_dbgfs_tx_queue_seq_show()
2571 txq->need_update, txq->frozen, in iwl_dbgfs_tx_queue_seq_show()
2572 txq->n_window, txq->ampdu); in iwl_dbgfs_tx_queue_seq_show()
2576 if (state->pos == trans->txqs.cmd.q_id) in iwl_dbgfs_tx_queue_seq_show()
2598 return -ENOMEM; in iwl_dbgfs_tx_queue_open()
2600 priv->trans = inode->i_private; in iwl_dbgfs_tx_queue_open()
2608 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_rx_queue_read()
2614 bufsz = sizeof(char) * 121 * trans->num_rx_queues; in iwl_dbgfs_rx_queue_read()
2616 if (!trans_pcie->rxq) in iwl_dbgfs_rx_queue_read()
2617 return -EAGAIN; in iwl_dbgfs_rx_queue_read()
2621 return -ENOMEM; in iwl_dbgfs_rx_queue_read()
2623 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { in iwl_dbgfs_rx_queue_read()
2624 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_dbgfs_rx_queue_read()
2626 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", in iwl_dbgfs_rx_queue_read()
2628 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", in iwl_dbgfs_rx_queue_read()
2629 rxq->read); in iwl_dbgfs_rx_queue_read()
2630 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", in iwl_dbgfs_rx_queue_read()
2631 rxq->write); in iwl_dbgfs_rx_queue_read()
2632 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", in iwl_dbgfs_rx_queue_read()
2633 rxq->write_actual); in iwl_dbgfs_rx_queue_read()
2634 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", in iwl_dbgfs_rx_queue_read()
2635 rxq->need_update); in iwl_dbgfs_rx_queue_read()
2636 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", in iwl_dbgfs_rx_queue_read()
2637 rxq->free_count); in iwl_dbgfs_rx_queue_read()
2638 if (rxq->rb_stts) { in iwl_dbgfs_rx_queue_read()
2641 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_rx_queue_read()
2645 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_rx_queue_read()
2659 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_interrupt_read()
2661 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_dbgfs_interrupt_read()
2670 return -ENOMEM; in iwl_dbgfs_interrupt_read()
2672 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_interrupt_read()
2675 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", in iwl_dbgfs_interrupt_read()
2676 isr_stats->hw); in iwl_dbgfs_interrupt_read()
2677 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", in iwl_dbgfs_interrupt_read()
2678 isr_stats->sw); in iwl_dbgfs_interrupt_read()
2679 if (isr_stats->sw || isr_stats->hw) { in iwl_dbgfs_interrupt_read()
2680 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_interrupt_read()
2682 isr_stats->err_code); in iwl_dbgfs_interrupt_read()
2685 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", in iwl_dbgfs_interrupt_read()
2686 isr_stats->sch); in iwl_dbgfs_interrupt_read()
2687 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", in iwl_dbgfs_interrupt_read()
2688 isr_stats->alive); in iwl_dbgfs_interrupt_read()
2690 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_interrupt_read()
2691 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); in iwl_dbgfs_interrupt_read()
2693 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", in iwl_dbgfs_interrupt_read()
2694 isr_stats->ctkill); in iwl_dbgfs_interrupt_read()
2696 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", in iwl_dbgfs_interrupt_read()
2697 isr_stats->wakeup); in iwl_dbgfs_interrupt_read()
2699 pos += scnprintf(buf + pos, bufsz - pos, in iwl_dbgfs_interrupt_read()
2700 "Rx command responses:\t\t %u\n", isr_stats->rx); in iwl_dbgfs_interrupt_read()
2702 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", in iwl_dbgfs_interrupt_read()
2703 isr_stats->tx); in iwl_dbgfs_interrupt_read()
2705 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", in iwl_dbgfs_interrupt_read()
2706 isr_stats->unhandled); in iwl_dbgfs_interrupt_read()
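The debugfs read handlers above accumulate their output with the pos += scnprintf(buf + pos, bufsz - pos, ...) idiom; scnprintf() returns the number of bytes actually written, so pos can never run past the buffer. A userspace approximation with snprintf(), which instead returns the would-be length, so this sketch is only equivalent while the buffer stays large enough:

#include <stdio.h>

int main(void)
{
	char buf[128];
	size_t bufsz = sizeof(buf), pos = 0;

	/* build several formatted fields into one flat text buffer */
	pos += snprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 0);
	pos += snprintf(buf + pos, bufsz - pos, "\tread: %u\n", 12u);
	pos += snprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 34u);

	fputs(buf, stdout);
	return 0;
}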
2717 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_interrupt_write()
2719 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_dbgfs_interrupt_write()
2736 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_csr_write()
2747 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_fh_reg_read()
2755 return -EINVAL; in iwl_dbgfs_fh_reg_read()
2765 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_rfkill_read()
2771 trans_pcie->debug_rfkill, in iwl_dbgfs_rfkill_read()
2782 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_rfkill_write()
2790 if (new_value == trans_pcie->debug_rfkill) in iwl_dbgfs_rfkill_write()
2792 IWL_WARN(trans, "changing debug rfkill %d->%d\n", in iwl_dbgfs_rfkill_write()
2793 trans_pcie->debug_rfkill, new_value); in iwl_dbgfs_rfkill_write()
2794 trans_pcie->debug_rfkill = new_value; in iwl_dbgfs_rfkill_write()
2803 struct iwl_trans *trans = inode->i_private; in iwl_dbgfs_monitor_data_open()
2806 if (!trans->dbg.dest_tlv || in iwl_dbgfs_monitor_data_open()
2807 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { in iwl_dbgfs_monitor_data_open()
2809 return -ENOENT; in iwl_dbgfs_monitor_data_open()
2812 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) in iwl_dbgfs_monitor_data_open()
2813 return -EBUSY; in iwl_dbgfs_monitor_data_open()
2815 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; in iwl_dbgfs_monitor_data_open()
2823 IWL_TRANS_GET_PCIE_TRANS(inode->i_private); in iwl_dbgfs_monitor_data_release()
2825 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) in iwl_dbgfs_monitor_data_release()
2826 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; in iwl_dbgfs_monitor_data_release()
2834 int buf_size_left = count - *bytes_copied; in iwl_write_to_user_buf()
2836 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); in iwl_write_to_user_buf()
2840 *size -= copy_to_user(user_buf, buf, *size); in iwl_write_to_user_buf()
2852 struct iwl_trans *trans = file->private_data; in iwl_dbgfs_monitor_data_read()
2854 void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; in iwl_dbgfs_monitor_data_read()
2855 struct cont_rec *data = &trans_pcie->fw_mon_data; in iwl_dbgfs_monitor_data_read()
2860 if (trans->dbg.dest_tlv) { in iwl_dbgfs_monitor_data_read()
2862 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); in iwl_dbgfs_monitor_data_read()
2863 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); in iwl_dbgfs_monitor_data_read()
2869 if (unlikely(!trans->dbg.rec_on)) in iwl_dbgfs_monitor_data_read()
2872 mutex_lock(&data->mutex); in iwl_dbgfs_monitor_data_read()
2873 if (data->state == in iwl_dbgfs_monitor_data_read()
2875 mutex_unlock(&data->mutex); in iwl_dbgfs_monitor_data_read()
2883 if (data->prev_wrap_cnt == wrap_cnt) { in iwl_dbgfs_monitor_data_read()
2884 size = write_ptr - data->prev_wr_ptr; in iwl_dbgfs_monitor_data_read()
2885 curr_buf = cpu_addr + data->prev_wr_ptr; in iwl_dbgfs_monitor_data_read()
2889 data->prev_wr_ptr += size; in iwl_dbgfs_monitor_data_read()
2891 } else if (data->prev_wrap_cnt == wrap_cnt - 1 && in iwl_dbgfs_monitor_data_read()
2892 write_ptr < data->prev_wr_ptr) { in iwl_dbgfs_monitor_data_read()
2893 size = trans->dbg.fw_mon.size - data->prev_wr_ptr; in iwl_dbgfs_monitor_data_read()
2894 curr_buf = cpu_addr + data->prev_wr_ptr; in iwl_dbgfs_monitor_data_read()
2898 data->prev_wr_ptr += size; in iwl_dbgfs_monitor_data_read()
2905 data->prev_wr_ptr = size; in iwl_dbgfs_monitor_data_read()
2906 data->prev_wrap_cnt++; in iwl_dbgfs_monitor_data_read()
2909 if (data->prev_wrap_cnt == wrap_cnt - 1 && in iwl_dbgfs_monitor_data_read()
2910 write_ptr > data->prev_wr_ptr) in iwl_dbgfs_monitor_data_read()
2913 else if (!unlikely(data->prev_wrap_cnt == 0 && in iwl_dbgfs_monitor_data_read()
2914 data->prev_wr_ptr == 0)) in iwl_dbgfs_monitor_data_read()
2922 data->prev_wr_ptr = size; in iwl_dbgfs_monitor_data_read()
2923 data->prev_wrap_cnt = wrap_cnt; in iwl_dbgfs_monitor_data_read()
2926 mutex_unlock(&data->mutex); in iwl_dbgfs_monitor_data_read()
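iwl_dbgfs_monitor_data_read() treats the firmware write pointer plus a wrap counter as a ring-buffer cursor: while the wrap count is unchanged it copies the span between its previous position and the write pointer; after exactly one wrap it first drains the tail of the buffer and then restarts from offset 0. A simplified userspace sketch of that bookkeeping (names are assumed, and it assumes the reader keeps up so at most one wrap is ever outstanding):

#include <stddef.h>
#include <stdio.h>

struct ring_reader {
	size_t prev_wr_ptr;
	unsigned int prev_wrap_cnt;
};

/* Return how many new bytes can be copied out, starting at *start. */
static size_t ring_readable(struct ring_reader *r, size_t buf_size,
			    size_t write_ptr, unsigned int wrap_cnt,
			    size_t *start)
{
	size_t size = 0;

	if (r->prev_wrap_cnt == wrap_cnt) {
		/* no wrap since the last read: one contiguous span */
		*start = r->prev_wr_ptr;
		size = write_ptr - r->prev_wr_ptr;
		r->prev_wr_ptr += size;
	} else if (r->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < r->prev_wr_ptr) {
		/* wrapped once: drain the tail, restart at 0 next time */
		*start = r->prev_wr_ptr;
		size = buf_size - r->prev_wr_ptr;
		r->prev_wr_ptr = 0;
		r->prev_wrap_cnt = wrap_cnt;
	}
	return size;
}

int main(void)
{
	struct ring_reader r = { 0, 0 };
	size_t start;

	printf("%zu\n", ring_readable(&r, 4096, 1000, 0, &start)); /* 1000 */
	printf("%zu\n", ring_readable(&r, 4096, 100, 1, &start));  /* 3096 */
	return 0;
}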
2953 struct dentry *dir = trans->dbgfs_dir; in iwl_trans_pcie_dbgfs_register()
2967 struct cont_rec *data = &trans_pcie->fw_mon_data; in iwl_trans_pcie_debugfs_cleanup()
2969 mutex_lock(&data->mutex); in iwl_trans_pcie_debugfs_cleanup()
2970 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; in iwl_trans_pcie_debugfs_cleanup()
2971 mutex_unlock(&data->mutex); in iwl_trans_pcie_debugfs_cleanup()
2980 for (i = 0; i < trans->txqs.tfd.max_tbs; i++) in iwl_trans_pcie_get_cmdlen()
2991 int max_len = trans_pcie->rx_buf_bytes; in iwl_trans_pcie_dump_rbs()
2992 /* Dumping RBs is supported only for pre-9000 devices (1 queue) */ in iwl_trans_pcie_dump_rbs()
2993 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; in iwl_trans_pcie_dump_rbs()
2996 spin_lock(&rxq->lock); in iwl_trans_pcie_dump_rbs()
3000 for (i = rxq->read, j = 0; in iwl_trans_pcie_dump_rbs()
3003 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; in iwl_trans_pcie_dump_rbs()
3006 dma_unmap_page(trans->dev, rxb->page_dma, max_len, in iwl_trans_pcie_dump_rbs()
3011 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); in iwl_trans_pcie_dump_rbs()
3012 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); in iwl_trans_pcie_dump_rbs()
3013 rb = (void *)(*data)->data; in iwl_trans_pcie_dump_rbs()
3014 rb->index = cpu_to_le32(i); in iwl_trans_pcie_dump_rbs()
3015 memcpy(rb->data, page_address(rxb->page), max_len); in iwl_trans_pcie_dump_rbs()
3017 rxb->page_dma = dma_map_page(trans->dev, rxb->page, in iwl_trans_pcie_dump_rbs()
3018 rxb->offset, max_len, in iwl_trans_pcie_dump_rbs()
3024 spin_unlock(&rxq->lock); in iwl_trans_pcie_dump_rbs()
3037 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); in iwl_trans_pcie_dump_csr()
3038 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); in iwl_trans_pcie_dump_csr()
3039 val = (void *)(*data)->data; in iwl_trans_pcie_dump_csr()
3052 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; in iwl_trans_pcie_fh_regs_dump()
3060 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); in iwl_trans_pcie_fh_regs_dump()
3061 (*data)->len = cpu_to_le32(fh_regs_len); in iwl_trans_pcie_fh_regs_dump()
3062 val = (void *)(*data)->data; in iwl_trans_pcie_fh_regs_dump()
3064 if (!trans->trans_cfg->gen2) in iwl_trans_pcie_fh_regs_dump()
3088 u32 *buffer = (u32 *)fw_mon_data->data; in iwl_trans_pci_dump_marbh_monitor()
3112 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_trans_pcie_dump_pointers()
3117 } else if (trans->dbg.dest_tlv) { in iwl_trans_pcie_dump_pointers()
3118 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); in iwl_trans_pcie_dump_pointers()
3119 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); in iwl_trans_pcie_dump_pointers()
3120 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); in iwl_trans_pcie_dump_pointers()
3128 fw_mon_data->fw_mon_cycle_cnt = in iwl_trans_pcie_dump_pointers()
3130 fw_mon_data->fw_mon_base_ptr = in iwl_trans_pcie_dump_pointers()
3132 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_trans_pcie_dump_pointers()
3133 fw_mon_data->fw_mon_base_high_ptr = in iwl_trans_pcie_dump_pointers()
3137 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); in iwl_trans_pcie_dump_pointers()
3145 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; in iwl_trans_pcie_dump_monitor()
3148 if (trans->dbg.dest_tlv || in iwl_trans_pcie_dump_monitor()
3149 (fw_mon->size && in iwl_trans_pcie_dump_monitor()
3150 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || in iwl_trans_pcie_dump_monitor()
3151 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { in iwl_trans_pcie_dump_monitor()
3154 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); in iwl_trans_pcie_dump_monitor()
3155 fw_mon_data = (void *)(*data)->data; in iwl_trans_pcie_dump_monitor()
3160 if (fw_mon->size) { in iwl_trans_pcie_dump_monitor()
3161 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); in iwl_trans_pcie_dump_monitor()
3162 monitor_len = fw_mon->size; in iwl_trans_pcie_dump_monitor()
3163 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { in iwl_trans_pcie_dump_monitor()
3164 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); in iwl_trans_pcie_dump_monitor()
3169 if (trans->dbg.dest_tlv->version) { in iwl_trans_pcie_dump_monitor()
3172 trans->dbg.dest_tlv->base_shift; in iwl_trans_pcie_dump_monitor()
3174 base += trans->cfg->smem_offset; in iwl_trans_pcie_dump_monitor()
3177 trans->dbg.dest_tlv->base_shift; in iwl_trans_pcie_dump_monitor()
3180 iwl_trans_read_mem(trans, base, fw_mon_data->data, in iwl_trans_pcie_dump_monitor()
3182 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { in iwl_trans_pcie_dump_monitor()
3188 /* Didn't match anything - output no monitor data */ in iwl_trans_pcie_dump_monitor()
3193 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); in iwl_trans_pcie_dump_monitor()
3201 if (trans->dbg.fw_mon.size) { in iwl_trans_get_fw_monitor_len()
3204 trans->dbg.fw_mon.size; in iwl_trans_get_fw_monitor_len()
3205 return trans->dbg.fw_mon.size; in iwl_trans_get_fw_monitor_len()
3206 } else if (trans->dbg.dest_tlv) { in iwl_trans_get_fw_monitor_len()
3209 if (trans->dbg.dest_tlv->version == 1) { in iwl_trans_get_fw_monitor_len()
3210 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); in iwl_trans_get_fw_monitor_len()
3213 trans->dbg.dest_tlv->base_shift; in iwl_trans_get_fw_monitor_len()
3215 base += trans->cfg->smem_offset; in iwl_trans_get_fw_monitor_len()
3219 trans->dbg.dest_tlv->end_shift; in iwl_trans_get_fw_monitor_len()
3222 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); in iwl_trans_get_fw_monitor_len()
3223 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); in iwl_trans_get_fw_monitor_len()
3226 trans->dbg.dest_tlv->base_shift; in iwl_trans_get_fw_monitor_len()
3228 trans->dbg.dest_tlv->end_shift; in iwl_trans_get_fw_monitor_len()
3231 if (trans->trans_cfg->device_family >= in iwl_trans_get_fw_monitor_len()
3233 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) in iwl_trans_get_fw_monitor_len()
3234 end += (1 << trans->dbg.dest_tlv->end_shift); in iwl_trans_get_fw_monitor_len()
3235 monitor_len = end - base; in iwl_trans_get_fw_monitor_len()
3251 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; in iwl_trans_pcie_dump_data()
3256 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && in iwl_trans_pcie_dump_data()
3257 !trans->trans_cfg->mq_rx_supported && in iwl_trans_pcie_dump_data()
3269 cmdq->n_window * (sizeof(*txcmd) + in iwl_trans_pcie_dump_data()
3282 if (trans->trans_cfg->gen2) in iwl_trans_pcie_dump_data()
3284 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - in iwl_trans_pcie_dump_data()
3288 (FH_MEM_UPPER_BOUND - in iwl_trans_pcie_dump_data()
3293 /* Dumping RBs is supported only for pre-9000 devices (1 queue) */ in iwl_trans_pcie_dump_data()
3294 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; in iwl_trans_pcie_dump_data()
3299 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; in iwl_trans_pcie_dump_data()
3302 (PAGE_SIZE << trans_pcie->rx_page_order)); in iwl_trans_pcie_dump_data()
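The RB count above relies on the power-of-two ring convention: with RX_QUEUE_MASK equal to the ring size minus one, (write - read) & mask gives the number of occupied entries even after the write index has wrapped around. A tiny sketch (RING_SIZE is an assumed value, not the driver's):

#include <stdio.h>

#define RING_SIZE 256u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static unsigned int ring_used(unsigned int read, unsigned int write)
{
	return (write - read) & RING_MASK;
}

int main(void)
{
	/* wrap-around case: the write index has already lapped past read */
	printf("%u\n", ring_used(250u, 10u));	/* prints 16 */
	return 0;
}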
3306 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) in iwl_trans_pcie_dump_data()
3307 for (i = 0; i < trans->init_dram.paging_cnt; i++) in iwl_trans_pcie_dump_data()
3310 trans->init_dram.paging[i].size; in iwl_trans_pcie_dump_data()
3317 data = (void *)dump_data->data; in iwl_trans_pcie_dump_data()
3320 u16 tfd_size = trans->txqs.tfd.size; in iwl_trans_pcie_dump_data()
3322 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); in iwl_trans_pcie_dump_data()
3323 txcmd = (void *)data->data; in iwl_trans_pcie_dump_data()
3324 spin_lock_bh(&cmdq->lock); in iwl_trans_pcie_dump_data()
3325 ptr = cmdq->write_ptr; in iwl_trans_pcie_dump_data()
3326 for (i = 0; i < cmdq->n_window; i++) { in iwl_trans_pcie_dump_data()
3331 if (trans->trans_cfg->use_tfh) in iwl_trans_pcie_dump_data()
3337 (u8 *)cmdq->tfds + in iwl_trans_pcie_dump_data()
3343 txcmd->cmdlen = cpu_to_le32(cmdlen); in iwl_trans_pcie_dump_data()
3344 txcmd->caplen = cpu_to_le32(caplen); in iwl_trans_pcie_dump_data()
3345 memcpy(txcmd->data, cmdq->entries[idx].cmd, in iwl_trans_pcie_dump_data()
3347 txcmd = (void *)((u8 *)txcmd->data + caplen); in iwl_trans_pcie_dump_data()
3352 spin_unlock_bh(&cmdq->lock); in iwl_trans_pcie_dump_data()
3354 data->len = cpu_to_le32(len); in iwl_trans_pcie_dump_data()
3367 if (trans->trans_cfg->gen2 && in iwl_trans_pcie_dump_data()
3369 for (i = 0; i < trans->init_dram.paging_cnt; i++) { in iwl_trans_pcie_dump_data()
3371 u32 page_len = trans->init_dram.paging[i].size; in iwl_trans_pcie_dump_data()
3373 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); in iwl_trans_pcie_dump_data()
3374 data->len = cpu_to_le32(sizeof(*paging) + page_len); in iwl_trans_pcie_dump_data()
3375 paging = (void *)data->data; in iwl_trans_pcie_dump_data()
3376 paging->index = cpu_to_le32(i); in iwl_trans_pcie_dump_data()
3377 memcpy(paging->data, in iwl_trans_pcie_dump_data()
3378 trans->init_dram.paging[i].block, page_len); in iwl_trans_pcie_dump_data()
3387 dump_data->len = len; in iwl_trans_pcie_dump_data()
3493 if (!cfg_trans->gen2) in iwl_trans_pcie_alloc()
3500 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, in iwl_trans_pcie_alloc()
3503 return ERR_PTR(-ENOMEM); in iwl_trans_pcie_alloc()
3507 trans_pcie->trans = trans; in iwl_trans_pcie_alloc()
3508 trans_pcie->opmode_down = true; in iwl_trans_pcie_alloc()
3509 spin_lock_init(&trans_pcie->irq_lock); in iwl_trans_pcie_alloc()
3510 spin_lock_init(&trans_pcie->reg_lock); in iwl_trans_pcie_alloc()
3511 spin_lock_init(&trans_pcie->alloc_page_lock); in iwl_trans_pcie_alloc()
3512 mutex_init(&trans_pcie->mutex); in iwl_trans_pcie_alloc()
3513 init_waitqueue_head(&trans_pcie->ucode_write_waitq); in iwl_trans_pcie_alloc()
3515 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", in iwl_trans_pcie_alloc()
3517 if (!trans_pcie->rba.alloc_wq) { in iwl_trans_pcie_alloc()
3518 ret = -ENOMEM; in iwl_trans_pcie_alloc()
3521 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); in iwl_trans_pcie_alloc()
3523 trans_pcie->debug_rfkill = -1; in iwl_trans_pcie_alloc()
3525 if (!cfg_trans->base_params->pcie_l1_allowed) { in iwl_trans_pcie_alloc()
3527 * W/A - seems to solve weird behavior. We need to remove this in iwl_trans_pcie_alloc()
3536 trans_pcie->def_rx_queue = 0; in iwl_trans_pcie_alloc()
3540 addr_size = trans->txqs.tfd.addr_size; in iwl_trans_pcie_alloc()
3552 dev_err(&pdev->dev, "No suitable DMA available\n"); in iwl_trans_pcie_alloc()
3559 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); in iwl_trans_pcie_alloc()
3563 trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; in iwl_trans_pcie_alloc()
3564 if (!trans_pcie->hw_base) { in iwl_trans_pcie_alloc()
3565 dev_err(&pdev->dev, "pcim_iomap_table failed\n"); in iwl_trans_pcie_alloc()
3566 ret = -ENODEV; in iwl_trans_pcie_alloc()
3571 * PCI Tx retries from interfering with C3 CPU state */ in iwl_trans_pcie_alloc()
3574 trans_pcie->pci_dev = pdev; in iwl_trans_pcie_alloc()
3577 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); in iwl_trans_pcie_alloc()
3578 if (trans->hw_rev == 0xffffffff) { in iwl_trans_pcie_alloc()
3579 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); in iwl_trans_pcie_alloc()
3580 ret = -EIO; in iwl_trans_pcie_alloc()
3586 * changed, and now the revision step also includes bit 0-1 (no more in iwl_trans_pcie_alloc()
3587 * "dash" value). To keep hw_rev backwards compatible - we'll store it in iwl_trans_pcie_alloc()
3590 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) { in iwl_trans_pcie_alloc()
3591 trans->hw_rev = (trans->hw_rev & 0xfff0) | in iwl_trans_pcie_alloc()
3592 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); in iwl_trans_pcie_alloc()
3601 * in order to recognize C step, the driver should read the chip version in iwl_trans_pcie_alloc()
3610 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); in iwl_trans_pcie_alloc()
3613 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; in iwl_trans_pcie_alloc()
3614 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), in iwl_trans_pcie_alloc()
3615 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); in iwl_trans_pcie_alloc()
3618 init_waitqueue_head(&trans_pcie->wait_command_queue); in iwl_trans_pcie_alloc()
3620 init_waitqueue_head(&trans_pcie->sx_waitq); in iwl_trans_pcie_alloc()
3623 if (trans_pcie->msix_enabled) { in iwl_trans_pcie_alloc()
3632 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, in iwl_trans_pcie_alloc()
3637 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); in iwl_trans_pcie_alloc()
3640 trans_pcie->inta_mask = CSR_INI_SET_MASK; in iwl_trans_pcie_alloc()
3644 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; in iwl_trans_pcie_alloc()
3645 mutex_init(&trans_pcie->fw_mon_data.mutex); in iwl_trans_pcie_alloc()
3655 destroy_workqueue(trans_pcie->rba.alloc_wq); in iwl_trans_pcie_alloc()
3665 bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status); in iwl_trans_pcie_sync_nmi()
3668 if (trans_pcie->msix_enabled) { in iwl_trans_pcie_sync_nmi()