1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright (C) 2003-2015, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
18 #include "iwl-fh.h"
19 #include "iwl-csr.h"
20 #include "iwl-trans.h"
21 #include "iwl-debug.h"
22 #include "iwl-io.h"
23 #include "iwl-op-mode.h"
24 #include "iwl-drv.h"
26 #include "iwl-context-info.h"
47 * @invalid: rxb is in driver ownership - not owned by HW
77 * struct iwl_rx_transfer_desc - transfer descriptor
91 * struct iwl_rx_completion_desc - completion descriptor
105 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
117 * struct iwl_rxq - Rx queue
120 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000-family and later devices.
129 * @free_count: Number of pre-allocated buffers in rx_free
137 * @lock: per-queue lock
138 * @queue: actual rx queue. Not used for multi-rx queue.
169 * struct iwl_rb_allocator - Rx allocator
191 * iwl_get_closed_rb_stts - get the closed RB status count from the device-family-specific struct
198 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_get_closed_rb_stts()
199 __le16 *rb_stts = rxq->rb_stts; in iwl_get_closed_rb_stts()
203 struct iwl_rb_status *rb_stts = rxq->rb_stts; in iwl_get_closed_rb_stts()
205 return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF; in iwl_get_closed_rb_stts()
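The fragments above are from iwl_get_closed_rb_stts(); a minimal sketch of how the two branches fit together, assuming the rxq/rb_stts definitions from this header (a bare __le16 word on AX210-and-later devices, struct iwl_rb_status with a 12-bit closed_rb_num on older ones):

static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					  struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* AX210 and later: the closed RB count is a bare LE16 word */
		__le16 *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(*rb_stts));
	} else {
		/* older devices: the count lives in struct iwl_rb_status;
		 * only the low 12 bits are valid */
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
	}
}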
211 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
227 * enum iwl_shared_irq_flags - level of sharing for irq
237 * enum iwl_image_response_code - image response values
276 * enum iwl_pcie_imr_status - IMR DMA transfer state
290 * struct iwl_trans_pcie - PCIe transport specific data
306 * @scd_base_addr: scheduler base address in SRAM
311 * @pci_dev: basic pci-network driver stuff
315 * @cmd_queue: command queue number
325 * @msix_entries: array of MSI-X entries
326 * @msix_enabled: true if managed to enable MSI-X
350 * @inta_mask: interrupt (INT-A) mask
362 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
472 return (void *)trans->trans_specific; in IWL_TRANS_GET_PCIE_TRANS()
481 * re-enabled by clearing this bit. This register is defined as in iwl_pcie_clear_irq()
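The comment fragment at 481 is from iwl_pcie_clear_irq(), which re-enables a per-queue MSI-X cause after it has been serviced; the register is write-1-to-clear, so the acknowledge is a single write of that queue's bit. A minimal sketch, assuming the CSR_MSIX_AUTOMASK_ST_AD name from iwl-csr.h:

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/* The automask register is W1C: writing 1 to the queue's bit clears
	 * the automatic mask the HW set when it fired the interrupt. */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}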
530 * ICT - interrupt handling
572 clear_bit(STATUS_INT_ENABLED, &trans->status); in _iwl_disable_interrupts()
573 if (!trans_pcie->msix_enabled) { in _iwl_disable_interrupts()
584 trans_pcie->fh_init_mask); in _iwl_disable_interrupts()
586 trans_pcie->hw_init_mask); in _iwl_disable_interrupts()
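Lines 572-586 belong to _iwl_disable_interrupts(); a sketch of the full disable path, assuming the legacy INT-A branch masks and acks CSR_INT/CSR_FH_INT_STATUS while the MSI-X branch restores the init masks seen above:

static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* legacy INT-A: mask all causes and ack anything pending */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* MSI-X: fall back to the init masks, leaving only the
		 * causes needed before the firmware is alive */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}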
596 while (start < fw->num_sec && in iwl_pcie_get_num_sections()
597 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && in iwl_pcie_get_num_sections()
598 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { in iwl_pcie_get_num_sections()
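The loop condition at 596-598 is from iwl_pcie_get_num_sections(); a minimal sketch of the counting loop, assuming it walks fw->sec[] from the given index until a separator marker or the end of the image:

static int iwl_pcie_get_num_sections(const struct fw_img *fw, int start)
{
	int i = 0;

	/* count consecutive sections until a CPU1/CPU2 or paging separator */
	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}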
608 struct iwl_self_init_dram *dram = &trans->init_dram; in iwl_pcie_ctxt_info_free_fw_img()
611 if (!dram->fw) { in iwl_pcie_ctxt_info_free_fw_img()
612 WARN_ON(dram->fw_cnt); in iwl_pcie_ctxt_info_free_fw_img()
616 for (i = 0; i < dram->fw_cnt; i++) in iwl_pcie_ctxt_info_free_fw_img()
617 dma_free_coherent(trans->dev, dram->fw[i].size, in iwl_pcie_ctxt_info_free_fw_img()
618 dram->fw[i].block, dram->fw[i].physical); in iwl_pcie_ctxt_info_free_fw_img()
620 kfree(dram->fw); in iwl_pcie_ctxt_info_free_fw_img()
621 dram->fw_cnt = 0; in iwl_pcie_ctxt_info_free_fw_img()
622 dram->fw = NULL; in iwl_pcie_ctxt_info_free_fw_img()
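Lines 608-622 cover nearly all of iwl_pcie_ctxt_info_free_fw_img(); assembled into one piece (the signature is an assumption consistent with the matched body):

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	/* free every DMA block allocated for the firmware image sections */
	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}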
629 spin_lock_bh(&trans_pcie->irq_lock); in iwl_disable_interrupts()
631 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_disable_interrupts()
639 set_bit(STATUS_INT_ENABLED, &trans->status); in _iwl_enable_interrupts()
640 if (!trans_pcie->msix_enabled) { in _iwl_enable_interrupts()
641 trans_pcie->inta_mask = CSR_INI_SET_MASK; in _iwl_enable_interrupts()
642 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in _iwl_enable_interrupts()
648 trans_pcie->hw_mask = trans_pcie->hw_init_mask; in _iwl_enable_interrupts()
649 trans_pcie->fh_mask = trans_pcie->fh_init_mask; in _iwl_enable_interrupts()
651 ~trans_pcie->fh_mask); in _iwl_enable_interrupts()
653 ~trans_pcie->hw_mask); in _iwl_enable_interrupts()
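Lines 639-653 are from _iwl_enable_interrupts(); a sketch of both branches, assuming the MSI-X mask registers treat a set bit as "cause masked", which is why the complements of fh_mask/hw_mask are written:

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* re-enable the init-time causes; a 1 in the mask register
		 * disables a cause, so write the complement */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}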
661 spin_lock_bh(&trans_pcie->irq_lock); in iwl_enable_interrupts()
663 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_enable_interrupts()
670 trans_pcie->hw_mask = msk; in iwl_enable_hw_int_msk_msix()
678 trans_pcie->fh_mask = msk; in iwl_enable_fh_int_msk_msix()
686 if (!trans_pcie->msix_enabled) { in iwl_enable_fw_load_int()
687 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; in iwl_enable_fw_load_int()
688 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_fw_load_int()
691 trans_pcie->hw_init_mask); in iwl_enable_fw_load_int()
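Lines 686-691 are from iwl_enable_fw_load_int(); a minimal sketch, assuming the MSI-X branch keeps the HW causes at their init mask and enables only the DMA channel used to load the ucode (MSIX_FH_INT_CAUSES_D2S_CH0_NUM is taken from iwl-csr.h on that assumption):

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		/* legacy INT-A: only the FH TX cause signals load progress */
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}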
703 if (!trans_pcie->msix_enabled) { in iwl_enable_fw_load_int_ctx_info()
711 trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; in iwl_enable_fw_load_int_ctx_info()
712 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_fw_load_int_ctx_info()
720 iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); in iwl_enable_fw_load_int_ctx_info()
727 if (trans_p->shared_vec_mask) { in queue_name()
728 int vec = trans_p->shared_vec_mask & in queue_name()
740 if (i == trans_p->alloc_vecs - 1) in queue_name()
752 if (!trans_pcie->msix_enabled) { in iwl_enable_rfkill_int()
753 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; in iwl_enable_rfkill_int()
754 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_rfkill_int()
757 trans_pcie->fh_init_mask); in iwl_enable_rfkill_int()
762 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { in iwl_enable_rfkill_int()
764 * On 9000-series devices this bit isn't enabled by default, so in iwl_enable_rfkill_int()
766 * to wake up the PCI-E bus for RF-kill interrupts. in iwl_enable_rfkill_int()
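Lines 752-766 come from iwl_enable_rfkill_int(); a sketch of the whole helper, assuming the MSI-X branch masks the FH causes back to init and enables only the RF-kill HW cause, plus the 9000-family wake bit described in the comment above (CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN is assumed from iwl-csr.h):

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/* allow the RF-kill interrupt to wake the PCIe bus while
		 * the device is powered down */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}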
779 lockdep_assert_held(&trans_pcie->mutex); in iwl_is_rfkill_set()
781 if (trans_pcie->debug_rfkill == 1) in iwl_is_rfkill_set()
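The two lines above are from iwl_is_rfkill_set(); a minimal sketch, assuming the non-debug path reads the hardware RF-kill state from CSR_GP_CNTRL:

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	/* debugfs override: 1 forces "RF-kill asserted" */
	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}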
817 return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); in iwl_pcie_dbg_on()
849 /* transport gen 2 exported functions */