/linux/arch/arm64/kvm/
vmid.c
    32   #define vmid2idx(vmid)  ((vmid) & ~VMID_MASK)
    36   * As vmid #0 is always reserved, we will never allocate one
    42   #define vmid_gen_match(vmid) \
    43       (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
    48   u64 vmid;                                                  (flush_context)
    53   vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
    56   if (vmid == 0)
    57       vmid = per_cpu(reserved_vmids, cpu);
    58   __set_bit(vmid2idx(vmid), vmid_map);
    72   check_update_reserved_vmid(u64 vmid, u64 newvmid)
    95   u64 vmid = atomic64_read(&kvm_vmid->id);                   (new_vmid)
    141  u64 vmid, old_active_vmid;                                 (kvm_arm_vmid_update)
    …
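The arm64 allocator above stamps each guest's VMID with an allocator generation held in the bits above kvm_arm_vmid_bits, and vmid_gen_match() is the test that decides whether a cached VMID survived the last rollover. A minimal user-space model of that check, with a fixed 16-bit VMID field and plain integers standing in for the kernel's atomics and per-CPU state (all names and constants here are illustrative):

    /* Model of vmid_gen_match(): XOR the stored value with the current
     * generation and shift out the low VMID bits; only a VMID allocated in
     * the current generation yields zero. */
    #include <stdint.h>
    #include <stdio.h>

    #define VMID_BITS 16

    static uint64_t vmid_generation = 1ULL << VMID_BITS;   /* current generation */

    static int vmid_gen_match(uint64_t vmid)
    {
        return !((vmid ^ vmid_generation) >> VMID_BITS);
    }

    int main(void)
    {
        uint64_t fresh = vmid_generation | 0x2a;  /* allocated this generation */
        uint64_t stale = 0x2a;                    /* left over from generation 0 */

        printf("fresh matches: %d\n", vmid_gen_match(fresh));  /* 1 */
        printf("stale matches: %d\n", vmid_gen_match(stale));  /* 0 */
        return 0;
    }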
/linux/drivers/virt/acrn/
hypercall.h
    76   * @vmid: User VM ID
    80   static inline long hcall_start_vm(u64 vmid)
    82       return acrn_hypercall1(HC_START_VM, vmid);
    87   * @vmid: User VM ID
    91   static inline long hcall_pause_vm(u64 vmid)
    93       return acrn_hypercall1(HC_PAUSE_VM, vmid);
    98   * @vmid: User VM ID
    102  static inline long hcall_destroy_vm(u64 vmid)
    104      return acrn_hypercall1(HC_DESTROY_VM, vmid);
    109  * @vmid: …
    113  hcall_reset_vm(u64 vmid)
    125  hcall_set_vcpu_regs(u64 vmid, u64 regs_state)
    137  hcall_inject_msi(u64 vmid, u64 msi)
    149  hcall_vm_intr_monitor(u64 vmid, u64 addr)
    161  hcall_set_irqline(u64 vmid, u64 op)
    173  hcall_set_ioreq_buffer(u64 vmid, u64 buffer)
    185  hcall_notify_req_finish(u64 vmid, u64 vcpu)
    208  hcall_create_vdev(u64 vmid, u64 addr)
    220  hcall_destroy_vdev(u64 vmid, u64 addr)
    232  hcall_assign_mmiodev(u64 vmid, u64 addr)
    244  hcall_deassign_mmiodev(u64 vmid, u64 addr)
    256  hcall_assign_pcidev(u64 vmid, u64 addr)
    268  hcall_deassign_pcidev(u64 vmid, u64 addr)
    280  hcall_set_ptdev_intr(u64 vmid, u64 irq)
    292  hcall_reset_ptdev_intr(u64 vmid, u64 irq)
    …
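Every helper in this header has the same shape: a static inline that forwards the VM ID (plus at most one extra word, usually a physical address) to a generic hypercall primitive. A self-contained sketch of that pattern with the hypercall layer stubbed out; the HC_* numbers below are made up for the example, and only the wrapper shape mirrors the header:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Illustrative IDs only; the real values live in the ACRN headers. */
    enum { HC_PAUSE_VM = 0x101, HC_SET_IOREQ_BUFFER = 0x102 };

    /* Stand-ins for acrn_hypercall1/2, which really trap into the hypervisor. */
    static long acrn_hypercall1(unsigned long nr, u64 a1)
    {
        printf("hypercall %#lx(%#llx)\n", nr, (unsigned long long)a1);
        return 0;
    }

    static long acrn_hypercall2(unsigned long nr, u64 a1, u64 a2)
    {
        printf("hypercall %#lx(%#llx, %#llx)\n", nr,
               (unsigned long long)a1, (unsigned long long)a2);
        return 0;
    }

    static inline long hcall_pause_vm(u64 vmid)
    {
        return acrn_hypercall1(HC_PAUSE_VM, vmid);
    }

    static inline long hcall_set_ioreq_buffer(u64 vmid, u64 buffer)
    {
        return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer);
    }

    int main(void)
    {
        hcall_pause_vm(3);
        hcall_set_ioreq_buffer(3, 0x1000);
        return 0;
    }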
vm.c
    31   if (ret < 0 || vm_param->vmid == ACRN_INVALID_VMID) {      (acrn_vm_create)
    40   vm->vmid = vm_param->vmid;
    44   hcall_destroy_vm(vm_param->vmid);
    45   vm->vmid = ACRN_INVALID_VMID;
    55   dev_dbg(acrn_dev.this_device, "VM %u created.\n", vm->vmid);
    63   if (vm->vmid == ACRN_INVALID_VMID ||                       (acrn_vm_destroy)
    67   ret = hcall_destroy_vm(vm->vmid);
    70   "Failed to destroy VM %u\n", vm->vmid);
    91   dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
    …
hsm.c
    37   vm->vmid = ACRN_INVALID_VMID;                              (acrn_dev_open)
    126  if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {   (acrn_dev_ioctl)
    160  ret = hcall_start_vm(vm->vmid);
    163  "Failed to start VM %u!\n", vm->vmid);
    166  ret = hcall_pause_vm(vm->vmid);
    169  "Failed to pause VM %u!\n", vm->vmid);
    172  ret = hcall_reset_vm(vm->vmid);
    175  "Failed to restart VM %u!\n", vm->vmid);
    211  ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
    215  vm->vmid);
    …
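The dispatch rule visible in hsm.c is simple: a freshly opened handle carries ACRN_INVALID_VMID, and every ioctl other than "create VM" is rejected until a real VM ID has been bound to it. A user-space model of just that gate (command numbers and the hypercall layer are stand-ins, not the driver's real definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define ACRN_INVALID_VMID ((uint16_t)0xffff)
    enum { CMD_CREATE_VM, CMD_START_VM, CMD_PAUSE_VM };   /* illustrative */

    struct vm { uint16_t vmid; };

    static int do_ioctl(struct vm *vm, int cmd)
    {
        if (vm->vmid == ACRN_INVALID_VMID && cmd != CMD_CREATE_VM)
            return -1;                           /* the driver returns -EINVAL */

        switch (cmd) {
        case CMD_CREATE_VM: vm->vmid = 3; return 0;   /* hypervisor picks the ID */
        case CMD_START_VM:  printf("start VM %u\n", (unsigned)vm->vmid); return 0;
        case CMD_PAUSE_VM:  printf("pause VM %u\n", (unsigned)vm->vmid); return 0;
        }
        return -1;
    }

    int main(void)
    {
        struct vm vm = { .vmid = ACRN_INVALID_VMID };

        printf("start before create: %d\n", do_ioctl(&vm, CMD_START_VM)); /* -1 */
        do_ioctl(&vm, CMD_CREATE_VM);
        printf("start after create:  %d\n", do_ioctl(&vm, CMD_START_VM)); /* 0 */
        return 0;
    }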
/linux/arch/riscv/kvm/
tlb.c
    23   void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
    30       kvm_riscv_local_hfence_gvma_vmid_all(vmid);
    38       : : "r" (pos >> 2), "r" (vmid) : "memory");
    43       : : "r" (pos >> 2), "r" (vmid) : "memory");
    47   void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
    49       asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
    80   void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
    89       kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
    93       hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
    110  void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
    122  kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, unsigned long gva, unsigned long gvsz, unsigned long order)
    150  kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
    170  unsigned long vmid = READ_ONCE(v->vmid);                   (kvm_riscv_tlb_flush_process)
    181  unsigned long vmid = READ_ONCE(v->vmid);                   (kvm_riscv_hfence_vvma_all_process)
    344  kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm, unsigned long hbase, unsigned long hmask, gpa_t gpa, gpa_t gpsz, unsigned long order, unsigned long vmid)
    360  kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long vmid)
    374  kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order, unsigned long asid, unsigned long vmid)
    390  kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long asid, unsigned long vmid)
    404  kvm_riscv_hfence_vvma_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order, unsigned long vmid)
    420  kvm_riscv_hfence_vvma_all(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long vmid)
    …
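The ranged flush helpers fall back to a whole-VMID HFENCE when the range would take too many per-page invalidations, which is why kvm_riscv_local_hfence_gvma_vmid_gpa() can end up calling kvm_riscv_local_hfence_gvma_vmid_all() at line 30. A user-space model of that decision; the threshold constant and the "flush" stubs are illustrative, not the kernel's exact values:

    #include <stdio.h>

    #define PTRS_PER_PTE 512UL   /* assumed entries per page table */

    static void flush_vmid_all(unsigned long vmid)
    {
        printf("HFENCE.GVMA: whole VMID %lu\n", vmid);
    }

    static void flush_vmid_gpa_range(unsigned long vmid, unsigned long gpa,
                                     unsigned long gpsz, unsigned long order)
    {
        unsigned long pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {   /* range too big: give up on it */
            flush_vmid_all(vmid);
            return;
        }
        for (pos = gpa; pos < gpa + gpsz; pos += 1UL << order)
            printf("HFENCE.GVMA: VMID %lu, GPA %#lx\n", vmid, pos);
    }

    int main(void)
    {
        flush_vmid_gpa_range(7, 0x80000000UL, 4UL << 12, 12);   /* 4 pages: ranged */
        flush_vmid_gpa_range(7, 0x80000000UL, 1UL << 30, 12);   /* 1 GiB: falls back */
        return 0;
    }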
vmid.c
    53   kvm->arch.vmid.vmid_version = 0;                           (kvm_riscv_gstage_vmid_init)
    54   kvm->arch.vmid.vmid = 0;
    59   bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
    64       return unlikely(READ_ONCE(vmid->vmid_version) !=
    77   struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;             (kvm_riscv_gstage_vmid_update)
    79   if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
    86   * another vcpu already allocated a valid vmid for this vm.
    88   if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
    130  unsigned long vmid;                                        (kvm_riscv_gstage_vmid_sanitize)
    …
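The update path shown above is a check/re-check pattern: a vCPU only reallocates when its VMID version no longer matches, and after taking the lock it checks again because another vCPU of the same VM may already have refreshed it (the comment at line 86). A sketch of that shape with the locking and the real allocator elided; the version counter and the placeholder VMID value are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct kvm_vmid {
        unsigned long vmid_version;
        unsigned long vmid;
    };

    static unsigned long global_version = 1;

    static bool vmid_ver_changed(const struct kvm_vmid *v)
    {
        return v->vmid_version != global_version;
    }

    static void vmid_update(struct kvm_vmid *v)
    {
        if (!vmid_ver_changed(v))
            return;                     /* fast path: still current */

        /* ...the real code takes vmid_lock here... */
        if (!vmid_ver_changed(v))
            return;                     /* another vCPU already refreshed it */

        v->vmid = 42;                   /* pick a fresh VMID (placeholder) */
        v->vmid_version = global_version;
        printf("refreshed to vmid %lu, version %lu\n", v->vmid, v->vmid_version);
    }

    int main(void)
    {
        struct kvm_vmid v = { .vmid_version = 0, .vmid = 0 };

        vmid_update(&v);    /* stale: refreshes */
        vmid_update(&v);    /* current: no-op */
        return 0;
    }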
vcpu_sbi_v01.c
    26   unsigned long vmid;                                        (kvm_sbi_ext_v01_handler)
    82   vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
    84   kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
    87   cp->a2, PAGE_SHIFT, vmid);
    89   vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
    92   cp->a3, vmid);
    …
vcpu_sbi_replace.c
    99   unsigned long vmid;                                        (kvm_sbi_ext_rfence_handler)
    107  vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
    109  kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
    112  cp->a2, cp->a3, PAGE_SHIFT, vmid);
    116  vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
    119  cp->a4, vmid);
    …
/linux/arch/riscv/include/asm/
kvm_tlb.h
    24   unsigned long vmid;                                        (struct member)
    34   void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
    37   void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
    41   void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
    46   void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
    48   void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
    51   void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
    64   unsigned long order, unsigned long vmid);
    67   unsigned long vmid);
    72   unsigned long vmid);
    …
/linux/drivers/gpu/drm/amd/display/modules/vmid/
vmid.c
    41   static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
    43       if (vmid < MAX_VMID) {
    44           core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
    49   static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
    51       if (vmid < MAX_VMID) {
    52           core_vmid->ptb_assigned_to_vmid[vmid] = 0;
    69   // Return value of -1 indicates vmid table uninitialized or ptb dne in the table
    82   // Expected to be called only when there's an available vmid
    98   int vmid = 0;                                              (mod_vmid_get_for_ptb)
    100  // Physical address gets vmid                              (mod_vmid_get_for_ptb)
    …
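The bookkeeping suggested by these snippets is a small table that maps each in-use VMID slot to the page-table base (PTB) it was handed out for: lookups return -1 when the PTB is not present (the "-1 ... ptb dne in the table" comment), and a free slot is claimed otherwise. A self-contained model of that idea; the table size, the "0 means free" convention, and the lack of an eviction policy are assumptions of the sketch, not the module's actual behaviour:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VMID 16

    struct core_vmid {
        uint64_t ptb_assigned_to_vmid[MAX_VMID];   /* 0 == slot free in this sketch */
    };

    static int get_existing_vmid_for_ptb(const struct core_vmid *c, uint64_t ptb)
    {
        for (int vmid = 0; vmid < MAX_VMID; vmid++)
            if (c->ptb_assigned_to_vmid[vmid] == ptb)
                return vmid;
        return -1;                                 /* ptb not in the table */
    }

    static int vmid_get_for_ptb(struct core_vmid *c, uint64_t ptb)
    {
        int vmid = get_existing_vmid_for_ptb(c, ptb);

        if (vmid >= 0)
            return vmid;                           /* already mapped */

        for (vmid = 0; vmid < MAX_VMID; vmid++) {
            if (c->ptb_assigned_to_vmid[vmid] == 0) {
                c->ptb_assigned_to_vmid[vmid] = ptb;
                return vmid;                       /* fresh assignment */
            }
        }
        return -1;                                 /* table full */
    }

    int main(void)
    {
        struct core_vmid c = { { 0 } };

        printf("first:  %d\n", vmid_get_for_ptb(&c, 0x100000));  /* 0 */
        printf("second: %d\n", vmid_get_for_ptb(&c, 0x200000));  /* 1 */
        printf("repeat: %d\n", vmid_get_for_ptb(&c, 0x100000));  /* 0 again */
        return 0;
    }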
/linux/drivers/gpu/drm/amd/display/dc/dcn20/
dcn20_vmid.c
    32   vmid->regs->reg                                            (register accessor macro body)
    35   vmid->ctx                                                  (context accessor macro body)
    39   vmid->shifts->field_name, vmid->masks->field_name          (shift/mask accessor macro body)
    44   static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
    76   void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
    98   dcn20_wait_for_vmid_ready(vmid);                           (dcn20_vmid_setup)
/linux/drivers/gpu/drm/amd/amdkfd/
cik_event_interrupt.c
    37   unsigned int vmid;                                         (cik_event_interrupt_isr)
    53   vmid = f2g->read_vmid_from_vmfault_reg(dev->adev);
    54   ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid);
    57   tmp_ihre->ring_id |= vmid << 8;
    61   vmid >= dev->vm_info.first_vmid_kfd &&
    62   vmid <= dev->vm_info.last_vmid_kfd;
    66   vmid = (ihre->ring_id & 0x0000ff00) >> 8;
    67   if (vmid < dev->vm_info.first_vmid_kfd ||
    68       vmid > dev->vm_info.last_vmid_kfd)
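The filter used here (and in the v9/v10 handlers below) rests on the split of the hardware VMID space between the graphics driver and KFD: an interrupt is only claimed when its VMID falls inside the KFD-owned window [first_vmid_kfd, last_vmid_kfd]. A small model, unpacking the VMID from bits 8..15 of ring_id as the CIK handler does; the window bounds are example values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vm_info {
        unsigned int first_vmid_kfd;
        unsigned int last_vmid_kfd;
    };

    static bool kfd_owns_interrupt(const struct vm_info *vm, uint32_t ring_id)
    {
        unsigned int vmid = (ring_id & 0x0000ff00) >> 8;

        return vmid >= vm->first_vmid_kfd && vmid <= vm->last_vmid_kfd;
    }

    int main(void)
    {
        struct vm_info vm = { .first_vmid_kfd = 8, .last_vmid_kfd = 15 };

        printf("vmid 3:  %d\n", kfd_owns_interrupt(&vm, 3 << 8));   /* 0: graphics */
        printf("vmid 10: %d\n", kfd_owns_interrupt(&vm, 10 << 8));  /* 1: KFD */
        return 0;
    }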
kfd_int_process_v9.c
    264  uint16_t source_id, client_id, pasid, vmid;                (event_interrupt_isr_v9)
    271  vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
    273  (vmid < dev->vm_info.first_vmid_kfd ||
    274   vmid > dev->vm_info.last_vmid_kfd))
    310  pasid = dev->dqm->vmid_pasid[vmid];
    318  "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
    319  client_id, source_id, vmid, pasid);
    360  uint16_t source_id, client_id, pasid, vmid;                (event_interrupt_wq_v9)
    367  vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
    545  info.vmid …                                                (event_interrupt_wq_v9)
    578  uint16_t node_id, vmid;                                    (event_interrupt_isr_v9_4_3)
    …
kfd_int_process_v10.c
    137  uint16_t source_id, client_id, pasid, vmid;                (event_interrupt_isr_v10)
    144  vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
    146  (vmid < dev->vm_info.first_vmid_kfd ||
    147   vmid > dev->vm_info.last_vmid_kfd))
    172  "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
    173  client_id, source_id, vmid, pasid);
    197  uint16_t source_id, client_id, pasid, vmid;                (event_interrupt_wq_v10)
    204  vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
    356  info.vmid = vmid;                                          (event_interrupt_wq_v10)
    …
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd_gfx_v9.h
    23   void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
    28   unsigned int vmid, uint32_t inst);
    52   uint8_t vmid, uint16_t *p_pasid);
    54   uint32_t vmid, uint64_t page_table_base);
    59   uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
    67   uint32_t vmid,
    71   uint32_t vmid);
    74   uint32_t vmid);
    80   uint32_t vmid);
    82   uint32_t vmid,
    …
amdgpu_amdkfd_gfx_v10.c
    45   uint32_t queue, uint32_t vmid)                             (lock_srbm)
    48   nv_grbm_select(adev, mec, pipe, queue, vmid);
    80   static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
    86   lock_srbm(adev, 0, 0, 0, vmid);
    96   unsigned int vmid, uint32_t inst)                          (kgd_set_pasid_vmid_mapping)
    108  pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
    110  pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
    111  WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, …
    119  (1U << vmid)))
    666  get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid)
    702  set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base)
    736  kgd_gfx_v10_set_wave_launch_stall(struct amdgpu_device *adev, uint32_t vmid, bool stall)
    755  kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, bool restore_dbg_registers, uint32_t vmid)
    792  kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid)
    826  kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t vmid, uint32_t trap_override, uint32_t trap_mask_bits, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t kfd_dbg_trap_cntl_prev)
    861  kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid)
    1048 program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
    …
amdgpu_amdkfd_gfx_v9.c
    51   uint32_t queue, uint32_t vmid, uint32_t inst)              (kgd_gfx_v9_lock_srbm)
    54   soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
    86   void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
    92   kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
    102  unsigned int vmid, uint32_t inst)                          (kgd_gfx_v9_set_pasid_vmid_mapping)
    120  WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, …
    126  (1U << vmid)))
    131  1U << vmid);
    133  /* Mapping vmid to pasid also for IH block */
    134  WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, …
    616  kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid)
    672  kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev, uint32_t vmid, bool stall)
    703  kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, bool restore_dbg_registers, uint32_t vmid)
    726  kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid)
    760  kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t vmid, uint32_t trap_override, uint32_t trap_mask_bits, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t kfd_dbg_cntl_prev)
    795  kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid)
    914  kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base)
    1104 kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
    …
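The PASID/VMID mapping snippets (here and in the v7/v8 files below) all follow the same sequence: write the PASID with a valid bit into the per-VMID ATC mapping register, wait for the hardware to flag the update in a status register, acknowledge it, and mirror the mapping into the interrupt handler's VMID-to-PASID lookup table. A user-space model of that sequence; the register file is a plain array, the valid-bit position is assumed, and the hardware acknowledgement is simulated inline:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_VMID 16
    #define ATC_PASID_VALID (1u << 31)   /* assumed valid-bit position */

    static uint32_t atc_vmid_pasid_mapping[NUM_VMID];
    static uint32_t atc_mapping_update_status;   /* one bit per VMID */
    static uint32_t ih_vmid_lut[NUM_VMID];

    static void hw_ack_update(unsigned int vmid)  /* what the ASIC would do itself */
    {
        atc_mapping_update_status |= 1u << vmid;
    }

    static void set_pasid_vmid_mapping(uint16_t pasid, unsigned int vmid)
    {
        uint32_t pasid_mapping = pasid | ATC_PASID_VALID;

        atc_vmid_pasid_mapping[vmid] = pasid_mapping;
        hw_ack_update(vmid);                       /* simulated acknowledgement */

        while (!(atc_mapping_update_status & (1u << vmid)))
            ;                                      /* busy-wait for the ack */
        atc_mapping_update_status &= ~(1u << vmid);

        ih_vmid_lut[vmid] = pasid_mapping;         /* mirror for the IH block */
    }

    int main(void)
    {
        set_pasid_vmid_mapping(0x1234, 8);
        printf("vmid 8 -> pasid %#x\n", (unsigned)(ih_vmid_lut[8] & 0xffff));
        return 0;
    }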
gmc_v12_0.c
    125  "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",  (gmc_v12_0_process_interrupt)
    127  entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
    184  uint8_t vmid, uint16_t *p_pasid)                           (gmc_v12_0_get_vmid_pasid_mapping_info)
    186  *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
    198  static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
    203  u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
    243  tmp &= 1 << vmid;
    284  * @vmid: vm instance to flush
    290  static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
    306  u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
    337  int vmid, i;                                               (gmc_v12_0_flush_gpu_tlb_pasid)
    360  gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr)
    407  gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, unsigned pasid)
    …
amdgpu_amdkfd_gfx_v7.c
    49   uint32_t queue, uint32_t vmid)                             (lock_srbm)
    51   uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
    77   static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
    83   lock_srbm(adev, 0, 0, 0, vmid);
    94   unsigned int vmid, uint32_t inst)                          (kgd_set_pasid_vmid_mapping)
    105  WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
    107  while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
    109  WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
    111  /* Mapping vmid to pasid also for IH block */
    112  WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
    521  get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid)
    532  set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid)
    540  set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base)
    …
gmc_v11_0.c
    132  "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",  (gmc_v11_0_process_interrupt)
    134  entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
    191  uint8_t vmid, uint16_t *p_pasid)                           (gmc_v11_0_get_vmid_pasid_mapping_info)
    193  *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
    202  * @vmid: vm instance to flush
    208  static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
    213  u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
    237  1 << vmid, GET_INST(GC, 0));
    271  tmp &= 1 << vmid;
    317  int vmid, …                                                (gmc_v11_0_flush_gpu_tlb_pasid)
    340  gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr)
    387  gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, unsigned int pasid)
    …
amdgpu_amdkfd_gfx_v8.c
    43   uint32_t queue, uint32_t vmid)                             (lock_srbm)
    45   uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
    71   static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
    77   lock_srbm(adev, 0, 0, 0, vmid);
    88   unsigned int vmid, uint32_t inst)                          (kgd_set_pasid_vmid_mapping)
    100  WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
    102  while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
    104  WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
    106  /* Mapping vmid to pasid also for IH block */
    107  WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
    532  get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid)
    567  set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid)
    575  set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base)
    …
vcn_sw_ring.c
    47   uint32_t vmid = AMDGPU_JOB_GET_VMID(job);                  (vcn_dec_sw_ring_emit_ib)
    50   amdgpu_ring_write(ring, vmid);
    66   uint32_t vmid, uint64_t pd_addr)                           (vcn_dec_sw_ring_emit_vm_flush)
    71   pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
    74   data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
gmc_v10_0.c
    162  "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",  (gmc_v10_0_process_interrupt)
    164  entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
    222  uint8_t vmid, uint16_t *p_pasid)                           (gmc_v10_0_get_atc_vmid_pasid_mapping_info)
    227  + vmid);
    244  * @vmid: vm instance to flush
    250  static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
    255  u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
    276  1 << vmid, GET_INST(GC, 0));
    318  tmp &= 1 << vmid;
    352  int vmid, …                                                (gmc_v10_0_flush_gpu_tlb_pasid)
    375  gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr)
    422  gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, unsigned int pasid)
    …
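The flush_gpu_tlb_pasid() variants in these GMC files exist because the hardware only invalidates by VMID: flushing "by PASID" means walking the per-VMID PASID mapping registers and invalidating every VMID currently bound to the requested PASID. A model of that loop; the mapping table is a plain array here, whereas the driver reads it through the get_*_vmid_pasid_mapping_info() helpers shown above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_VMID 16

    static uint16_t vmid_to_pasid[NUM_VMID];   /* 0 == unused in this sketch */

    static void flush_gpu_tlb(unsigned int vmid)
    {
        printf("invalidate TLB for vmid %u\n", vmid);
    }

    static bool flush_gpu_tlb_pasid(uint16_t pasid)
    {
        bool found = false;

        for (unsigned int vmid = 0; vmid < NUM_VMID; vmid++) {
            if (vmid_to_pasid[vmid] == pasid) {
                flush_gpu_tlb(vmid);
                found = true;
            }
        }
        return found;
    }

    int main(void)
    {
        vmid_to_pasid[9] = 0x1234;
        printf("flushed: %d\n", flush_gpu_tlb_pasid(0x1234));  /* hits vmid 9 */
        return 0;
    }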
/linux/drivers/gpu/drm/amd/include/
kgd_kfd_interface.h
    51   uint32_t vmid;                                             (struct member)
    170  * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp …
    222  void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
    228  unsigned int vmid, uint32_t inst);
    271  uint8_t vmid,
    279  uint64_t va, uint32_t vmid);
    282  uint32_t vmid, uint64_t page_table_base);
    287  uint32_t vmid);
    290  uint32_t vmid);
    295  uint32_t vmid,
    …
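This header is the indirection that ties the amdkfd and amdgpu halves together: KFD never calls ASIC-specific code directly, it goes through a table of function pointers, which is why cik_event_interrupt.c above can write f2g->get_atc_vmid_pasid_mapping_info(...) and get the right implementation for the chip. A simplified sketch of that dispatch; the struct layout and the demo implementation are stand-ins, only the function-pointer-table idea mirrors the header:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct amdgpu_device { const char *name; };

    struct kfd2kgd_calls {
        bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
                                                uint8_t vmid, uint16_t *p_pasid);
        void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
                                               uint32_t vmid, uint64_t page_table_base);
    };

    /* One per-ASIC implementation (think gfx_v9, gfx_v10, ...). */
    static bool demo_get_mapping(struct amdgpu_device *adev, uint8_t vmid,
                                 uint16_t *p_pasid)
    {
        *p_pasid = 0x100 + vmid;          /* made-up mapping */
        return true;
    }

    static void demo_set_ptb(struct amdgpu_device *adev, uint32_t vmid,
                             uint64_t page_table_base)
    {
        printf("%s: vmid %u -> PTB %#llx\n", adev->name, vmid,
               (unsigned long long)page_table_base);
    }

    static const struct kfd2kgd_calls demo_f2g = {
        .get_atc_vmid_pasid_mapping_info = demo_get_mapping,
        .set_vm_context_page_table_base  = demo_set_ptb,
    };

    int main(void)
    {
        struct amdgpu_device adev = { .name = "demo-asic" };
        uint16_t pasid;

        if (demo_f2g.get_atc_vmid_pasid_mapping_info(&adev, 9, &pasid))
            printf("vmid 9 -> pasid %#x\n", pasid);
        demo_f2g.set_vm_context_page_table_base(&adev, 9, 0x80000000ULL);
        return 0;
    }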
/linux/samples/acrn/
vm-sample.c
    30   __u16 vmid;                                                (file-scope variable)
    40   ioctl(hsm_fd, ACRN_IOCTL_PAUSE_VM, vmid);                  (vm_exit)
    67   vmid = create_vm.vmid;                                     (main)
    101  ret = ioctl(hsm_fd, ACRN_IOCTL_START_VM, vmid);            (main)
    120  notify.vmid = vmid;                                        (main)
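A condensed user-space flow matching the shape of this sample: open the HSM device, ask the hypervisor to create a VM (which hands back the vmid), start it by ID, and pause it on the way out. The ioctl and struct names are the ones visible in the hits above and in the ACRN uapi header; the device path is assumed, and the sample's memory setup and I/O request handling are omitted:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/types.h>
    #include <linux/acrn.h>

    int main(void)
    {
        struct acrn_vm_creation create_vm;
        __u16 vmid;
        int hsm_fd, ret;

        /* The HSM character device; path assumed to be /dev/acrn_hsm. */
        hsm_fd = open("/dev/acrn_hsm", O_RDWR | O_CLOEXEC);
        if (hsm_fd < 0) {
            perror("open");
            return 1;
        }

        memset(&create_vm, 0, sizeof(create_vm));
        ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_VM, &create_vm);
        if (ret < 0) {
            perror("ACRN_IOCTL_CREATE_VM");
            goto out;
        }
        vmid = create_vm.vmid;              /* ID handed back by the hypervisor */

        ret = ioctl(hsm_fd, ACRN_IOCTL_START_VM, vmid);
        if (ret < 0)
            perror("ACRN_IOCTL_START_VM");

        ioctl(hsm_fd, ACRN_IOCTL_PAUSE_VM, vmid);   /* wind down, as vm_exit() does */
    out:
        close(hsm_fd);
        return ret < 0;
    }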