
Searched refs:vm (Results 1 – 25 of 654) sorted by relevance

/linux/drivers/virtio/
virtio_mem.c
  285  static void virtio_mem_retry(struct virtio_mem *vm);
  286  static int virtio_mem_create_resource(struct virtio_mem *vm);
  287  static void virtio_mem_delete_resource(struct virtio_mem *vm);
  293  static int register_virtio_mem_device(struct virtio_mem *vm)
  302  list_add_rcu(&vm->next, &virtio_mem_devices);
  312  static void unregister_virtio_mem_device(struct virtio_mem *vm)
  316  list_del_rcu(&vm->next);
  343  static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
  346  return addr / vm->bbm.bb_size;
  352  static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
[all …]
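
virtio_mem_phys_to_bb_id() and virtio_mem_bb_id_to_phys() above convert between a physical address and a big-block id by plain integer division and multiplication on vm->bbm.bb_size. A minimal standalone sketch of that arithmetic (the 2 GiB block size is an illustrative stand-in, not a value taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for vm->bbm.bb_size; 2 GiB is just an example value. */
    static const uint64_t bb_size = 2ULL << 30;

    static unsigned long phys_to_bb_id(uint64_t addr)
    {
            return addr / bb_size;          /* which big block covers addr */
    }

    static uint64_t bb_id_to_phys(unsigned long bb_id)
    {
            return bb_id * bb_size;         /* start address of that block */
    }

    int main(void)
    {
            uint64_t addr = (5ULL << 30) + 4096;    /* somewhere in block 2 */

            printf("addr 0x%llx -> bb %lu -> base 0x%llx\n",
                   (unsigned long long)addr, phys_to_bb_id(addr),
                   (unsigned long long)bb_id_to_phys(phys_to_bb_id(addr)));
            return 0;
    }
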
/linux/tools/testing/selftests/kvm/lib/arm64/
processor.c
   24  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
   26  return (v + vm->page_size) & ~(vm->page_size - 1);
   29  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
   31  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
   32  uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
   37  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
   39  unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
   40  uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
   42  TEST_ASSERT(vm->pgtable_levels == 4,
   43  "Mode %d does not have 4 page table levels", vm->mode);
[all …]
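
pgd_index() and pud_index() share one shift-and-mask pattern: each 4 KiB table holds 512 eight-byte entries, so a level translates page_shift - 3 bits of the address; the GVA is shifted past the bits resolved by lower levels, then masked to the per-level index width (the top level masks with va_bits - shift bits instead, as line 32 shows). A self-contained sketch of the mid-level computation, assuming 4 KiB pages and four levels:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed geometry for the sketch: 4 KiB pages, four levels. */
    #define PAGE_SHIFT 12
    #define LEVELS     4

    /* Each 4 KiB table holds 512 eight-byte entries, so every level
       translates PAGE_SHIFT - 3 = 9 bits of the virtual address. */
    static uint64_t level_index(uint64_t gva, int level)
    {
            unsigned int bits = PAGE_SHIFT - 3;
            unsigned int shift = level * bits + PAGE_SHIFT;

            return (gva >> shift) & ((1UL << bits) - 1);
    }

    int main(void)
    {
            uint64_t gva = 0x0000007f12345000ULL;

            /* Level LEVELS-1 is the top-level (pgd) index. */
            for (int level = LEVELS - 1; level >= 0; level--)
                    printf("level %d index: %llu\n", level,
                           (unsigned long long)level_index(gva, level));
            return 0;
    }
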
/linux/tools/testing/selftests/kvm/lib/x86/
sev.c
   17  static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
   22  const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
   28  if (!is_sev_snp_vm(vm))
   29  sev_register_encrypted_memory(vm, region);
   32  const uint64_t size = (j - i + 1) * vm->page_size;
   33  const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
   36  vm_mem_set_private(vm, gpa_base + offset, size);
   38  if (is_sev_snp_vm(vm))
   39  snp_launch_update_data(vm, gpa_base + offset,
   40  (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
[all …]
/linux/tools/testing/selftests/kvm/s390/
cmma_test.c
   97  static void create_main_memslot(struct kvm_vm *vm)
  101  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
  104  vm->memslots[i] = 0;
  107  static void create_test_memslot(struct kvm_vm *vm)
  109  vm_userspace_mem_region_add(vm,
  111  TEST_DATA_START_GFN << vm->page_shift,
  116  vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
  119  static void create_memslots(struct kvm_vm *vm)
  135  create_main_memslot(vm);
  136  create_test_memslot(vm);
[all …]
/linux/drivers/gpu/drm/xe/
xe_vm.h
   34  static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
   36  drm_gpuvm_get(&vm->gpuvm);
   37  return vm;
   40  static inline void xe_vm_put(struct xe_vm *vm)
   42  drm_gpuvm_put(&vm->gpuvm);
   45  int xe_vm_lock(struct xe_vm *vm, bool intr);
   47  void xe_vm_unlock(struct xe_vm *vm);
   49  static inline bool xe_vm_is_closed(struct xe_vm *vm)
   52  return !vm->size;
   55  static inline bool xe_vm_is_banned(struct xe_vm *vm)
[all …]
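
xe_vm_get()/xe_vm_put() are thin wrappers over the embedded drm_gpuvm reference count, with the getter returning the pointer so it composes at call sites. A generic userspace sketch of that get/put wrapper pattern (the names and the C11 atomic counter are illustrative, not the DRM API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct my_vm {
            atomic_int refcount;
            size_t size;            /* 0 once closed, as in xe_vm_is_closed() */
    };

    /* Returns vm so callers can write: use(my_vm_get(vm)); */
    static struct my_vm *my_vm_get(struct my_vm *vm)
    {
            atomic_fetch_add(&vm->refcount, 1);
            return vm;
    }

    static void my_vm_put(struct my_vm *vm)
    {
            if (atomic_fetch_sub(&vm->refcount, 1) == 1)
                    free(vm);       /* last reference dropped */
    }

    int main(void)
    {
            struct my_vm *vm = calloc(1, sizeof(*vm));

            atomic_init(&vm->refcount, 1);
            my_vm_get(vm);          /* take a second reference */
            printf("refs: %d\n", atomic_load(&vm->refcount));
            my_vm_put(vm);
            my_vm_put(vm);          /* drops the last reference, frees */
            return 0;
    }
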
xe_userptr.c
   42  int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
   44  lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);
   46  return (list_empty(&vm->userptr.repin_list) &&
   47  list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
   53  struct xe_vm *vm = xe_vma_vm(vma);  in xe_vma_userptr_pin_pages()
   54  struct xe_device *xe = vm->xe;  in xe_vma_userptr_pin_pages()
   60  lockdep_assert_held(&vm->lock);  in xe_vma_userptr_pin_pages()
   66  return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,  in xe_vma_userptr_pin_pages()
   74  static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
   90  if (!xe_vm_in_fault_mode(vm) &&
[all …]
xe_vm.c
   45  static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
   47  return vm->gpuvm.r_obj;
   59  int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
   61  return drm_exec_lock_obj(exec, xe_vm_obj(vm));
   64  static bool preempt_fences_waiting(struct xe_vm *vm)
   68  lockdep_assert_held(&vm->lock);
   69  xe_vm_assert_held(vm);
   71  list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
   90  static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
   93  lockdep_assert_held(&vm->lock);
[all …]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
  122  struct amdgpu_vm *vm;
  136  static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
  138  dma_resv_assert_held(vm->root.bo->tbo.base.resv);
  151  struct amdgpu_vm *vm = vm_bo->vm;  in amdgpu_vm_bo_evicted()
  155  amdgpu_vm_assert_locked(vm);  in amdgpu_vm_bo_evicted()
  156  spin_lock(&vm_bo->vm->status_lock);  in amdgpu_vm_bo_evicted()
  158  list_move(&vm_bo->vm_status, &vm->evicted);  in amdgpu_vm_bo_evicted()
  160  list_move_tail(&vm_bo->vm_status, &vm->evicted);  in amdgpu_vm_bo_evicted()
  161  spin_unlock(&vm_bo->vm->status_lock);  in amdgpu_vm_bo_evicted()
  173  amdgpu_vm_assert_locked(vm_bo->vm);  in amdgpu_vm_bo_moved()
[all …]
/linux/tools/testing/selftests/kvm/lib/loongarch/
processor.c
   15  static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
   20  shift = level * (vm->page_shift - 3) + vm->page_shift;
   21  mask = (1UL << (vm->page_shift - 3)) - 1;
   25  static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
   27  return entry & ~((0x1UL << vm->page_shift) - 1);
   30  static uint64_t ptrs_per_pte(struct kvm_vm *vm)
   32  return 1 << (vm->page_shift - 3);
   35  static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
   40  ptep = addr_gpa2hva(vm, table);
   41  ptrs_per_pte = 1 << (vm->page_shift - 3);
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_ggtt.c
   57  struct drm_i915_private *i915 = ggtt->vm.i915;  in ggtt_init_hw()
   59  i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);  in ggtt_init_hw()
   61  ggtt->vm.is_ggtt = true;  in ggtt_init_hw()
   64  ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);  in ggtt_init_hw()
   67  ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;  in ggtt_init_hw()
   73  ggtt->vm.cleanup(&ggtt->vm);  in ggtt_init_hw()
  115  void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
  120  drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
  123  i915_gem_drain_freed_objects(vm->i915);
  125  mutex_lock(&vm->mutex);
[all …]
intel_gtt.c
   41  struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
   57  obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
   58  vm->lmem_pt_obj_flags);
   65  obj->base.resv = i915_vm_resv_get(vm);
   66  obj->shares_resv_from = vm;
   68  if (vm->fpriv)
   69  i915_drm_client_add_object(vm->fpriv->client, obj);
   75  struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
   79  if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
   80  i915_gem_shrink_all(vm->i915);
[all …]
gen8_ppgtt.c
   90  struct drm_i915_private *i915 = ppgtt->vm.i915;  in gen8_ppgtt_notify_vgt()
   91  struct intel_uncore *uncore = ppgtt->vm.gt->uncore;  in gen8_ppgtt_notify_vgt()
  102  if (i915_vm_is_4lvl(&ppgtt->vm)) {  in gen8_ppgtt_notify_vgt()
  180  static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
  182  unsigned int shift = __gen8_pte_shift(vm->top);
  184  return (vm->total + (1ull << shift) - 1) >> shift;
  188  gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
  190  struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
  192  if (vm->top == 2)
  195  return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
[all …]
intel_gtt.h
   64  #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
  233  void (*bind_vma)(struct i915_address_space *vm,
  242  void (*unbind_vma)(struct i915_address_space *vm,
  308  (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
  310  (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
  319  void (*allocate_va_range)(struct i915_address_space *vm,
  322  void (*clear_range)(struct i915_address_space *vm,
  324  void (*scratch_range)(struct i915_address_space *vm,
  326  void (*insert_page)(struct i915_address_space *vm,
  331  void (*insert_entries)(struct i915_address_space *vm,
[all …]
/linux/drivers/gpu/drm/lima/
lima_vm.c
   18  struct lima_vm *vm;
   35  static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
   43  vm->bts[pbe].cpu[bte] = 0;
   47  static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
   52  if (!vm->bts[pbe].cpu) {
   57  vm->bts[pbe].cpu = dma_alloc_wc(
   58  vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
   59  &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
   60  if (!vm->bts[pbe].cpu)
   63  pts = vm->bts[pbe].dma;
[all …]
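
lima_vm_map_page() fills its directory slot lazily: the second-level table behind vm->bts[pbe] is only allocated (with dma_alloc_wc()) the first time a mapping lands in it. A userspace sketch of the same lazy two-level scheme; the 22/12 address split, the toy present bit, and calloc() standing in for the DMA allocator are all assumptions of the sketch, not Lima's real layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DIR_ENTRIES 256
    #define PT_ENTRIES  1024

    struct toy_vm {
            uint32_t *tables[DIR_ENTRIES];  /* second level, allocated on demand */
    };

    static int map_page(struct toy_vm *vm, uint32_t pa, uint32_t va)
    {
            uint32_t dir = (va >> 22) & (DIR_ENTRIES - 1);
            uint32_t idx = (va >> 12) & (PT_ENTRIES - 1);

            if (!vm->tables[dir]) {
                    /* First touch of this directory slot: allocate the table. */
                    vm->tables[dir] = calloc(PT_ENTRIES, sizeof(uint32_t));
                    if (!vm->tables[dir])
                            return -1;
            }
            vm->tables[dir][idx] = pa | 1;  /* bit 0 as a toy "present" flag */
            return 0;
    }

    int main(void)
    {
            struct toy_vm vm = {0};

            if (map_page(&vm, 0x1000, 0x400000))    /* lands in dir slot 1 */
                    return 1;
            printf("pte for va 0x400000: 0x%x\n", (unsigned)vm.tables[1][0]);
            return 0;
    }
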
/linux/drivers/gpu/drm/panthor/
panthor_mmu.c
   47  struct panthor_vm *vm;
  103  } vm;
  407  struct panthor_vm *vm;
  439  struct panthor_vm *vm = cookie;  in alloc_pt()
  443  if (unlikely(!vm->root_page_table)) {  in alloc_pt()
  446  drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);  in alloc_pt()
  447  p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),  in alloc_pt()
  450  vm->root_page_table = page;  in alloc_pt()
  457  if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))  in alloc_pt()
  463  if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||  in alloc_pt()
[all …]
/linux/tools/testing/selftests/kvm/lib/s390/
processor.c
   13  void virt_arch_pgd_alloc(struct kvm_vm *vm)
   17  TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
   18  vm->page_size);
   20  if (vm->pgd_created)
   23  paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
   25  vm->memslots[MEM_REGION_PT]);
   26  memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
   28  vm->pgd = paddr;
   29  vm->pgd_created = true;
   37  static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
[all …]
/linux/drivers/virt/acrn/
vm.c
   25  struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
   37  mutex_init(&vm->regions_mapping_lock);
   38  INIT_LIST_HEAD(&vm->ioreq_clients);
   39  spin_lock_init(&vm->ioreq_clients_lock);
   40  vm->vmid = vm_param->vmid;
   41  vm->vcpu_num = vm_param->vcpu_num;
   43  if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
   45  vm->vmid = ACRN_INVALID_VMID;
   50  list_add(&vm->list, &acrn_vm_list);
   53  acrn_ioeventfd_init(vm);
[all …]
irqfd.c
   30  struct acrn_vm *vm;
   41  struct acrn_vm *vm = irqfd->vm;  in acrn_irqfd_inject()
   43  acrn_msi_inject(vm, irqfd->msi.msi_addr,  in acrn_irqfd_inject()
   51  lockdep_assert_held(&irqfd->vm->irqfds_lock);  in hsm_irqfd_shutdown()
   63  struct acrn_vm *vm;  in hsm_irqfd_shutdown_work()
   66  vm = irqfd->vm;  in hsm_irqfd_shutdown_work()
   67  mutex_lock(&vm->irqfds_lock);  in hsm_irqfd_shutdown_work()
   70  mutex_unlock(&vm->irqfds_lock);  in hsm_irqfd_shutdown_work()
   79  struct acrn_vm *vm;  in hsm_irqfd_wakeup()
   82  vm = irqfd->vm;  in hsm_irqfd_wakeup()
[all …]
ioreq.c
   39  static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
   64  ret = hcall_notify_req_finish(vm->vmid, vcpu);
   79  if (vcpu >= client->vm->vcpu_num)  in acrn_ioreq_complete_request()
   84  acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;  in acrn_ioreq_complete_request()
   88  ret = ioreq_complete_request(client->vm, vcpu, acrn_req);  in acrn_ioreq_complete_request()
   93  int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
   97  spin_lock_bh(&vm->ioreq_clients_lock);
   98  if (vm->default_client)
   99  ret = acrn_ioreq_complete_request(vm->default_client,
  101  spin_unlock_bh(&vm->ioreq_clients_lock);
[all …]
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
  168  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
  170  if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
  171  vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
  173  vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
  174  vm->dirty_ring_size = ring_size;
  177  static void vm_open(struct kvm_vm *vm)
  179  vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
  183  vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
  184  TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
  187  vm->stats.fd = vm_get_stats_fd(vm);
[all …]
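
vm_open() follows the canonical KVM bring-up sequence: open /dev/kvm, check the API version, then issue KVM_CREATE_VM (the selftest passes vm->type; this sketch uses the default type 0). A minimal standalone version, runnable on a host where /dev/kvm is accessible:

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Same shape as vm_open(): open /dev/kvm, then KVM_CREATE_VM. */
            int kvm_fd = open("/dev/kvm", O_RDWR);

            if (kvm_fd < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }
            if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
                    fprintf(stderr, "unexpected KVM API version\n");
                    return 1;
            }
            /* 0 selects the default VM type; vm_open() passes vm->type. */
            int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

            if (vm_fd < 0) {
                    perror("KVM_CREATE_VM");
                    return 1;
            }
            close(vm_fd);
            close(kvm_fd);
            return 0;
    }
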
/linux/drivers/gpu/drm/i915/selftests/
mock_gtt.c
   27  static void mock_insert_page(struct i915_address_space *vm,
   35  static void mock_insert_entries(struct i915_address_space *vm,
   41  static void mock_bind_ppgtt(struct i915_address_space *vm,
   51  static void mock_unbind_ppgtt(struct i915_address_space *vm,
   56  static void mock_cleanup(struct i915_address_space *vm)
   60  static void mock_clear_range(struct i915_address_space *vm,
   73  ppgtt->vm.gt = to_gt(i915);  in mock_ppgtt()
   74  ppgtt->vm.i915 = i915;  in mock_ppgtt()
   75  ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);  in mock_ppgtt()
   76  ppgtt->vm.dma = i915->drm.dev;  in mock_ppgtt()
[all …]
/linux/tools/testing/selftests/kvm/include/x86/
sev.h
   34  static inline bool is_sev_snp_vm(struct kvm_vm *vm)
   36  return vm->type == KVM_X86_SNP_VM;
   39  static inline bool is_sev_es_vm(struct kvm_vm *vm)
   41  return is_sev_snp_vm(vm) || vm->type == KVM_X86_SEV_ES_VM;
   44  static inline bool is_sev_vm(struct kvm_vm *vm)
   46  return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
   49  void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
   50  void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
   51  void sev_vm_launch_finish(struct kvm_vm *vm);
   52  void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
[all …]
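
The three predicates nest deliberately: an SNP VM also counts as SEV-ES, and an SEV-ES VM also counts as SEV, so testing for the weaker feature set covers the stronger ones. A standalone sketch of the chained-predicate idiom (the enum tags are illustrative, not the real KVM_X86_* constants):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative type tags; not the real KVM_X86_* values. */
    enum vm_type { PLAIN_VM, SEV_VM, SEV_ES_VM, SNP_VM };

    static bool is_snp(enum vm_type t)    { return t == SNP_VM; }
    static bool is_sev_es(enum vm_type t) { return is_snp(t) || t == SEV_ES_VM; }
    static bool is_sev(enum vm_type t)    { return is_sev_es(t) || t == SEV_VM; }

    int main(void)
    {
            /* An SNP guest satisfies every weaker predicate. */
            printf("snp: sev_es=%d sev=%d\n", is_sev_es(SNP_VM), is_sev(SNP_VM));
            /* A plain SEV guest is not SEV-ES. */
            printf("sev: sev_es=%d sev=%d\n", is_sev_es(SEV_VM), is_sev(SEV_VM));
            return 0;
    }
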
/linux/sound/pci/ctxfi/
ctvmem.c
   30  get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
   36  if (size > vm->size) {
   42  guard(mutex)(&vm->lock);
   43  list_for_each(pos, &vm->unused) {
   48  if (pos == &vm->unused)
   53  list_move(&entry->list, &vm->used);
   54  vm->size -= size;
   64  list_add(&block->list, &vm->used);
   67  vm->size -= size;
   72  static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
[all …]
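
get_vm_block() walks vm->unused for the first block large enough (first-fit), moves it to the used list on an exact fit, and splits it otherwise. A compact sketch of first-fit with splitting, using a fixed array in place of the kernel's list_head machinery:

    #include <stdio.h>

    struct block { unsigned int addr, size, used; };

    /* A toy free map: contiguous blocks, first-fit allocation. */
    static struct block blocks[8] = { { 0, 64, 0 }, { 64, 192, 0 } };
    static int nblocks = 2;

    static int alloc_block(unsigned int size)
    {
            for (int i = 0; i < nblocks; i++) {
                    if (blocks[i].used || blocks[i].size < size)
                            continue;
                    if (blocks[i].size > size && nblocks < 8) {
                            /* Split: carve the tail off as a new free block. */
                            blocks[nblocks++] = (struct block){
                                    blocks[i].addr + size,
                                    blocks[i].size - size, 0 };
                            blocks[i].size = size;
                    }
                    blocks[i].used = 1;
                    return (int)blocks[i].addr;
            }
            return -1;      /* no block fits */
    }

    int main(void)
    {
            printf("alloc 64  -> addr %d\n", alloc_block(64));   /* exact fit */
            printf("alloc 100 -> addr %d\n", alloc_block(100));  /* split */
            return 0;
    }
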
/linux/drivers/gpu/drm/i915/display/
intel_dpt.c
   19  struct i915_address_space vm;
   26  #define i915_is_dpt(vm) ((vm)->is_dpt)
   29  i915_vm_to_dpt(struct i915_address_space *vm)
   31  BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
   32  drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
   33  return container_of(vm, struct i915_dpt, vm);
   41  static void dpt_insert_page(struct i915_address_space *vm,
   47  struct i915_dpt *dpt = i915_vm_to_dpt(vm);
   51  vm->pte_encode(addr, pat_index, flags));
   54  static void dpt_insert_entries(struct i915_address_space *vm,
[all …]
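
i915_vm_to_dpt() recovers the containing struct i915_dpt from a pointer to its embedded i915_address_space; the BUILD_BUG_ON pins the vm member at offset zero, making the conversion a no-op cast in practice. A freestanding sketch of the container_of pattern itself (the kernel's macro adds type checking this minimal version omits):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct address_space { int is_dpt; };

    struct dpt {
            struct address_space vm;        /* embedded base, at offset 0 here */
            int width;
    };

    static struct dpt *vm_to_dpt(struct address_space *vm)
    {
            return container_of(vm, struct dpt, vm);
    }

    int main(void)
    {
            struct dpt d = { .vm = { .is_dpt = 1 }, .width = 42 };
            struct address_space *vm = &d.vm;       /* what callers pass around */

            printf("width via base pointer: %d\n", vm_to_dpt(vm)->width);
            return 0;
    }
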
/linux/tools/testing/selftests/kvm/arm64/
smccc_filter.c
   27  struct kvm_vm *vm = vm_create(1);  in test_runs_at_el2()
   30  kvm_get_default_vcpu_target(vm, &init);  in test_runs_at_el2()
   31  kvm_vm_free(vm);  in test_runs_at_el2()
   52  static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
   61  return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
   65  static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
   68  int ret = __set_smccc_filter(vm, start, nr_functions, action);
   76  struct kvm_vm *vm;  in setup_vm()
   78  vm = vm_create(1);  in setup_vm()
   79  kvm_get_default_vcpu_target(vm, &init);  in setup_vm()
[all …]
